repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
stevenbrichards/boto | boto/cognito/identity/layer1.py | 135 | 24701 |
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cognito.identity import exceptions
class CognitoIdentityConnection(AWSQueryConnection):
"""
Amazon Cognito
Amazon Cognito is a web service that delivers scoped temporary
credentials to mobile devices and other untrusted environments.
Amazon Cognito uniquely identifies a device and supplies the user
with a consistent identity over the lifetime of an application.
Using Amazon Cognito, you can enable authentication with one or
more third-party identity providers (Facebook, Google, or Login
with Amazon), and you can also choose to support unauthenticated
access from your app. Cognito delivers a unique identifier for
each user and acts as an OpenID token provider trusted by AWS
Security Token Service (STS) to access temporary, limited-
privilege AWS credentials.
To provide end-user credentials, first make an unsigned call to
GetId. If the end user is authenticated with one of the supported
identity providers, set the `Logins` map with the identity
provider token. `GetId` returns a unique identifier for the user.
Next, make an unsigned call to GetOpenIdToken, which returns the
OpenID token necessary to call STS and retrieve AWS credentials.
This call expects the same `Logins` map as the `GetId` call, as
well as the `IdentityID` originally returned by `GetId`. The token
returned by `GetOpenIdToken` can be passed to the STS operation
`AssumeRoleWithWebIdentity`_ to retrieve AWS credentials.
"""
APIVersion = "2014-06-30"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cognito-identity.us-east-1.amazonaws.com"
ServiceName = "CognitoIdentity"
TargetPrefix = "AWSCognitoIdentityService"
ResponseError = JSONResponseError
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"ResourceConflictException": exceptions.ResourceConflictException,
"DeveloperUserAlreadyRegisteredException": exceptions.DeveloperUserAlreadyRegisteredException,
"TooManyRequestsException": exceptions.TooManyRequestsException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalErrorException": exceptions.InternalErrorException,
"NotAuthorizedException": exceptions.NotAuthorizedException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CognitoIdentityConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
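    # A minimal sketch (not part of the original module) of the end-user
    # credential flow described in the class docstring; the account ID,
    # pool ID, and provider token below are hypothetical placeholders.
    #
    #   conn = CognitoIdentityConnection()
    #   identity = conn.get_id(
    #       account_id='123456789012',
    #       identity_pool_id='us-east-1:EXAMPLE-GUID',
    #       logins={'graph.facebook.com': provider_token})
    #   token = conn.get_open_id_token(
    #       identity_id=identity['IdentityId'],
    #       logins={'graph.facebook.com': provider_token})
    #   # token['Token'] can then be exchanged with the STS operation
    #   # AssumeRoleWithWebIdentity for temporary AWS credentials.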
def create_identity_pool(self, identity_pool_name,
allow_unauthenticated_identities,
supported_login_providers=None,
developer_provider_name=None,
open_id_connect_provider_ar_ns=None):
"""
Creates a new identity pool. The identity pool is a store of
user identity information that is specific to your AWS
account. The limit on identity pools is 60 per account.
:type identity_pool_name: string
:param identity_pool_name: A string that you provide.
:type allow_unauthenticated_identities: boolean
:param allow_unauthenticated_identities: TRUE if the identity pool
supports unauthenticated logins.
:type supported_login_providers: map
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users. This name acts as a placeholder that allows
your backend and the Cognito service to communicate about the
developer provider. For the `DeveloperProviderName`, you can use
letters as well as period ( `.`), underscore ( `_`), and dash (
`-`).
Once you have set a developer provider name, you cannot change it.
Please take care in setting this parameter.
:type open_id_connect_provider_ar_ns: list
:param open_id_connect_provider_ar_ns:
"""
params = {
'IdentityPoolName': identity_pool_name,
'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
if developer_provider_name is not None:
params['DeveloperProviderName'] = developer_provider_name
if open_id_connect_provider_ar_ns is not None:
params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns
return self.make_request(action='CreateIdentityPool',
body=json.dumps(params))
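    # Example call, assuming `conn` is a configured CognitoIdentityConnection;
    # the Facebook app ID is a hypothetical placeholder and the return value
    # is the decoded JSON response.
    #
    #   pool = conn.create_identity_pool(
    #       identity_pool_name='MyAppPool',
    #       allow_unauthenticated_identities=True,
    #       supported_login_providers={'graph.facebook.com': '1234567890'})
    #   pool_id = pool['IdentityPoolId']   # format REGION:GUID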
def delete_identity_pool(self, identity_pool_id):
"""
        Deletes an identity pool. Once a pool is deleted, users will not be
able to authenticate with the pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {'IdentityPoolId': identity_pool_id, }
return self.make_request(action='DeleteIdentityPool',
body=json.dumps(params))
def describe_identity_pool(self, identity_pool_id):
"""
        Gets details about a particular identity pool, including the
        pool name, ID, description, creation date, and current number
        of users.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {'IdentityPoolId': identity_pool_id, }
return self.make_request(action='DescribeIdentityPool',
body=json.dumps(params))
def get_id(self, account_id, identity_pool_id, logins=None):
"""
Generates (or retrieves) a Cognito ID. Supplying multiple
logins will create an implicit linked account.
:type account_id: string
:param account_id: A standard AWS account ID (9+ digits).
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens.
The available provider names for `Logins` are as follows:
+ Facebook: `graph.facebook.com`
+ Google: `accounts.google.com`
+ Amazon: `www.amazon.com`
"""
params = {
'AccountId': account_id,
'IdentityPoolId': identity_pool_id,
}
if logins is not None:
params['Logins'] = logins
return self.make_request(action='GetId',
body=json.dumps(params))
def get_open_id_token(self, identity_id, logins=None):
"""
Gets an OpenID token, using a known Cognito ID. This known
Cognito ID is returned by GetId. You can optionally add
additional logins for the identity. Supplying multiple logins
creates an implicit link.
The OpenId token is valid for 15 minutes.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens.
"""
params = {'IdentityId': identity_id, }
if logins is not None:
params['Logins'] = logins
return self.make_request(action='GetOpenIdToken',
body=json.dumps(params))
def get_open_id_token_for_developer_identity(self, identity_pool_id,
logins, identity_id=None,
token_duration=None):
"""
Registers (or retrieves) a Cognito `IdentityId` and an OpenID
Connect token for a user authenticated by your backend
authentication process. Supplying multiple logins will create
an implicit linked account. You can only specify one developer
provider as part of the `Logins` map, which is linked to the
identity pool. The developer provider is the "domain" by which
Cognito will refer to your users.
You can use `GetOpenIdTokenForDeveloperIdentity` to create a
new identity and to link new logins (that is, user credentials
issued by a public provider or developer provider) to an
existing identity. When you want to create a new identity, the
`IdentityId` should be null. When you want to associate a new
login with an existing authenticated/unauthenticated identity,
you can do so by providing the existing `IdentityId`. This API
will create the identity in the specified `IdentityPoolId`.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens. Each name-value pair represents a user
from a public provider or developer provider. If the user is from a
developer provider, the name-value pair will follow the syntax
`"developer_provider_name": "developer_user_identifier"`. The
developer provider is the "domain" by which Cognito will refer to
your users; you provided this domain while creating/updating the
identity pool. The developer user identifier is an identifier from
your backend that uniquely identifies a user. When you create an
identity pool, you can specify the supported logins.
:type token_duration: long
:param token_duration: The expiration time of the token, in seconds.
You can specify a custom expiration time for the token so that you
can cache it. If you don't provide an expiration time, the token is
valid for 15 minutes. You can exchange the token with Amazon STS
for temporary AWS credentials, which are valid for a maximum of one
hour. The maximum token duration you can set is 24 hours. You
should take care in setting the expiration time for a token, as
there are significant security implications: an attacker could use
a leaked token to access your AWS resources for the token's
duration.
"""
params = {
'IdentityPoolId': identity_pool_id,
'Logins': logins,
}
if identity_id is not None:
params['IdentityId'] = identity_id
if token_duration is not None:
params['TokenDuration'] = token_duration
return self.make_request(action='GetOpenIdTokenForDeveloperIdentity',
body=json.dumps(params))
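    # A sketch of the developer-provider flow described above, with
    # hypothetical names: 'login.mycompany.myapp' must match the
    # DeveloperProviderName configured on the pool.
    #
    #   result = conn.get_open_id_token_for_developer_identity(
    #       identity_pool_id='us-east-1:EXAMPLE-GUID',
    #       logins={'login.mycompany.myapp': 'user-8675309'},
    #       token_duration=3600)   # seconds; default is 15 minutes
    #   identity_id, token = result['IdentityId'], result['Token']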
def list_identities(self, identity_pool_id, max_results, next_token=None):
"""
Lists the identities in a pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token.
"""
params = {
'IdentityPoolId': identity_pool_id,
'MaxResults': max_results,
}
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListIdentities',
body=json.dumps(params))
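    # Pagination sketch: pass the returned NextToken back in until the
    # service stops returning one (names below are illustrative).
    #
    #   token = None
    #   while True:
    #       page = conn.list_identities('us-east-1:EXAMPLE-GUID', 60,
    #                                   next_token=token)
    #       for identity in page.get('Identities', []):
    #           process(identity)       # hypothetical consumer
    #       token = page.get('NextToken')
    #       if not token:
    #           break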
def list_identity_pools(self, max_results, next_token=None):
"""
Lists all of the Cognito identity pools registered for your
account.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token.
"""
params = {'MaxResults': max_results, }
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListIdentityPools',
body=json.dumps(params))
def lookup_developer_identity(self, identity_pool_id, identity_id=None,
developer_user_identifier=None,
max_results=None, next_token=None):
"""
        Retrieves the `IdentityId` associated with a
        `DeveloperUserIdentifier`, or the list of
        `DeveloperUserIdentifier`s associated with an `IdentityId`, for
        an existing identity. At least one of `IdentityId` or
        `DeveloperUserIdentifier` must be supplied. If you supply only
        one of these values, the other value will be searched in the
        database and returned as part of the response. If you supply
        both, `DeveloperUserIdentifier` will be matched against
        `IdentityId`. If the values are verified against the database,
        the response returns both values and is the same as the
        request. Otherwise a `ResourceConflictException` is thrown.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type developer_user_identifier: string
:param developer_user_identifier: A unique ID used by your backend
authentication process to identify a user. Typically, a developer
identity provider would issue many developer user identifiers, in
keeping with the number of users.
:type max_results: integer
:param max_results: The maximum number of identities to return.
:type next_token: string
:param next_token: A pagination token. The first call you make will
have `NextToken` set to null. After that the service will return
`NextToken` values as needed. For example, let's say you make a
request with `MaxResults` set to 10, and there are 20 matches in
the database. The service will return a pagination token as a part
of the response. This token can be used to call the API again and
get results starting from the 11th match.
"""
params = {'IdentityPoolId': identity_pool_id, }
if identity_id is not None:
params['IdentityId'] = identity_id
if developer_user_identifier is not None:
params['DeveloperUserIdentifier'] = developer_user_identifier
if max_results is not None:
params['MaxResults'] = max_results
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='LookupDeveloperIdentity',
body=json.dumps(params))
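    # Either-direction lookup, per the docstring above (hypothetical IDs):
    #
    #   conn.lookup_developer_identity('us-east-1:EXAMPLE-GUID',
    #                                  developer_user_identifier='user-8675309')
    #   # -> response containing the matching IdentityId; conversely,
    #   # supplying only identity_id returns the developer user identifiers.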
def merge_developer_identities(self, source_user_identifier,
destination_user_identifier,
developer_provider_name, identity_pool_id):
"""
Merges two users having different `IdentityId`s, existing in
the same identity pool, and identified by the same developer
provider. You can use this action to request that discrete
users be merged and identified as a single user in the Cognito
environment. Cognito associates the given source user (
`SourceUserIdentifier`) with the `IdentityId` of the
`DestinationUserIdentifier`. Only developer-authenticated
users can be merged. If the users to be merged are associated
with the same public provider, but as two different users, an
exception will be thrown.
:type source_user_identifier: string
:param source_user_identifier: User identifier for the source user. The
value should be a `DeveloperUserIdentifier`.
:type destination_user_identifier: string
:param destination_user_identifier: User identifier for the destination
user. The value should be a `DeveloperUserIdentifier`.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users. This is a (pseudo) domain name that you
provide while creating an identity pool. This name acts as a
placeholder that allows your backend and the Cognito service to
communicate about the developer provider. For the
`DeveloperProviderName`, you can use letters as well as period (.),
underscore (_), and dash (-).
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
"""
params = {
'SourceUserIdentifier': source_user_identifier,
'DestinationUserIdentifier': destination_user_identifier,
'DeveloperProviderName': developer_provider_name,
'IdentityPoolId': identity_pool_id,
}
return self.make_request(action='MergeDeveloperIdentities',
body=json.dumps(params))
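    # Merge sketch (hypothetical identifiers): after this call, both
    # developer user IDs resolve to the destination user's IdentityId.
    #
    #   conn.merge_developer_identities(
    #       source_user_identifier='user-old',
    #       destination_user_identifier='user-new',
    #       developer_provider_name='login.mycompany.myapp',
    #       identity_pool_id='us-east-1:EXAMPLE-GUID')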
def unlink_developer_identity(self, identity_id, identity_pool_id,
developer_provider_name,
developer_user_identifier):
"""
Unlinks a `DeveloperUserIdentifier` from an existing identity.
Unlinked developer users will be considered new identities
next time they are seen. If, for a given Cognito identity, you
remove all federated identities as well as the developer user
identifier, the Cognito identity becomes inaccessible.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users.
:type developer_user_identifier: string
:param developer_user_identifier: A unique ID used by your backend
authentication process to identify a user.
"""
params = {
'IdentityId': identity_id,
'IdentityPoolId': identity_pool_id,
'DeveloperProviderName': developer_provider_name,
'DeveloperUserIdentifier': developer_user_identifier,
}
return self.make_request(action='UnlinkDeveloperIdentity',
body=json.dumps(params))
def unlink_identity(self, identity_id, logins, logins_to_remove):
"""
Unlinks a federated identity from an existing account.
Unlinked logins will be considered new identities next time
they are seen. Removing the last linked login will make this
identity inaccessible.
:type identity_id: string
:param identity_id: A unique identifier in the format REGION:GUID.
:type logins: map
:param logins: A set of optional name-value pairs that map provider
names to provider tokens.
:type logins_to_remove: list
:param logins_to_remove: Provider names to unlink from this identity.
"""
params = {
'IdentityId': identity_id,
'Logins': logins,
'LoginsToRemove': logins_to_remove,
}
return self.make_request(action='UnlinkIdentity',
body=json.dumps(params))
def update_identity_pool(self, identity_pool_id, identity_pool_name,
allow_unauthenticated_identities,
supported_login_providers=None,
developer_provider_name=None,
open_id_connect_provider_ar_ns=None):
"""
        Updates an identity pool.
:type identity_pool_id: string
:param identity_pool_id: An identity pool ID in the format REGION:GUID.
:type identity_pool_name: string
:param identity_pool_name: A string that you provide.
:type allow_unauthenticated_identities: boolean
:param allow_unauthenticated_identities: TRUE if the identity pool
supports unauthenticated logins.
:type supported_login_providers: map
:param supported_login_providers: Optional key:value pairs mapping
provider names to provider app IDs.
:type developer_provider_name: string
:param developer_provider_name: The "domain" by which Cognito will
refer to your users.
:type open_id_connect_provider_ar_ns: list
:param open_id_connect_provider_ar_ns:
"""
params = {
'IdentityPoolId': identity_pool_id,
'IdentityPoolName': identity_pool_name,
'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
}
if supported_login_providers is not None:
params['SupportedLoginProviders'] = supported_login_providers
if developer_provider_name is not None:
params['DeveloperProviderName'] = developer_provider_name
if open_id_connect_provider_ar_ns is not None:
params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns
return self.make_request(action='UpdateIdentityPool',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
chalasr/Flask-P2P | venv/lib/python2.7/site-packages/jinja2/environment.py | 614 | 47244 |
# -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and unaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instantiated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, string_types):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
    assert issubclass(environment.undefined, Undefined), 'undefined must ' \
        'be a subclass of Undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
    Here are the possible initialization parameters:
    `block_start_string`
        The string marking the beginning of a block. Defaults to ``'{%'``.
    `block_end_string`
        The string marking the end of a block. Defaults to ``'%}'``.
    `variable_start_string`
        The string marking the beginning of a print statement.
        Defaults to ``'{{'``.
    `variable_end_string`
        The string marking the end of a print statement. Defaults to
        ``'}}'``.
    `comment_start_string`
        The string marking the beginning of a comment. Defaults to ``'{#'``.
    `comment_end_string`
        The string marking the end of a comment. Defaults to ``'#}'``.
    `line_statement_prefix`
        If given and a string, this will be used as prefix for line-based
        statements. See also :ref:`line-statements`.
    `line_comment_prefix`
        If given and a string, this will be used as prefix for line-based
        comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
        :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
        be a callable that is passed the template name and has to
        return `True` or `False` depending on whether autoescape should
        be enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in iteritems(attributes):
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, lstrip_blocks=missing,
extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in iteritems(args):
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
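    # A small sketch of the overlay behaviour described above: the overlay
    # shares data with its parent but can override lexer settings.
    #
    #   base = Environment()
    #   sgml = base.overlay(comment_start_string='<!--#',
    #                       comment_end_string='#-->')
    #   assert sgml.overlayed and sgml.linked_to is base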
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.filters.get(name)
if func is None:
raise TemplateRuntimeError('no filter named %r' % name)
args = [value] + list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {}))
def call_test(self, name, value, args=None, kwargs=None):
"""Invokes a test on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.tests.get(name)
if func is None:
raise TemplateRuntimeError('no test named %r' % name)
return func(value, *(args or ()), **(kwargs or {}))
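    # Both helpers mirror what compiled template code does; for example,
    # with the default filters and tests:
    #
    #   env = Environment()
    #   env.call_filter('upper', 'jinja')   # -> 'JINJA'
    #   env.call_test('even', 42)           # -> True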
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is use internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does not do anything
on pypy and Python 3 where pyc files are not picked up by itself and
don't give much benefit.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x
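    # The two filtering modes are mutually exclusive; for example:
    #
    #   env.list_templates(extensions=['html', 'txt'])
    #   env.list_templates(filter_func=lambda name: name.startswith('mail/'))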
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method ask the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
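    # For example:
    #
    #   env = Environment()
    #   env.from_string(u'Hello {{ name }}!').render(name='World')
    #   # -> u'Hello World!'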
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
    Every template object has a few methods and members that are guaranteed
    to exist. However, a template object should be considered
    immutable; modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
lstrip_blocks, newline_sequence, keep_trailing_newline,
frozenset(extensions), optimized, undefined, finalize, autoescape,
None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
        This will return the rendered template as a unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
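    # Streaming sketch: render a large template piece by piece instead of
    # building one big string (`write` is a stand-in for any consumer).
    #
    #   for chunk in template.generate(items=very_long_list):
    #       write(chunk)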
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
        are added to the context. If shared is set to `True` the data
        is passed as-is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(imap(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
converting it into an unicode- or bytestrings renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@implements_iterator
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written, if you want to encode
before writing specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, string_types):
fp = open(fp, encoding is None and 'w' or 'wb')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = get_next(self._gen)
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = get_next(generator(get_next(self._gen)))
def __iter__(self):
return self
def __next__(self):
return self._next()
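# Illustrative usage sketch (assumes a compiled template `t`; not part of
# this module):
#
#   stream = t.stream(name='World')   # Template.stream() returns a TemplateStream
#   stream.enable_buffering(size=5)   # combine five events per yielded string
#   stream.dump('rendered.html')      # or iterate and flush via WSGI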
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
|
mit
|
egonw/citeulike
|
plugins/python/worldcat.py
|
2
|
1451
|
#!/usr/bin/env python
import re, sys, urlparse, urllib2
from cultools import urlparams, bail
import socket
socket.setdefaulttimeout(15)
#
# Read URL from stdin and check it's OK
#
url = sys.stdin.readline().strip()
oclc_match = re.search(r'/(oclc|isbn)/([0-9\-]+)', url, re.IGNORECASE)
if not oclc_match:
bail("Couldn't find either an 'oclc' or 'isbn' in the URL (" + url + ")")
type = oclc_match.group(1)
id = oclc_match.group(2)
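# e.g. http://www.worldcat.org/oclc/12345 gives type='oclc', id='12345';
# http://www.worldcat.org/isbn/0596002815 gives type='isbn', id='0596002815'
# (illustrative identifiers)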
#
# Fetch the page - don't need it, but it validates the URL the user posted
#
try:
page = urllib2.urlopen(url).read().strip()
except:
bail("Couldn't fetch page (" + url + ")")
if (type == "isbn"):
isbn = id
m = re.search(r'/oclc/(\d+)', page)
if m:
oclc = m.group(1)
else:
bail("Couldn't locate OCLC on page "+url)
else:
oclc = id
m = re.search(r'rft.isbn=([\w\-]+)', page)
if m:
isbn = m.group(1)
else:
isbn = ""
#
# Fetch the RIS file
#
ris_file_url = 'http://www.worldcat.org/oclc/%s?page=endnote' % oclc
try:
ris_file = urllib2.urlopen(ris_file_url).read()
except:
bail("Could not fetch RIS file (" + ris_file_url + ")")
# print ris_file
if not re.search(r'TY\s{1,4}-', ris_file):
bail("RIS file doesn't have a 'TY -'")
print "begin_tsv"
print "linkout\tWCAT\t\t%s\t\t" % oclc
if isbn != "":
isbn = re.sub('-','',isbn)
print "linkout\tISBN\t\t%s\t\t" % isbn
print "isbn\t%s" % isbn
print "end_tsv"
print "begin_ris"
print "%s" % (ris_file)
print "end_ris"
print "status\tok"
|
bsd-3-clause
|
GeorgeJahad/graphite-api
|
tests/test_http.py
|
14
|
1174
|
from . import TestCase
class HttpTestCase(TestCase):
def test_cors(self):
response = self.app.options('/render')
self.assertFalse(
'Access-Control-Allow-Origin' in response.headers.keys())
response = self.app.options('/render', headers=(
('Origin', 'https://example.com'),
))
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'https://example.com')
response = self.app.options('/render', headers=(
('Origin', 'http://foo.example.com:8888'),
))
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://foo.example.com:8888')
response = self.app.options('/', headers=(
('Origin', 'http://foo.example.com'),
))
self.assertFalse(
'Access-Control-Allow-Origin' in response.headers.keys())
def test_trailing_slash(self):
response = self.app.get('/render?target=foo')
self.assertEqual(response.status_code, 200)
response = self.app.get('/render/?target=foo')
self.assertEqual(response.status_code, 200)
|
apache-2.0
|
bblacey/FreeCAD-MacOS-CI
|
src/Mod/Path/PathScripts/PathAreaUtils.py
|
9
|
15642
|
import area
from nc.nc import *
import PathScripts.nc.iso
import math
import PathKurveUtils
# some globals, to save passing variables as parameters too much
area_for_feed_possible = None
tool_radius_for_pocket = None
def cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, final_depth):
prev_p = p
first = True
#comment("cut_curve:14 rss:" + str(rapid_safety_space) + " current start depth :" + str(current_start_depth) + " final depth :" + str(final_depth) + " need rapid: " + str(need_rapid))
for vertex in curve.getVertices():
if need_rapid and first:
# rapid across
rapid(vertex.p.x, vertex.p.y)
# rapid down
rapid(z = current_start_depth + rapid_safety_space)
#feed down
feed(z = final_depth)
first = False
else:
if vertex.type == 1:
arc_ccw(vertex.p.x, vertex.p.y, i = vertex.c.x, j = vertex.c.y)
elif vertex.type == -1:
arc_cw(vertex.p.x, vertex.p.y, i = vertex.c.x, j = vertex.c.y)
else:
feed(vertex.p.x, vertex.p.y)
prev_p = vertex.p
return prev_p
def area_distance(a, old_area):
best_dist = None
for curve in a.getCurves():
for vertex in curve.getVertices():
c = old_area.NearestPoint(vertex.p)
d = c.dist(vertex.p)
if best_dist == None or d < best_dist:
best_dist = d
for curve in old_area.getCurves():
for vertex in curve.getVertices():
c = a.NearestPoint(vertex.p)
d = c.dist(vertex.p)
if best_dist == None or d < best_dist:
best_dist = d
return best_dist
def make_obround(p0, p1, radius):
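# Build a closed "obround" (stadium) area around the segment p0->p1: two
# straight sides offset by `radius` on either side of the segment, joined
# by semicircular arcs (type 1 vertices) centred on p1 and p0.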
dir = p1 - p0
d = dir.length()
dir.normalize()
right = area.Point(dir.y, -dir.x)
obround = area.Area()
c = area.Curve()
vt0 = p0 + right * radius
vt1 = p1 + right * radius
vt2 = p1 - right * radius
vt3 = p0 - right * radius
c.append(area.Vertex(0, vt0, area.Point(0, 0)))
c.append(area.Vertex(0, vt1, area.Point(0, 0)))
c.append(area.Vertex(1, vt2, p1))
c.append(area.Vertex(0, vt3, area.Point(0, 0)))
c.append(area.Vertex(1, vt0, p0))
obround.append(c)
return obround
def feed_possible(p0, p1):
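# A feed move from p0 to p1 is considered safe when the swept tool shape
# (an obround of the tool radius) lies entirely inside the pocket area:
# subtracting the area from the obround must leave no curves behind.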
if p0 == p1:
return True
obround = make_obround(p0, p1, tool_radius_for_pocket)
a = area.Area(area_for_feed_possible)
obround.Subtract(a)
if obround.num_curves() > 0:
return False
return True
def cut_curvelist1(curve_list, rapid_safety_space, current_start_depth, depth, clearance_height, keep_tool_down_if_poss):
p = area.Point(0, 0)
first = True
for curve in curve_list:
need_rapid = True
if first == False:
s = curve.FirstVertex().p
if keep_tool_down_if_poss == True:
# see if we can feed across
if feed_possible(p, s):
need_rapid = False
elif s.x == p.x and s.y == p.y:
need_rapid = False
if need_rapid:
rapid(z = clearance_height)
p = cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, depth)
first = False
rapid(z = clearance_height)
def cut_curvelist2(curve_list, rapid_safety_space, current_start_depth, depth, clearance_height, keep_tool_down_if_poss,start_point):
p = area.Point(0, 0)
start_x,start_y=start_point
first = True
for curve in curve_list:
need_rapid = True
if first == True:
direction = "On";radius = 0.0;offset_extra = 0.0; roll_radius = 0.0;roll_on = 0.0; roll_off = 0.0; rapid_safety_space; step_down = math.fabs(depth);extend_at_start = 0.0;extend_at_end = 0.0
PathKurveUtils.make_smaller( curve, start = area.Point(start_x,start_y))
PathKurveUtils.profile(curve, direction, radius , offset_extra, roll_radius, roll_on, roll_off, rapid_safety_space , clearance_height, current_start_depth, step_down , depth, extend_at_start, extend_at_end)
else:
s = curve.FirstVertex().p
if keep_tool_down_if_poss == True:
# see if we can feed across
if feed_possible(p, s):
need_rapid = False
elif s.x == p.x and s.y == p.y:
need_rapid = False
cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, depth)
first = False  # change to True to rapid back to the start side before zigging again when zig_unidirectional is set
rapid(z = clearance_height)
def recur(arealist, a1, stepover, from_center):
# this makes arealist by recursively offsetting a1 inwards
if a1.num_curves() == 0:
return
if from_center:
arealist.insert(0, a1)
else:
arealist.append(a1)
a_offset = area.Area(a1)
a_offset.Offset(stepover)
# split curves into new areas
if area.holes_linked():
for curve in a_offset.getCurves():
a2 = area.Area()
a2.append(curve)
recur(arealist, a2, stepover, from_center)
else:
# split curves into new areas
a_offset.Reorder()
a2 = None
for curve in a_offset.getCurves():
if curve.IsClockwise():
if a2 != None:
a2.append(curve)
else:
if a2 != None:
recur(arealist, a2, stepover, from_center)
a2 = area.Area()
a2.append(curve)
if a2 != None:
recur(arealist, a2, stepover, from_center)
def get_curve_list(arealist, reverse_curves = False):
curve_list = list()
for a in arealist:
for curve in a.getCurves():
if reverse_curves == True:
curve.Reverse()
curve_list.append(curve)
return curve_list
curve_list_for_zigs = []
rightward_for_zigs = True
sin_angle_for_zigs = 0.0
cos_angle_for_zigs = 1.0
sin_minus_angle_for_zigs = 0.0
cos_minus_angle_for_zigs = 1.0
one_over_units = 1.0
def make_zig_curve(curve, y0, y, zig_unidirectional):
if rightward_for_zigs:
curve.Reverse()
# find a high point to start looking from
high_point = None
for vertex in curve.getVertices():
if high_point == None:
high_point = vertex.p
elif vertex.p.y > high_point.y:
# use this as the new high point
high_point = vertex.p
elif math.fabs(vertex.p.y - high_point.y) < 0.002 * one_over_units:
# equal high point
if rightward_for_zigs:
# use the furthest left point
if vertex.p.x < high_point.x:
high_point = vertex.p
else:
# use the furthest right point
if vertex.p.x > high_point.x:
high_point = vertex.p
zig = area.Curve()
high_point_found = False
zig_started = False
zag_found = False
for i in range(0, 2): # process the curve twice because we don't know where it will start
prev_p = None
for vertex in curve.getVertices():
if zag_found: break
if prev_p != None:
if zig_started:
zig.append(unrotated_vertex(vertex))
if math.fabs(vertex.p.y - y) < 0.002 * one_over_units:
zag_found = True
break
elif high_point_found:
if math.fabs(vertex.p.y - y0) < 0.002 * one_over_units:
if zig_started:
zig.append(unrotated_vertex(vertex))
elif math.fabs(prev_p.y - y0) < 0.002 * one_over_units and vertex.type == 0:
zig.append(area.Vertex(0, unrotated_point(prev_p), area.Point(0, 0)))
zig.append(unrotated_vertex(vertex))
zig_started = True
elif vertex.p.x == high_point.x and vertex.p.y == high_point.y:
high_point_found = True
prev_p = vertex.p
if zig_started:
if zig_unidirectional == True:
# remove the last bit of zig
if math.fabs(zig.LastVertex().p.y - y) < 0.002 * one_over_units:
vertices = zig.getVertices()
while len(vertices) > 0:
v = vertices[len(vertices)-1]
if math.fabs(v.p.y - y0) < 0.002 * one_over_units:
break
else:
vertices.pop()
zig = area.Curve()
for v in vertices:
zig.append(v)
curve_list_for_zigs.append(zig)
def make_zig(a, y0, y, zig_unidirectional):
for curve in a.getCurves():
make_zig_curve(curve, y0, y, zig_unidirectional)
reorder_zig_list_list = []
def add_reorder_zig(curve):
global reorder_zig_list_list
# look in existing lists
s = curve.FirstVertex().p
for curve_list in reorder_zig_list_list:
last_curve = curve_list[len(curve_list) - 1]
e = last_curve.LastVertex().p
if math.fabs(s.x - e.x) < 0.002 * one_over_units and math.fabs(s.y - e.y) < 0.002 * one_over_units:
curve_list.append(curve)
return
# else add a new list
curve_list = []
curve_list.append(curve)
reorder_zig_list_list.append(curve_list)
def reorder_zigs():
global curve_list_for_zigs
global reorder_zig_list_list
reorder_zig_list_list = []
for curve in curve_list_for_zigs:
add_reorder_zig(curve)
curve_list_for_zigs = []
for curve_list in reorder_zig_list_list:
for curve in curve_list:
curve_list_for_zigs.append(curve)
def rotated_point(p):
return area.Point(p.x * cos_angle_for_zigs - p.y * sin_angle_for_zigs, p.x * sin_angle_for_zigs + p.y * cos_angle_for_zigs)
def unrotated_point(p):
return area.Point(p.x * cos_minus_angle_for_zigs - p.y * sin_minus_angle_for_zigs, p.x * sin_minus_angle_for_zigs + p.y * cos_minus_angle_for_zigs)
def rotated_vertex(v):
if v.type:
return area.Vertex(v.type, rotated_point(v.p), rotated_point(v.c))
return area.Vertex(v.type, rotated_point(v.p), area.Point(0, 0))
def unrotated_vertex(v):
if v.type:
return area.Vertex(v.type, unrotated_point(v.p), unrotated_point(v.c))
return area.Vertex(v.type, unrotated_point(v.p), area.Point(0, 0))
def rotated_area(a):
an = area.Area()
for curve in a.getCurves():
curve_new = area.Curve()
for v in curve.getVertices():
curve_new.append(rotated_vertex(v))
an.append(curve_new)
return an
def zigzag(a, stepover, zig_unidirectional):
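# Strategy: rotate the area so the requested zig angle lies along the X
# axis, slice it into horizontal bands one stepover tall, intersect each
# band with the area, extract zig curves, then rotate the results back.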
if a.num_curves() == 0:
return
global rightward_for_zigs
global curve_list_for_zigs
global sin_angle_for_zigs
global cos_angle_for_zigs
global sin_minus_angle_for_zigs
global cos_minus_angle_for_zigs
global one_over_units
one_over_units = 1 / area.get_units()
a = rotated_area(a)
b = area.Box()
a.GetBox(b)
x0 = b.MinX() - 1.0
x1 = b.MaxX() + 1.0
height = b.MaxY() - b.MinY()
num_steps = int(height / stepover + 1)
y = b.MinY() + 0.1 * one_over_units
null_point = area.Point(0, 0)
rightward_for_zigs = True
curve_list_for_zigs = []
for i in range(0, num_steps):
y0 = y
y = y + stepover
p0 = area.Point(x0, y0)
p1 = area.Point(x0, y)
p2 = area.Point(x1, y)
p3 = area.Point(x1, y0)
c = area.Curve()
c.append(area.Vertex(0, p0, null_point, 0))
c.append(area.Vertex(0, p1, null_point, 0))
c.append(area.Vertex(0, p2, null_point, 1))
c.append(area.Vertex(0, p3, null_point, 0))
c.append(area.Vertex(0, p0, null_point, 1))
a2 = area.Area()
a2.append(c)
a2.Intersect(a)
make_zig(a2, y0, y, zig_unidirectional)
if zig_unidirectional == False:
rightward_for_zigs = (rightward_for_zigs == False)
reorder_zigs()
def pocket(a, tool_radius, extra_offset, stepover, depthparams, from_center, keep_tool_down_if_poss, use_zig_zag, zig_angle, zig_unidirectional=False, start_point=None, cut_mode='conventional'):
global tool_radius_for_pocket
global area_for_feed_possible
#if len(a.getCurves()) > 1:
# for crv in a.getCurves():
# ar = area.Area()
# ar.append(crv)
# pocket(ar, tool_radius, extra_offset, rapid_safety_space, start_depth, final_depth, stepover, stepdown, clearance_height, from_center, keep_tool_down_if_poss, use_zig_zag, zig_angle, zig_unidirectional)
# return
tool_radius_for_pocket = tool_radius
if keep_tool_down_if_poss:
area_for_feed_possible = area.Area(a)
area_for_feed_possible.Offset(extra_offset - 0.01)
use_internal_function = False  # (area.holes_linked() == False): use the internal function if the area module is the Clipper library
if use_internal_function:
curve_list = a.MakePocketToolpath(tool_radius, extra_offset, stepover, from_center, use_zig_zag, zig_angle)
else:
global sin_angle_for_zigs
global cos_angle_for_zigs
global sin_minus_angle_for_zigs
global cos_minus_angle_for_zigs
radians_angle = zig_angle * math.pi / 180
sin_angle_for_zigs = math.sin(-radians_angle)
cos_angle_for_zigs = math.cos(-radians_angle)
sin_minus_angle_for_zigs = math.sin(radians_angle)
cos_minus_angle_for_zigs = math.cos(radians_angle)
arealist = list()
a_offset = area.Area(a)
current_offset = tool_radius + extra_offset
a_offset.Offset(current_offset)
do_recursive = True
if use_zig_zag:
zigzag(a_offset, stepover, zig_unidirectional)
curve_list = curve_list_for_zigs
else:
if do_recursive:
recur(arealist, a_offset, stepover, from_center)
else:
while(a_offset.num_curves() > 0):
if from_center:
arealist.insert(0, a_offset)
else:
arealist.append(a_offset)
current_offset = current_offset + stepover
a_offset = area.Area(a)
a_offset.Offset(current_offset)
curve_list = get_curve_list(arealist, cut_mode == 'climb')
depths = depthparams.get_depths()
current_start_depth = depthparams.start_depth
if start_point is None:
for depth in depths:
cut_curvelist1(curve_list, depthparams.rapid_safety_space, current_start_depth, depth, depthparams.clearance_height, keep_tool_down_if_poss)
current_start_depth = depth
else:
for depth in depths:
cut_curvelist2(curve_list, depthparams.rapid_safety_space, current_start_depth, depth, depthparams.clearance_height, keep_tool_down_if_poss, start_point)
current_start_depth = depth
|
lgpl-2.1
|
serialx/pexpect-u
|
tools/testall.py
|
3
|
1975
|
#!/usr/bin/env python
'''This script runs all tests in a directory.
It does not need to know about the tests ahead of time.
It recursively descends from the current directory and
automatically builds up a list of tests to run.
Only directories named 'tests' are processed.
The path to each 'tests' directory is added to the PYTHONPATH.
Only python scripts that start with 'test_' are added to
the list of scripts in the test suite.
Noah Spurrier
'''
import unittest
import os, os.path
import sys
import pexpect
print "Testing pexpect version:", pexpect.__version__
print "Testing pexpect revision:", pexpect.__revision__
def add_tests_to_list (import_list, dirname, names):
# Only check directories named 'tests'.
if os.path.basename(dirname) != 'tests':
return
# Add any files that start with 'test_' and end with '.py'.
for f in names:
filename, ext = os.path.splitext(f)
if ext != '.py':
continue
if filename.find('test_') == 0:
import_list.append (os.path.join(dirname, filename))
def find_modules_and_add_paths (root_path):
import_list = []
module_list = []
os.path.walk (root_path, add_tests_to_list, import_list)
for module_file in import_list:
path, module = os.path.split(module_file)
module_list.append (module)
print 'Adding:', module_file
if path not in sys.path:
sys.path.append(path)
if os.path.dirname(path) not in sys.path:
sys.path.append(os.path.dirname(path))
module_list.sort()
return module_list
def suite():
modules_to_test = find_modules_and_add_paths (os.getcwd())
alltests = unittest.TestSuite()
for module in map(__import__, modules_to_test):
alltests.addTest(unittest.findTestCases(module))
return alltests
if __name__ == '__main__':
unittest.main(defaultTest='suite')
# s = all()
# runner = unittest.TextTestRunner()
# runner.run (s)
|
mit
|
mtanski/linux-fs
|
tools/perf/scripts/python/futex-contention.py
|
1997
|
1508
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
|
gpl-2.0
|
g-stream/g-stream
|
tests/test_user_model.py
|
5
|
3984
|
import unittest
import time
from app import create_app, db
from app.models import User, AnonymousUser, Role, Permission
class UserModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_password_setter(self):
u = User(password='cat')
self.assertTrue(u.password_hash is not None)
def test_no_password_getter(self):
u = User(password='cat')
with self.assertRaises(AttributeError):
u.password
def test_password_verification(self):
u = User(password='cat')
self.assertTrue(u.verify_password('cat'))
self.assertFalse(u.verify_password('dog'))
def test_password_salts_are_random(self):
u = User(password='cat')
u2 = User(password='cat')
self.assertTrue(u.password_hash != u2.password_hash)
def test_valid_confirmation_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_confirmation_token()
self.assertTrue(u.confirm(token))
def test_invalid_confirmation_token(self):
u1 = User(password='cat')
u2 = User(password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_confirmation_token()
self.assertFalse(u2.confirm(token))
def test_expired_confirmation_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_confirmation_token(1)
time.sleep(2)
self.assertFalse(u.confirm(token))
def test_valid_reset_token(self):
u = User(password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_reset_token()
self.assertTrue(u.reset_password(token, 'dog'))
self.assertTrue(u.verify_password('dog'))
def test_invalid_reset_token(self):
u1 = User(password='cat')
u2 = User(password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_reset_token()
self.assertFalse(u2.reset_password(token, 'horse'))
self.assertTrue(u2.verify_password('dog'))
def test_valid_email_change_token(self):
u = User(email='[email protected]', password='cat')
db.session.add(u)
db.session.commit()
token = u.generate_email_change_token('[email protected]')
self.assertTrue(u.change_email(token))
self.assertTrue(u.email == '[email protected]')
def test_invalid_email_change_token(self):
u1 = User(email='[email protected]', password='cat')
u2 = User(email='[email protected]', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u1.generate_email_change_token('[email protected]')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == '[email protected]')
def test_duplicate_email_change_token(self):
u1 = User(email='[email protected]', password='cat')
u2 = User(email='[email protected]', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
token = u2.generate_email_change_token('[email protected]')
self.assertFalse(u2.change_email(token))
self.assertTrue(u2.email == '[email protected]')
def test_roles_and_permissions(self):
u = User(email='[email protected]', password='cat')
self.assertTrue(u.can(Permission.WRITE_ARTICLES))
self.assertFalse(u.can(Permission.MODERATE_COMMENTS))
def test_anonymous_user(self):
u = AnonymousUser()
self.assertFalse(u.can(Permission.FOLLOW))
|
mit
|
treycausey/scikit-learn
|
sklearn/__check_build/__init__.py
|
30
|
1669
|
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
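# arrange the file names in three columns so the listing embedded in
# the error message stays readable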
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
|
bsd-3-clause
|
awatts/boto
|
tests/integration/ec2/autoscale/test_cert_verification.py
|
126
|
1575
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.ec2.autoscale
class AutoscaleCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
autoscale = True
regions = boto.ec2.autoscale.regions()
def sample_service_call(self, conn):
conn.get_all_groups()
|
mit
|
stasiek/robotframework
|
src/robot/reporting/resultwriter.py
|
18
|
5933
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.conf import RebotSettings
from robot.errors import DataError
from robot.model import ModelModifier
from robot.output import LOGGER
from robot.result import ExecutionResult, Result
from robot.utils import unic
from .jsmodelbuilders import JsModelBuilder
from .logreportwriters import LogWriter, ReportWriter
from .xunitwriter import XUnitWriter
class ResultWriter(object):
"""A class to create log, report, output XML and xUnit files.
:param sources: Either one :class:`~robot.result.executionresult.Result`
object, or one or more paths to existing output XML files.
By default writes ``report.html`` and ``log.html``, but no output XML
or xUnit files. Custom file names can be given and results disabled
or enabled using ``settings`` or ``options`` passed to the
:meth:`write_results` method. The latter is typically more convenient::
writer = ResultWriter(result)
writer.write_results(report='custom.html', log=None, xunit='xunit.xml')
"""
def __init__(self, *sources):
self._sources = sources
def write_results(self, settings=None, **options):
"""Writes results based on the given ``settings`` or ``options``.
:param settings: :class:`~robot.conf.settings.RebotSettings` object
to configure result writing.
:param options: Used to construct new
:class:`~robot.conf.settings.RebotSettings` object if ``settings``
are not given.
"""
settings = settings or RebotSettings(options)
results = Results(settings, *self._sources)
if settings.output:
self._write_output(results.result, settings.output)
if settings.xunit:
self._write_xunit(results.result, settings.xunit,
settings.xunit_skip_noncritical)
if settings.log:
config = dict(settings.log_config,
minLevel=results.js_result.min_level)
self._write_log(results.js_result, settings.log, config)
if settings.report:
results.js_result.remove_data_not_needed_in_report()
self._write_report(results.js_result, settings.report,
settings.report_config)
return results.return_code
def _write_output(self, result, path):
self._write('Output', result.save, path)
def _write_xunit(self, result, path, skip_noncritical):
self._write('XUnit', XUnitWriter(result, skip_noncritical).write, path)
def _write_log(self, js_result, path, config):
self._write('Log', LogWriter(js_result).write, path, config)
def _write_report(self, js_result, path, config):
self._write('Report', ReportWriter(js_result).write, path, config)
def _write(self, name, writer, path, *args):
try:
writer(path, *args)
except DataError as err:
LOGGER.error(unic(err))
except EnvironmentError as err:
# `err.filename` can be different than `path` at least if reading
# log/report templates or writing split log fails.
# `unic` is needed due to http://bugs.jython.org/issue1825.
LOGGER.error("Writing %s file '%s' failed: %s: %s" %
(name.lower(), path, err.strerror, unic(err.filename)))
else:
LOGGER.output_file(name, path)
class Results(object):
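# Lazily builds and caches the combined execution result and its JavaScript
# model; when built from multiple sources, pruning releases the result once
# the JS model exists to save memory.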
def __init__(self, settings, *sources):
self._settings = settings
self._sources = sources
if len(sources) == 1 and isinstance(sources[0], Result):
self._result = sources[0]
self._prune = False
self.return_code = self._result.return_code
else:
self._result = None
self._prune = True
self.return_code = -1
self._js_result = None
@property
def result(self):
if self._result is None:
include_keywords = bool(self._settings.log or self._settings.output)
flattened = self._settings.flatten_keywords
self._result = ExecutionResult(include_keywords=include_keywords,
flattened_keywords=flattened,
merge=self._settings.merge,
*self._sources)
self._result.configure(self._settings.status_rc,
self._settings.suite_config,
self._settings.statistics_config)
modifier = ModelModifier(self._settings.pre_rebot_modifiers,
self._settings.process_empty_suite,
LOGGER)
self._result.suite.visit(modifier)
self.return_code = self._result.return_code
return self._result
@property
def js_result(self):
if self._js_result is None:
builder = JsModelBuilder(log_path=self._settings.log,
split_log=self._settings.split_log,
prune_input_to_save_memory=self._prune)
self._js_result = builder.build_from(self.result)
if self._prune:
self._result = None
return self._js_result
|
apache-2.0
|
muccg/rdrf
|
rdrf/rdrf/views/context_views.py
|
1
|
15077
|
from django.urls import reverse_lazy
from django.forms import ModelForm
from django.views.generic.base import View
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.http import Http404
from django.contrib.contenttypes.models import ContentType
from rdrf.models.definition.models import Registry
from rdrf.models.definition.models import RDRFContext
from rdrf.models.definition.models import ContextFormGroup
from rdrf.helpers.utils import get_error_messages
from rdrf.helpers.utils import get_form_links
from rdrf.forms.navigation.locators import PatientLocator
from rdrf.forms.components import RDRFContextLauncherComponent
from registry.patients.models import Patient
from registry.groups.models import WorkingGroup
import logging
logger = logging.getLogger("registry_log")
class ContextForm(ModelForm):
class Meta:
model = RDRFContext
fields = ['display_name']
class ContextFormGroupHelperMixin(object):
def get_context_form_group(self, form_group_id):
if form_group_id is None:
return None
else:
return ContextFormGroup.objects.get(pk=form_group_id)
def get_context_name(self, registry_model, context_form_group):
if not registry_model.has_feature("contexts"):
raise Exception("Registry does not support contexts")
else:
if context_form_group is not None:
return context_form_group.name
else:
return registry_model.metadata.get("context_name", "Context")
def get_context_launcher(self, user, registry_model, patient_model, context_model=None):
context_launcher = RDRFContextLauncherComponent(user,
registry_model,
patient_model,
'',
context_model)
return context_launcher.html
def get_naming_info(self, form_group_id):
if form_group_id is not None:
context_form_group = ContextFormGroup.objects.get(id=form_group_id)
return context_form_group.naming_info
else:
return "Display Name will default to 'Modules' if left blank"
def get_default_name(self, patient_model, context_form_group_model):
if context_form_group_model is None:
return "Modules"
else:
return context_form_group_model.get_default_name(patient_model)
def allowed(self, user, registry_code, patient_id, context_id):
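# A context is editable only when the registry supports contexts, the
# context belongs to that registry, the user belongs to the registry, and
# the patient's working groups are a subset of the user's (superusers are
# treated as holding every working group of the registry).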
try:
is_normal_user = not user.is_superuser
registry_model = Registry.objects.get(code=registry_code)
if not registry_model.has_feature("contexts"):
return False
patient_model = Patient.objects.get(pk=patient_id)
patient_working_groups = set([wg for wg in patient_model.working_groups.all()])
context_model = RDRFContext.objects.get(pk=context_id)
if not user.is_superuser:
user_working_groups = set([wg for wg in user.working_groups.all()])
else:
user_working_groups = set(
[wg for wg in WorkingGroup.objects.filter(registry=registry_model)])
if is_normal_user and not user.in_registry(registry_model):
return False
if context_model.registry.code != registry_model.code:
return False
if not (patient_working_groups <= user_working_groups):
return False
return True
except Exception as ex:
logger.error("error in context allowed check: %s" % ex)
return False
def sanity_check(self, registry_model):
if not registry_model.has_feature("contexts"):
return HttpResponseRedirect("/")
def create_context_and_goto_form(self, registry_model, patient_model, context_form_group):
assert len(
context_form_group.form_models) == 1, "Direct link only possible if num forms in form group is 1"
patient_content_type = ContentType.objects.get_for_model(patient_model)
form_model = context_form_group.form_models[0]
context_model = RDRFContext()
context_model.registry = registry_model
context_model.name = "change me"
context_model.content_object = patient_model
context_model.content_type = patient_content_type
context_model.context_form_group = context_form_group
context_model.save()
form_link = reverse('registry_form', args=(registry_model.code,
form_model.id,
patient_model.pk,
context_model.id))
return HttpResponseRedirect(form_link)
class RDRFContextCreateView(View, ContextFormGroupHelperMixin):
model = RDRFContext
template_name = "rdrf_cdes/rdrf_context.html"
success_url = reverse_lazy('contextslisting')
@method_decorator(login_required)
def get(self, request, registry_code, patient_id, context_form_group_id=None):
registry_model = Registry.objects.get(code=registry_code)
self.sanity_check(registry_model)
patient_model = Patient.objects.get(pk=patient_id)
context_form_group = self.get_context_form_group(context_form_group_id)
naming_info = self.get_naming_info(context_form_group_id)
context_name = self.get_context_name(registry_model, context_form_group)
default_display_name = self.get_default_name(patient_model, context_form_group)
default_values = {"display_name": default_display_name}
if context_form_group and context_form_group.supports_direct_linking:
return self.create_context_and_goto_form(registry_model,
patient_model,
context_form_group)
context = {"location": "Add %s" % context_name,
"registry": registry_model.code,
"patient_id": patient_id,
"form_links": [],
"context_name": context_name,
'patient_link': PatientLocator(registry_model, patient_model).link,
"patient_name": patient_model.display_name,
"context_launcher": self.get_context_launcher(request.user,
registry_model,
patient_model),
"naming_info": naming_info,
"form": ContextForm(initial=default_values)}
return render(request, "rdrf_cdes/rdrf_context.html", context)
@method_decorator(login_required)
def post(self, request, registry_code, patient_id, context_form_group_id=None):
form = ContextForm(request.POST)
registry_model = Registry.objects.get(code=registry_code)
self.sanity_check(registry_model)
patient_model = Patient.objects.get(pk=patient_id)
context_form_group_model = self.get_context_form_group(context_form_group_id)
naming_info = self.get_naming_info(context_form_group_id)
context_name = self.get_context_name(registry_model, context_form_group_model)
if form.is_valid():
patient_model = Patient.objects.get(id=patient_id)
registry_model = Registry.objects.get(code=registry_code)
content_type = ContentType.objects.get_for_model(patient_model)
context_model = form.save(commit=False)
context_model.registry = registry_model
context_model.content_type = content_type
context_model.content_object = patient_model
if context_form_group_model:
context_model.context_form_group = context_form_group_model
context_model.save()
context_edit = reverse('context_edit', kwargs={"registry_code": registry_model.code,
"patient_id": patient_model.pk,
"context_id": context_model.pk})
return HttpResponseRedirect(context_edit)
else:
error_messages = get_error_messages([form])
context = {"location": "Add %s" % context_name,
"errors": True,
"error_messages": error_messages,
"registry": registry_model.code,
'patient_link': PatientLocator(registry_model, patient_model).link,
"patient_id": patient_id,
"form_links": [],
"naming_info": naming_info,
"context_launcher": self.get_context_launcher(request.user,
registry_model,
patient_model),
"patient_name": patient_model.display_name,
"form": ContextForm(request.POST)}
return render(request, "rdrf_cdes/rdrf_context.html", context)
class RDRFContextEditView(View, ContextFormGroupHelperMixin):
model = RDRFContext
template_name = "rdrf_cdes/rdrf_context.html"
success_url = reverse_lazy('contextslisting')
@method_decorator(login_required)
def get(self, request, registry_code, patient_id, context_id):
try:
rdrf_context_model = RDRFContext.objects.get(pk=context_id)
except RDRFContext.DoesNotExist:
raise Http404()
if not self.allowed(request.user, registry_code, patient_id, context_id):
return HttpResponseRedirect("/")
context_form = ContextForm(instance=rdrf_context_model)
patient_model = rdrf_context_model.content_object
registry_model = rdrf_context_model.registry
patient_name = patient_model.display_name
if rdrf_context_model.context_form_group:
context_form_group_model = self.get_context_form_group(
rdrf_context_model.context_form_group.pk)
naming_info = context_form_group_model.naming_info
else:
context_form_group_model = None
naming_info = self.get_naming_info(None)
context_name = self.get_context_name(registry_model, context_form_group_model)
form_links = get_form_links(request.user,
rdrf_context_model.object_id,
rdrf_context_model.registry,
rdrf_context_model,
)
context = {"location": "Edit %s" % context_name,
"context_id": context_id,
"patient_name": patient_name,
"form_links": form_links,
'patient_link': PatientLocator(registry_model, patient_model).link,
"context_launcher": self.get_context_launcher(request.user,
registry_model,
patient_model),
"context_name": context_name,
"registry": registry_model.code,
"naming_info": naming_info,
"patient_id": patient_id,
"form": context_form}
return render(request, "rdrf_cdes/rdrf_context.html", context)
@method_decorator(login_required)
def post(self, request, registry_code, patient_id, context_id):
registry_model = Registry.objects.get(code=registry_code)
context_model = RDRFContext.objects.get(pk=context_id)
context_form_group_model = context_model.context_form_group
if context_form_group_model:
naming_info = context_form_group_model.naming_info
else:
naming_info = self.get_naming_info(None)
context_name = context_model.registry.metadata.get("context_name", "Context")
patient_model = Patient.objects.get(id=patient_id)
form = ContextForm(request.POST, instance=context_model)
if form.is_valid():
content_type = ContentType.objects.get_for_model(patient_model)
context_model = form.save(commit=False)
context_model.registry = registry_model
context_model.content_type = content_type
context_model.content_object = patient_model
context_model.save()
form_links = get_form_links(request.user,
context_model.object_id,
context_model.registry,
context_model)
context = {"location": "Edit %s" % context_name,
"patient_name": patient_model.display_name,
'patient_link': PatientLocator(registry_model, patient_model).link,
"form_links": form_links,
"context_launcher": self.get_context_launcher(request.user,
registry_model,
patient_model),
"message": "%s saved successfully" % context_name,
"error_messages": [],
"registry": registry_model.code,
"naming_info": naming_info,
"patient_id": patient_id,
"form": ContextForm(instance=context_model),
}
else:
error_messages = get_error_messages([form])
context = {"location": "Add %s" % context_name,
"errors": True,
"error_messages": error_messages,
"registry": registry_model.code,
"patient_id": patient_id,
"form_links": [],
'patient_link': PatientLocator(registry_model, patient_model).link,
"context_launcher": self.get_context_launcher(request.user,
registry_model,
patient_model),
"error_messages": error_messages,
"naming_info": naming_info,
"patient_name": patient_model.display_name,
"form": ContextForm(request.POST)}
return render(request, "rdrf_cdes/rdrf_context.html", context)
|
agpl-3.0
|
dezynetechnologies/odoo
|
addons/document/report/__init__.py
|
444
|
1068
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
AnderEnder/ansible-modules-extras
|
database/vertica/vertica_user.py
|
148
|
14500
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_user
version_added: '2.0'
short_description: Adds or removes Vertica database users and assigns roles.
description:
- Adds or removes Vertica database user and, optionally, assigns roles.
- A user will not be removed until all the dependencies have been dropped.
- In such a situation, if the module tries to remove the user it
will fail and only remove roles granted to the user.
options:
name:
description:
- Name of the user to add or remove.
required: true
profile:
description:
- Sets the user's profile.
required: false
default: null
resource_pool:
description:
- Sets the user's resource pool.
required: false
default: null
password:
description:
- The user's password encrypted by the MD5 algorithm.
- The password must be generated with the format C("md5" + md5[password + username]),
resulting in a total of 35 characters. An easy way to do this is by querying
the Vertica database with select 'md5'||md5('<user_password><user_name>').
required: false
default: null
expired:
description:
- Sets the user's password expiration.
required: false
default: null
ldap:
description:
- Set to true if users are authenticated via LDAP.
- The user will be created with password expired and set to I($ldap$).
required: false
default: null
roles:
description:
- Comma separated list of roles to assign to the user.
aliases: ['role']
required: false
default: null
state:
description:
- Whether to create C(present), drop C(absent) or lock C(locked) a user.
required: false
choices: ['present', 'absent', 'locked']
default: present
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica user with password
vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
- name: creating a new vertica user authenticated via ldap with roles assigned
vertica_user:
name=user_name
ldap=true
db=db_name
roles=schema_name_ro
state=present
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
select u.user_name, u.is_locked, u.lock_time,
p.password, p.acctexpired as is_expired,
u.profile_name, u.resource_pool,
u.all_roles, u.default_roles
from users u join password_auditor p on p.user_id = u.user_id
where not u.is_super_user
and (? = '' or u.user_name ilike ?)
""", user, user)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
user_key = row.user_name.lower()
facts[user_key] = {
'name': row.user_name,
'locked': str(row.is_locked),
'password': row.password,
'expired': str(row.is_expired),
'profile': row.profile_name,
'resource_pool': row.resource_pool,
'roles': [],
'default_roles': []}
if row.is_locked:
facts[user_key]['locked_time'] = str(row.lock_time)
if row.all_roles:
facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
if row.default_roles:
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
def update_roles(user_facts, cursor, user,
existing_all, existing_default, required):
del_roles = list(set(existing_all) - set(required))
if del_roles:
cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
new_roles = list(set(required) - set(existing_all))
if new_roles:
cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
if required:
cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
def check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles):
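# Returns True when the existing user already matches the requested state;
# main() relies on this for check mode (changed = not check(...)).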
user_key = user.lower()
if user_key not in user_facts:
return False
if profile and profile != user_facts[user_key]['profile']:
return False
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
return False
if locked != (user_facts[user_key]['locked'] == 'True'):
return False
if password and password != user_facts[user_key]['password']:
return False
if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True')) or \
(ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
return False
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
return False
return True
def present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
query_fragments = ["create user {0}".format(user)]
if locked:
query_fragments.append("account lock")
if password or ldap:
if password:
query_fragments.append("identified by '{0}'".format(password))
else:
query_fragments.append("identified by '$ldap$'")
if expired or ldap:
query_fragments.append("password expire")
if profile:
query_fragments.append("profile {0}".format(profile))
if resource_pool:
query_fragments.append("resource pool {0}".format(resource_pool))
cursor.execute(' '.join(query_fragments))
if resource_pool and resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
update_roles(user_facts, cursor, user, [], [], roles)
user_facts.update(get_user_facts(cursor, user))
return True
else:
changed = False
query_fragments = ["alter user {0}".format(user)]
if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
if locked:
state = 'lock'
else:
state = 'unlock'
query_fragments.append("account {0}".format(state))
changed = True
if password and password != user_facts[user_key]['password']:
query_fragments.append("identified by '{0}'".format(password))
changed = True
if ldap:
if ldap != (user_facts[user_key]['expired'] == 'True'):
query_fragments.append("password expire")
changed = True
elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
if expired:
query_fragments.append("password expire")
changed = True
else:
raise NotSupportedError("Unexpiring user password is not supported.")
if profile and profile != user_facts[user_key]['profile']:
query_fragments.append("profile {0}".format(profile))
changed = True
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
query_fragments.append("resource pool {0}".format(resource_pool))
if user_facts[user_key]['resource_pool'] != 'general':
cursor.execute("revoke usage on resource pool {0} from {1}".format(
user_facts[user_key]['resource_pool'], user))
if resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
changed = True
if changed:
cursor.execute(' '.join(query_fragments))
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
changed = True
if changed:
user_facts.update(get_user_facts(cursor, user))
return changed
def absent(user_facts, cursor, user, roles):
user_key = user.lower()
if user_key in user_facts:
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
try:
cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
except pyodbc.Error:
raise CannotDropError("Dropping user failed due to dependencies.")
del user_facts[user_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True, aliases=['name']),
profile=dict(default=None),
resource_pool=dict(default=None),
password=dict(default=None),
expired=dict(type='bool', default=None),
ldap=dict(type='bool', default=None),
roles=dict(default=None, aliases=['role']),
state=dict(default='present', choices=['absent', 'present', 'locked']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
user = module.params['user']
profile = module.params['profile']
if profile:
profile = profile.lower()
resource_pool = module.params['resource_pool']
if resource_pool:
resource_pool = resource_pool.lower()
password = module.params['password']
expired = module.params['expired']
ldap = module.params['ldap']
roles = []
if module.params['roles']:
roles = module.params['roles'].split(',')
roles = filter(None, roles)
state = module.params['state']
if state == 'locked':
locked = True
else:
locked = False
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception, e:
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
user_facts = get_user_facts(cursor)
if module.check_mode:
changed = not check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles)
elif state == 'absent':
try:
changed = absent(user_facts, cursor, user, roles)
except pyodbc.Error, e:
module.fail_json(msg=str(e))
elif state in ['present', 'locked']:
try:
changed = present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles)
except pyodbc.Error, e:
module.fail_json(msg=str(e))
except NotSupportedError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except CannotDropError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception, e:
module.fail_json(msg=e)
module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
# import ansible utilities
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
miguelcarrasco/anothercryptosolver
|
anothercryptosolver/genwordlist.py
|
1
|
1216
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from sqlalchemy import create_engine
from wordlistdb_management import WordListDBCreator
def print_instructions(executable):
print executable + ':'
print 'Reads the standard input and generates a sqlite database that can be used with susdecipher.py'
print 'if the database already exists it adds the words passed on the standard input\n'
print 'usage: ' + executable + ' wordlist_database_name'
print 'example: '
print executable + " wordlist.db < somewords.txt"
def create_db_from_stdin(wordlistdb):
engine = create_engine('sqlite:///' + wordlistdb,
echo=False, encoding='utf-8', convert_unicode=True)
dbcreator = WordListDBCreator(engine)
dbcreator.max_words_added = 50
while True:
try:
line = sys.stdin.readline()
if not line:
break
dbcreator.add_word(line)
except KeyboardInterrupt:
break
dbcreator.force_commit()
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) != 1:
print_instructions(executable=sys.argv[0])
exit()
create_db_from_stdin(wordlistdb=args[0])
|
mit
|
3L3N4/theHarvester
|
lib/graphs.py
|
16
|
32822
|
"""
+-------------------------------------------------------------------+
| H T M L - G R A P H S (v4.8) |
| |
| Copyright Gerd Tentler www.gerd-tentler.de/tools |
| Created: Sep. 17, 2002 Last modified: Feb. 13, 2010 |
+-------------------------------------------------------------------+
| This program may be used and hosted free of charge by anyone for |
| personal purpose as long as this copyright notice remains intact. |
| |
| Obtain permission before selling the code for this program or |
| hosting this software on a commercial website or redistributing |
| this software over the Internet or in any other medium. In all |
| cases copyright must remain intact. |
+-------------------------------------------------------------------+
=====================================================================================================
Example:
import graphs
graph = graphs.BarGraph('hBar')
graph.values = [234, 125, 289, 147, 190]
print graph.create()
Returns HTML code
=====================================================================================================
"""
import re
import math
class BarGraph:
"""creates horizontal and vertical bar graphs, progress bars and faders"""
def __init__(self, type=''):
#-------------------------------------------------------------------------
# Configuration
#-------------------------------------------------------------------------
# graph type: "hBar", "vBar", "pBar", or "fader"
self.type = type and type or 'hBar'
self.values = [] # graph data: list
# graph background color: string
self.graphBGColor = ''
# graph border: string (CSS-spec: "size style color"; doesn't work with
# NN4)
self.graphBorder = ''
# graph padding: integer (pixels)
self.graphPadding = 0
# titles: array or string with comma-separated values
self.titles = []
self.titleColor = 'black' # title font color: string
# title background color: string
self.titleBGColor = '#C0E0FF'
# title border: string (CSS specification)
self.titleBorder = '2px groove white'
# title font family: string (CSS specification)
self.titleFont = 'Arial, Helvetica'
# title font size: integer (pixels)
self.titleSize = 12
# title text align: "left", "center", or "right"
self.titleAlign = 'center'
# title padding: integer (pixels)
self.titlePadding = 2
# label names: list or string with comma-separated values
self.labels = []
self.labelColor = 'black' # label font color: string
# label background color: string
self.labelBGColor = '#C0E0FF'
# label border: string (CSS-spec: "size style color"; doesn't work with
# NN4)
self.labelBorder = '2px groove white'
# label font family: string (CSS-spec)
self.labelFont = 'Arial, Helvetica'
# label font size: integer (pixels)
self.labelSize = 12
# label text align: "left", "center", or "right"
self.labelAlign = 'center'
# additional space between labels: integer (pixels)
self.labelSpace = 0
self.barWidth = 20 # bar width: integer (pixels)
# bar length ratio: float (from 0.1 to 2.9)
self.barLength = 1.0
# bar colors OR bar images: list or string with comma-separated values
self.barColors = []
# bar background color: string
self.barBGColor = ''
# bar border: string (CSS-spec: "size style color"; doesn't work with
# NN4)
self.barBorder = '2px outset white'
# bar level colors: ascending list (bLevel, bColor[,...]); draw bars >=
# bLevel with bColor
self.barLevelColors = []
# show values: 0 = % only, 1 = abs. and %, 2 = abs. only, 3 = none
self.showValues = 0
# base value: integer or float (only hBar and vBar)
self.baseValue = 0
# abs. values font color: string
self.absValuesColor = 'black'
# abs. values background color: string
self.absValuesBGColor = '#C0E0FF'
# abs. values border: string (CSS-spec: "size style color"; doesn't
# work with NN4)
self.absValuesBorder = '2px groove white'
# abs. values font family: string (CSS-spec)
self.absValuesFont = 'Arial, Helvetica'
# abs. values font size: integer (pixels)
self.absValuesSize = 12
# abs. values prefix: string (e.g. "$")
self.absValuesPrefix = ''
# abs. values suffix: string (e.g. " kg")
self.absValuesSuffix = ''
# perc. values font color: string
self.percValuesColor = 'black'
# perc. values font family: string (CSS-spec)
self.percValuesFont = 'Arial, Helvetica'
# perc. values font size: integer (pixels)
self.percValuesSize = 12
# perc. values number of decimals: integer
self.percValuesDecimals = 0
self.charts = 1 # number of charts: integer
# hBar/vBar only:
# legend items: list or string with comma-separated values
self.legend = []
self.legendColor = 'black' # legend font color: string
# legend background color: string
self.legendBGColor = '#F0F0F0'
# legend border: string (CSS-spec: "size style color"; doesn't work
# with NN4)
self.legendBorder = '2px groove white'
# legend font family: string (CSS-spec)
self.legendFont = 'Arial, Helvetica'
# legend font size: integer (pixels)
self.legendSize = 12
# legend vertical align: "top", "center", "bottom"
self.legendAlign = 'top'
        # debug mode: 0 = off, 1 = on; appends some extra diagnostic info to the output
self.debug = 0
#-------------------------------------------------------------------------
# default bar colors; only used if barColors isn't set
__colors = (
'#0000FF',
'#FF0000',
'#00E000',
'#A0A0FF',
'#FFA0A0',
'#00A000')
# error messages
__err_type = 'ERROR: Type must be "hBar", "vBar", "pBar", or "fader"'
# CSS names (don't change)
__cssGRAPH = ''
__cssBAR = ''
__cssBARBG = ''
__cssTITLE = ''
__cssLABEL = ''
__cssLABELBG = ''
__cssLEGEND = ''
__cssLEGENDBG = ''
__cssABSVALUES = ''
__cssPERCVALUES = ''
# search pattern for images
__img_pattern = re.compile(r'\.(jpg|jpeg|jpe|gif|png)')
def set_styles(self):
"""set graph styles"""
if self.graphBGColor:
self.__cssGRAPH += 'background-color:' + self.graphBGColor + ';'
if self.graphBorder:
self.__cssGRAPH += 'border:' + self.graphBorder + ';'
if self.barBorder:
self.__cssBAR += 'border:' + self.barBorder + ';'
if self.barBGColor:
self.__cssBARBG += 'background-color:' + self.barBGColor + ';'
if self.titleColor:
self.__cssTITLE += 'color:' + self.titleColor + ';'
if self.titleBGColor:
self.__cssTITLE += 'background-color:' + self.titleBGColor + ';'
if self.titleBorder:
self.__cssTITLE += 'border:' + self.titleBorder + ';'
if self.titleFont:
self.__cssTITLE += 'font-family:' + self.titleFont + ';'
if self.titleAlign:
self.__cssTITLE += 'text-align:' + self.titleAlign + ';'
if self.titleSize:
self.__cssTITLE += 'font-size:' + str(self.titleSize) + 'px;'
if self.titlePadding:
self.__cssTITLE += 'padding:' + str(self.titlePadding) + 'px;'
if self.labelColor:
self.__cssLABEL += 'color:' + self.labelColor + ';'
if self.labelBGColor:
self.__cssLABEL += 'background-color:' + self.labelBGColor + ';'
if self.labelBorder:
self.__cssLABEL += 'border:' + self.labelBorder + ';'
if self.labelFont:
self.__cssLABEL += 'font-family:' + self.labelFont + ';'
if self.labelSize:
self.__cssLABEL += 'font-size:' + str(self.labelSize) + 'px;'
if self.labelAlign:
self.__cssLABEL += 'text-align:' + self.labelAlign + ';'
if self.labelBGColor:
self.__cssLABELBG += 'background-color:' + self.labelBGColor + ';'
if self.legendColor:
self.__cssLEGEND += 'color:' + self.legendColor + ';'
if self.legendFont:
self.__cssLEGEND += 'font-family:' + self.legendFont + ';'
if self.legendSize:
self.__cssLEGEND += 'font-size:' + str(self.legendSize) + 'px;'
if self.legendBGColor:
self.__cssLEGENDBG += 'background-color:' + \
self.legendBGColor + ';'
if self.legendBorder:
self.__cssLEGENDBG += 'border:' + self.legendBorder + ';'
if self.absValuesColor:
self.__cssABSVALUES += 'color:' + self.absValuesColor + ';'
if self.absValuesBGColor:
self.__cssABSVALUES += 'background-color:' + \
self.absValuesBGColor + ';'
if self.absValuesBorder:
self.__cssABSVALUES += 'border:' + self.absValuesBorder + ';'
if self.absValuesFont:
self.__cssABSVALUES += 'font-family:' + self.absValuesFont + ';'
if self.absValuesSize:
self.__cssABSVALUES += 'font-size:' + \
str(self.absValuesSize) + 'px;'
if self.percValuesColor:
self.__cssPERCVALUES += 'color:' + self.percValuesColor + ';'
if self.percValuesFont:
self.__cssPERCVALUES += 'font-family:' + self.percValuesFont + ';'
if self.percValuesSize:
self.__cssPERCVALUES += 'font-size:' + \
str(self.percValuesSize) + 'px;'
def level_color(self, value, color):
"""return bar color for each level"""
if self.barLevelColors:
for i in range(0, len(self.barLevelColors), 2):
try:
if (self.barLevelColors[i] > 0 and value >= self.barLevelColors[i]) or \
(self.barLevelColors[i] < 0 and value <= self.barLevelColors[i]):
color = self.barLevelColors[i + 1]
except IndexError:
pass
return color
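    # e.g. barLevelColors = [1, '#00E000', 100, '#FF0000'] (illustrative)
    # draws bars with value >= 100 in red and other bars with value >= 1 in
    # green; negative thresholds are compared with <= instead.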
def build_bar(self, value, width, height, color):
"""return a single bar"""
title = self.absValuesPrefix + str(value) + self.absValuesSuffix
bg = self.__img_pattern.search(color) and 'background' or 'bgcolor'
bar = '<table border=0 cellspacing=0 cellpadding=0><tr>'
bar += '<td style="' + self.__cssBAR + '" ' + bg + '="' + color + '"'
bar += (value != '') and ' title="' + title + '">' or '>'
bar += '<div style="width:' + \
str(width) + 'px; height:' + str(height) + 'px;'
bar += ' line-height:1px; font-size:1px;"></div>'
bar += '</td></tr></table>'
return bar
def build_fader(self, value, width, height, x, color):
"""return a single fader"""
fader = '<table border=0 cellspacing=0 cellpadding=0><tr>'
x -= int(round(width / 2))
if x > 0:
fader += '<td width=' + str(x) + '></td>'
fader += '<td>' + self.build_bar(value, width, height, color) + '</td>'
fader += '</tr></table>'
return fader
def build_value(self, val, max_dec, sum=0, align=''):
"""return a single bar/fader value"""
val = _number_format(val, max_dec)
if sum:
sum = _number_format(sum, max_dec)
value = '<td style="' + self.__cssABSVALUES + '"'
if align:
value += ' align=' + align
value += ' nowrap>'
value += ' ' + self.absValuesPrefix + \
str(val) + self.absValuesSuffix
if sum:
value += ' / ' + self.absValuesPrefix + \
str(sum) + self.absValuesSuffix
value += ' </td>'
return value
def build_legend(self, barColors):
"""return the legend"""
if hasattr(self.legend, 'split'):
self.legend = self.legend.split(',')
legend = '<table border=0 cellspacing=0 cellpadding=0><tr>'
legend += '<td style="' + self.__cssLEGENDBG + '">'
legend += '<table border=0 cellspacing=4 cellpadding=0>'
i = 0
for color in barColors:
if len(self.legend) >= i + 1:
text = hasattr(
self.legend[i],
'strip') and self.legend[i].strip() or str(self.legend[i])
else:
text = ''
legend += '<tr>'
legend += '<td>' + \
self.build_bar(
'',
self.barWidth,
self.barWidth,
color) + '</td>'
legend += '<td style="' + self.__cssLEGEND + \
'" nowrap>' + text + '</td>'
legend += '</tr>'
i += 1
legend += '</table></td></tr></table>'
return legend
def build_hTitle(self, titleLabel, titleValue, titleBar):
"""return horizontal titles"""
title = '<tr>'
title += '<td style="' + self.__cssTITLE + '">' + titleLabel + '</td>'
if titleValue != '':
title += '<td style="' + self.__cssTITLE + \
'">' + titleValue + '</td>'
title += '<td style="' + self.__cssTITLE + '">' + titleBar + '</td>'
title += '</tr>'
return title
def create_hBar(self, value, percent, mPerc, mPerc_neg,
max_neg, mul, valSpace, bColor, border, spacer, spacer_neg):
"""return a single horizontal bar with label and values (abs./perc.)"""
bar = '<table border=0 cellspacing=0 cellpadding=0 height=100%><tr>'
if percent < 0:
percent *= -1
bar += '<td style="' + self.__cssLABELBG + '" height=' + \
str(self.barWidth) + ' width=' + \
str(int(round((mPerc_neg - percent) * mul + valSpace))) + \
' align=right nowrap>'
if self.showValues < 2:
bar += '<span style="' + self.__cssPERCVALUES + '">' + \
str(_number_format(percent, self.percValuesDecimals)) + \
'%</span>'
bar += ' </td><td style="' + self.__cssLABELBG + '">'
bar += self.build_bar(value, int(round(percent * mul)),
self.barWidth, bColor)
bar += '</td><td width=' + str(spacer) + '></td>'
else:
if max_neg:
bar += '<td style="' + self.__cssLABELBG + \
'" width=' + str(spacer_neg) + '>'
bar += '<table border=0 cellspacing=0 cellpadding=0><tr><td></td></tr></table></td>'
if percent:
bar += '<td>'
bar += self.build_bar(value, int(round(percent * mul)),
self.barWidth, bColor)
bar += '</td>'
else:
bar += '<td width=1 height=' + \
str(self.barWidth + (border * 2)) + '></td>'
bar += '<td style="' + self.__cssPERCVALUES + '" width=' + \
str(int(round((mPerc - percent) * mul + valSpace))) + \
' align=left nowrap>'
if self.showValues < 2:
bar += ' ' + \
str(_number_format(percent, self.percValuesDecimals)) + '%'
bar += ' </td>'
bar += '</tr></table>'
return bar
def create_vBar(self, value, percent, mPerc, mPerc_neg,
max_neg, mul, valSpace, bColor, border, spacer, spacer_neg):
"""return a single vertical bar with label and values (abs./perc.)"""
bar = '<table border=0 cellspacing=0 cellpadding=0 width=100%><tr align=center>'
if percent < 0:
percent *= -1
bar += '<td height=' + \
str(spacer) + '></td></tr><tr align=center valign=top><td style="' + \
self.__cssLABELBG + '">'
bar += self.build_bar(value, self.barWidth,
int(round(percent * mul)), bColor)
bar += '</td></tr><tr align=center valign=top>'
bar += '<td style="' + self.__cssLABELBG + '" height=' + \
str(int(round((mPerc_neg - percent) * mul + valSpace))) + \
' nowrap>'
bar += (self.showValues < 2) and '<span style="' + self.__cssPERCVALUES + '">' + \
str(_number_format(percent, self.percValuesDecimals)) + \
'%</span>' or ' '
bar += '</td>'
else:
bar += '<td style="' + self.__cssPERCVALUES + '" valign=bottom height=' + \
str(int(round((mPerc - percent) * mul + valSpace))) + \
' nowrap>'
if self.showValues < 2:
bar += str(_number_format(percent, self.percValuesDecimals)) + \
'%'
bar += '</td>'
if percent:
bar += '</tr><tr align=center valign=bottom><td>'
bar += self.build_bar(value, self.barWidth,
int(round(percent * mul)), bColor)
bar += '</td>'
else:
bar += '</tr><tr><td width=' + \
str(self.barWidth + (border * 2)) + ' height=1></td>'
if max_neg:
bar += '</tr><tr><td style="' + self.__cssLABELBG + \
'" height=' + str(spacer_neg) + '>'
bar += '<table border=0 cellspacing=0 cellpadding=0><tr><td></td></tr></table></td>'
bar += '</tr></table>'
return bar
def create(self):
"""create a complete bar graph (horizontal, vertical, progress, or fader)"""
self.type = self.type.lower()
d = self.values
t = hasattr(
self.titles,
'split') and self.titles.split(
',') or self.titles
r = hasattr(
self.labels,
'split') and self.labels.split(
',') or self.labels
drc = hasattr(
self.barColors,
'split') and self.barColors.split(
',') or self.barColors
val = []
bc = []
if self.barLength < 0.1:
self.barLength = 0.1
elif self.barLength > 2.9:
self.barLength = 2.9
labels = (len(d) > len(r)) and len(d) or len(r)
if self.type == 'pbar' or self.type == 'fader':
if not self.barBGColor:
self.barBGColor = self.labelBGColor
if self.labelBGColor == self.barBGColor and len(t) == 0:
self.labelBGColor = ''
self.labelBorder = ''
self.set_styles()
graph = '<table border=0 cellspacing=0 cellpadding=' + \
str(self.graphPadding) + '><tr>'
graph += '<td' + \
(self.__cssGRAPH and ' style="' +
self.__cssGRAPH + '"' or '') + '>'
if self.legend and self.type != 'pbar' and self.type != 'fader':
graph += '<table border=0 cellspacing=0 cellpadding=0><tr><td>'
if self.charts > 1:
divide = math.ceil(labels / self.charts)
graph += '<table border=0 cellspacing=0 cellpadding=6><tr valign=top><td>'
else:
divide = 0
sum = 0
max = 0
max_neg = 0
max_dec = 0
ccnt = 0
lcnt = 0
chart = 0
for i in range(labels):
if divide and i and not i % divide:
lcnt = 0
chart += 1
            try:
                drv = len(d[i]) and [e for e in d[i]] or [d[i]]
            except TypeError:
                # d[i] is a single number rather than a sequence
                drv = [d[i]]
j = 0
dec = 0
if len(val) <= chart:
val.append([])
for v in drv:
s = str(v)
if s.find('.') != -1:
dec = len(s[s.find('.') + 1:])
if dec > max_dec:
max_dec = dec
if len(val[chart]) <= lcnt:
val[chart].append([])
val[chart][lcnt].append(v)
if v != 0:
v -= self.baseValue
if v > max:
max = v
elif v < max_neg:
max_neg = v
if v < 0:
v *= -1
sum += v
if len(bc) <= j:
if ccnt >= len(self.__colors):
ccnt = 0
if len(drc) <= j or len(drc[j]) < 3:
bc.append(self.__colors[ccnt])
ccnt += 1
else:
bc.append(drc[j].strip())
j += 1
lcnt += 1
border = int(self.barBorder[0])
mPerc = sum and int(round(max * 100.0 / sum)) or 0
if self.type == 'pbar' or self.type == 'fader':
mul = 2
else:
mul = mPerc and 100.0 / mPerc or 1
mul *= self.barLength
if self.showValues < 2:
if self.type == 'hbar':
valSpace = (self.percValuesDecimals * (self.percValuesSize / 1.6)) + \
(self.percValuesSize * 3.2)
else:
valSpace = self.percValuesSize * 1.2
else:
valSpace = self.percValuesSize
spacer = maxSize = int(round(mPerc * mul + valSpace + border * 2))
if max_neg:
mPerc_neg = sum and int(round(-max_neg * 100.0 / sum)) or 0
if mPerc_neg > mPerc and self.type != 'pbar' and self.type != 'fader':
mul = 100.0 / mPerc_neg * self.barLength
spacer_neg = int(round(mPerc_neg * mul + valSpace + border * 2))
maxSize += spacer_neg
else:
mPerc_neg = spacer_neg = 0
titleLabel = ''
titleValue = ''
titleBar = ''
if len(t) > 0:
titleLabel = (t[0] == '') and ' ' or t[0]
if self.showValues == 1 or self.showValues == 2:
titleValue = (t[1] == '') and ' ' or t[1]
titleBar = (t[2] == '') and ' ' or t[2]
else:
titleBar = (t[1] == '') and ' ' or t[1]
chart = 0
lcnt = 0
for v in val:
graph += '<table border=0 cellspacing=2 cellpadding=0>'
if self.type == 'hbar':
if len(t) > 0:
graph += self.build_hTitle(titleLabel,
titleValue, titleBar)
for i in range(len(v)):
label = (
lcnt < len(r)) and r[lcnt].strip() or str(lcnt + 1)
rowspan = len(v[i])
graph += '<tr><td style="' + self.__cssLABEL + '"' + \
((rowspan > 1) and ' rowspan=' + str(rowspan) or '') + \
'>'
graph += ' ' + label + ' </td>'
for j in range(len(v[i])):
value = v[i][j] and v[i][j] - self.baseValue or 0
percent = sum and value * 100.0 / sum or 0
value = _number_format(v[i][j], max_dec)
bColor = self.level_color(v[i][j], bc[j])
if self.showValues == 1 or self.showValues == 2:
graph += self.build_value(v[i]
[j], max_dec, 0, 'right')
graph += '<td' + (self.__cssBARBG and ' style="' + self.__cssBARBG + '"' or '') + \
' height=100% width=' + \
str(maxSize) + '>'
graph += self.create_hBar(
value, percent, mPerc, mPerc_neg,
max_neg, mul, valSpace, bColor, border, spacer, spacer_neg)
graph += '</td></tr>'
if j < len(v[i]) - 1:
graph += '<tr>'
if self.labelSpace and i < len(v) - 1:
graph += '<tr><td colspan=3 height=' + \
str(self.labelSpace) + '></td></tr>'
lcnt += 1
elif self.type == 'vbar':
graph += '<tr align=center valign=bottom>'
if titleBar != '':
titleBar = titleBar.replace('-', '-<br>')
graph += '<td style="' + self.__cssTITLE + \
'" valign=middle>' + titleBar + '</td>'
for i in range(len(v)):
for j in range(len(v[i])):
value = v[i][j] and v[i][j] - self.baseValue or 0
percent = sum and value * 100.0 / sum or 0
value = _number_format(v[i][j], max_dec)
bColor = self.level_color(v[i][j], bc[j])
graph += '<td' + \
(self.__cssBARBG and ' style="' +
self.__cssBARBG + '"' or '') + '>'
graph += self.create_vBar(
value, percent, mPerc, mPerc_neg,
max_neg, mul, valSpace, bColor, border, spacer, spacer_neg)
graph += '</td>'
if self.labelSpace:
graph += '<td width=' + str(self.labelSpace) + '></td>'
if self.showValues == 1 or self.showValues == 2:
graph += '</tr><tr align=center>'
if titleValue != '':
graph += '<td style="' + self.__cssTITLE + \
'">' + titleValue + '</td>'
for i in range(len(v)):
for j in range(len(v[i])):
graph += self.build_value(v[i][j], max_dec)
if self.labelSpace:
graph += '<td width=' + \
str(self.labelSpace) + '></td>'
graph += '</tr><tr>'
if titleLabel != '':
graph += '<td style="' + self.__cssTITLE + \
'">' + titleLabel + '</td>'
for i in range(len(v)):
label = (
lcnt < len(r)) and r[lcnt].strip() or str(lcnt + 1)
colspan = len(v[i])
graph += '<td style="' + self.__cssLABEL + '"' + \
((colspan > 1) and ' colspan=' + str(colspan) or '') + \
'>'
graph += ' ' + label + ' </td>'
if self.labelSpace:
graph += '<td width=' + str(self.labelSpace) + '></td>'
lcnt += 1
graph += '</tr>'
elif self.type == 'pbar' or self.type == 'fader':
if len(t) > 0:
graph += self.build_hTitle(titleLabel,
titleValue, titleBar)
for i in range(len(v)):
                    try:
                        m = len(v[i]) > 1
                    except TypeError:
                        m = False
if m or not i:
label = (
lcnt < len(r)) and r[lcnt].strip() or str(i + 1)
graph += '<tr>'
if len(r):
graph += '<td style="' + self.__cssLABEL + '">'
graph += ' ' + label + ' </td>'
try:
sum = v[i][1] and v[i][1] or v[-1][0]
except:
sum = v[-1][0]
percent = sum and v[i][0] * 100.0 / sum or 0
value = _number_format(v[i][0], max_dec)
if self.showValues == 1 or self.showValues == 2:
graph += self.build_value(v[i]
[0], max_dec, sum, 'right')
graph += '<td' + \
(self.__cssBARBG and ' style="' +
self.__cssBARBG + '"' or '') + '>'
self.barColors = (
len(drc) >= i + 1) and drc[i].strip() or self.__colors[0]
bColor = self.level_color(v[i][0], self.barColors)
graph += '<table border=0 cellspacing=0 cellpadding=0><tr><td>'
if self.type == 'fader':
graph += self.build_fader(
value, int(round(self.barWidth / 2)),
self.barWidth, int(round(percent * mul)), bColor)
else:
graph += self.build_bar(value,
int(round(percent * mul)), self.barWidth, bColor)
graph += '</td><td width=' + \
str(int(round((100 - percent) * mul))) + '></td>'
graph += '</tr></table></td>'
if self.showValues < 2:
graph += '<td style="' + self.__cssPERCVALUES + '" nowrap> ' + \
str(_number_format(percent, self.percValuesDecimals)) + \
'%</td>'
graph += '</tr>'
if self.labelSpace and i < len(v) - 1:
graph += '<td colspan=3 height=' + \
str(self.labelSpace) + '></td>'
lcnt += 1
else:
graph += '<tr><td>' + self.__err_type + '</td></tr>'
graph += '</table>'
if chart < self.charts - 1 and len(val[chart + 1]):
graph += '</td>'
if self.type == 'vbar':
graph += '</tr><tr valign=top>'
graph += '<td>'
chart += 1
if self.charts > 1:
graph += '</td></tr></table>'
if self.legend and self.type != 'pbar' and self.type != 'fader':
graph += '</td><td width=10> </td><td' + \
(self.legendAlign and ' valign=' + self.legendAlign or '') + \
'>'
graph += self.build_legend(bc)
graph += '</td></tr></table>'
if self.debug:
graph += "<br>sum=%s max=%s max_neg=%s max_dec=%s " % (sum,
max, max_neg, max_dec)
graph += "mPerc=%s mPerc_neg=%s mul=%s valSpace=%s" % (mPerc,
mPerc_neg, mul, valSpace)
graph += '</td></tr></table>'
return graph
def _number_format(val, dec):
"""return float with dec decimals; if dec is 0, return integer"""
return dec and ('%.' + str(dec) + 'f') % val or int(round(val))
if __name__ == '__main__':
print __doc__
|
gpl-2.0
|
shajrawi/swift
|
utils/swift_build_support/swift_build_support/SwiftBuildSupport.py
|
34
|
7469
|
# utils/SwiftBuildSupport.py - Utilities for Swift build scripts -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
try:
# Python 2
import ConfigParser
except ImportError:
# Python 3
import configparser as ConfigParser
import os
from . import diagnostics
HOME = os.environ.get("HOME", "/")
def _get_default_source_root():
result = ""
# Are we in a Swift checkout? Start from this file and check its parent
# directories.
#
# $SWIFT_SOURCE_ROOT/swift/utils/swift_build_support/swift_build_support/SwiftBuildSupport.py
utils_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
(swift_path, parent_dirname) = os.path.split(utils_path)
if parent_dirname != "utils":
return result
if not os.path.exists(os.path.join(swift_path, 'CMakeLists.txt')):
return result
result = os.path.dirname(swift_path)
# Are we in an LLVM checkout? Start from the Swift checkout and check /its/
# parent directories.
#
# $SWIFT_SOURCE_ROOT/llvm/tools/swift/utils/swift_build_support/swift_build_support/SwiftBuildSupport.py
(llvm_path, parent_dirname) = os.path.split(result)
if parent_dirname != "tools":
return result
if not os.path.exists(os.path.join(llvm_path, 'CMakeLists.txt')):
return result
result = os.path.dirname(llvm_path)
return result
# Set SWIFT_SOURCE_ROOT in your environment to control where the sources
# are found.
SWIFT_SOURCE_ROOT = os.environ.get(
"SWIFT_SOURCE_ROOT", _get_default_source_root())
# Set SWIFT_BUILD_ROOT to a directory that will contain a subdirectory
# for each build configuration
SWIFT_BUILD_ROOT = os.environ.get(
"SWIFT_BUILD_ROOT", os.path.join(SWIFT_SOURCE_ROOT, "build"))
def _get_default_swift_repo_name():
result = ""
# Are we in a Swift checkout? Start from this file and check its parent
# directories.
#
# $SWIFT_SOURCE_ROOT/$SWIFT_REPO_NAME/utils/swift_build_support/swift_build_support/SwiftBuildSupport.py
utils_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
(swift_path, parent_dirname) = os.path.split(utils_path)
if parent_dirname != "utils":
return result
if not os.path.exists(os.path.join(swift_path, 'CMakeLists.txt')):
return result
(_, swift_repo_name) = os.path.split(swift_path)
return swift_repo_name
# Set SWIFT_REPO_NAME in your environment to control the name of the swift
# directory name that is used.
SWIFT_REPO_NAME = os.environ.get(
"SWIFT_REPO_NAME", _get_default_swift_repo_name())
def _load_preset_files_impl(preset_file_names, substitutions={}):
config = ConfigParser.SafeConfigParser(substitutions, allow_no_value=True)
if config.read(preset_file_names) == []:
diagnostics.fatal(
"preset file not found (tried " + str(preset_file_names) + ")")
return config
_PRESET_PREFIX = "preset: "
def _get_preset_options_impl(config, substitutions, preset_name):
section_name = _PRESET_PREFIX + preset_name
if section_name not in config.sections():
return (None, None, None)
build_script_opts = []
build_script_impl_opts = []
missing_opts = []
dash_dash_seen = False
for o in config.options(section_name):
try:
a = config.get(section_name, o)
except ConfigParser.InterpolationMissingOptionError as e:
# e.reference contains the correctly formatted option
missing_opts.append(e.reference)
continue
if not a:
a = ""
if o in substitutions:
continue
opt = None
if o == "mixin-preset":
# Split on newlines and filter out empty lines.
mixins = filter(None, [m.strip() for m in a.splitlines()])
for mixin in mixins:
(base_build_script_opts,
base_build_script_impl_opts,
base_missing_opts) = \
_get_preset_options_impl(config, substitutions, mixin)
build_script_opts += base_build_script_opts
build_script_impl_opts += base_build_script_impl_opts
missing_opts += base_missing_opts
elif o == "dash-dash":
dash_dash_seen = True
elif a == "":
opt = "--" + o
else:
opt = "--" + o + "=" + a
if opt:
if not dash_dash_seen:
build_script_opts.append(opt)
else:
build_script_impl_opts.append(opt)
return (build_script_opts, build_script_impl_opts, missing_opts)
def get_preset_options(substitutions, preset_file_names, preset_name):
config = _load_preset_files_impl(preset_file_names, substitutions)
(build_script_opts, build_script_impl_opts, missing_opts) = \
_get_preset_options_impl(config, substitutions, preset_name)
if not build_script_opts and not build_script_impl_opts:
diagnostics.fatal("preset '" + preset_name + "' not found")
if missing_opts:
diagnostics.fatal("missing option(s) for preset '" + preset_name +
"': " + ", ".join(missing_opts))
# Migrate 'swift-sdks' parameter to 'stdlib-deployment-targets'
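    # e.g. "--swift-sdks=OSX;IOS_SIMULATOR" (illustrative) expands into one
    # "--stdlib-deployment-targets=..." option covering those SDKs' targets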
swift_sdks_opts = [opt for opt in build_script_impl_opts
if opt.startswith("--swift-sdks")]
try:
swift_sdks_opt = swift_sdks_opts[-1]
except IndexError:
swift_sdks_opt = None
if swift_sdks_opt is not None:
sdks_to_configure = swift_sdks_opt.split("=")[1].split(";")
tgts = []
# Expand SDKs in to their deployment targets
from swift_build_support.swift_build_support.targets \
import StdlibDeploymentTarget
for sdk in sdks_to_configure:
if sdk == "OSX":
tgts += StdlibDeploymentTarget.OSX.targets
elif sdk == "IOS":
tgts += StdlibDeploymentTarget.iOS.targets
elif sdk == "IOS_SIMULATOR":
tgts += StdlibDeploymentTarget.iOSSimulator.targets
elif sdk == "TVOS":
tgts += StdlibDeploymentTarget.AppleTV.targets
elif sdk == "TVOS_SIMULATOR":
tgts += StdlibDeploymentTarget.AppleTVSimulator.targets
elif sdk == "WATCHOS":
tgts += StdlibDeploymentTarget.AppleWatch.targets
elif sdk == "WATCHOS_SIMULATOR":
tgts += StdlibDeploymentTarget.AppleWatchSimulator.targets
build_script_opts.append("--stdlib-deployment-targets=" +
" ".join([tgt.name for tgt in tgts]))
# Filter the swift-sdks parameter
build_script_impl_opts = [opt for opt in build_script_impl_opts
if not opt.startswith("--swift-sdks")]
return build_script_opts + ["--"] + build_script_impl_opts
def get_all_preset_names(preset_file_names):
config = _load_preset_files_impl(preset_file_names)
return [name[len(_PRESET_PREFIX):] for name in config.sections()
if name.startswith(_PRESET_PREFIX)]
|
apache-2.0
|
vivyly/fancast
|
fancast/casting/models.py
|
1
|
3383
|
from operator import itemgetter
from django.db import models
from shortuuidfield import ShortUUIDField
MEDIUM_CHOICES = [(x, x) for x in ['movie',
'tv',
'book',
'comicbook',
'play',
'theatre',
'shortfilm',
'shortstory',
'cartoon',
'anime',
'manga',
                                   'videogame',
'documentary',
'life'
]]
class BaseModel(models.Model):
slug = ShortUUIDField(unique=True)
published = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)  # auto_now so saves refresh the timestamp
class Project(BaseModel):
origin = models.CharField(max_length=255, choices=MEDIUM_CHOICES)
derivation = models.CharField(max_length=255, choices=MEDIUM_CHOICES)
origin_title = models.CharField(max_length=255, blank=False)
derived_title = models.CharField(max_length=255, blank=False)
def __unicode__(self):
return u"<Project: %s >" % self.derived_title
@property
def characters(self):
return Character.objects.filter(project = self).order_by('order')
class Character(BaseModel):
name = models.CharField(max_length=255, blank=False)
normalized = models.CharField(max_length=255, blank=False)
image = models.URLField(blank=True)
description = models.TextField(blank=True)
project = models.ForeignKey(Project, null=True, blank=False)
order = models.IntegerField()
def __unicode__(self):
return u"<Character: %s >" % self.name
@property
def actors(self):
return Actor.objects.filter(prospect__character = self)
@property
def prospects(self):
return Prospect.objects.filter(character=self)
@property
def actors_ordered(self):
prospects = Prospect.objects.filter(character=self)
actors = []
for prospect in prospects:
actors.append([prospect.totalvotes, prospect.actor])
sorted_actors = [actor for actor in sorted(actors, key=itemgetter(0))]
sorted_actors.reverse()
return sorted_actors
class Actor(BaseModel):
name = models.CharField(max_length=255, blank=False)
normalized = models.CharField(max_length=255, blank=False)
image = models.URLField(blank=True)
description = models.TextField(blank=True)
def __unicode__(self):
return u"<Actor: %s >" % self.name
class Prospect(BaseModel):
character = models.ForeignKey(Character)
actor = models.ForeignKey(Actor)
@property
def upvotes(self):
return ProspectVote.objects.filter(
prospect=self, vote_status=1).count()
@property
def downvotes(self):
return ProspectVote.objects.filter(
prospect=self, vote_status=-1).count()
@property
def totalvotes(self):
return self.upvotes - self.downvotes
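# Vote convention: ProspectVote.vote_status is 1 for an upvote and -1 for a
# downvote, so Prospect.totalvotes above is the net score.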
class ProspectVote(models.Model):
sessionid = models.CharField(max_length=255)
prospect = models.ForeignKey(Prospect, related_name="votes")
vote_status = models.IntegerField()
|
bsd-3-clause
|
mdinger/rust
|
src/etc/snapshot.py
|
37
|
8006
|
# Copyright 2011-2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import re
import os
import sys
import glob
import tarfile
import shutil
import subprocess
import distutils.spawn
try:
import hashlib
sha_func = hashlib.sha1
except ImportError:
import sha
sha_func = sha.new
def scrub(b):
if sys.version_info >= (3,) and type(b) == bytes:
return b.decode('ascii')
else:
return b
src_dir = scrub(os.getenv("CFG_SRC_DIR"))
if not src_dir:
raise Exception("missing env var CFG_SRC_DIR")
snapshotfile = os.path.join(src_dir, "src", "snapshots.txt")
download_url_base = "https://static.rust-lang.org/stage0-snapshots"
download_dir_base = "dl"
download_unpack_base = os.path.join(download_dir_base, "unpack")
snapshot_files = {
"linux": ["bin/rustc"],
"macos": ["bin/rustc"],
"winnt": ["bin/rustc.exe"],
"freebsd": ["bin/rustc"],
"dragonfly": ["bin/rustc"],
"bitrig": ["bin/rustc"],
"openbsd": ["bin/rustc"],
}
winnt_runtime_deps_32 = ["libgcc_s_dw2-1.dll", "libstdc++-6.dll"]
winnt_runtime_deps_64 = ["libgcc_s_seh-1.dll", "libstdc++-6.dll"]
def parse_line(n, line):
global snapshotfile
if re.match(r"\s*$", line):
return None
if re.match(r"^T\s*$", line):
return None
match = re.match(r"\s+([\w_-]+) ([a-fA-F\d]{40})\s*$", line)
if match:
return {"type": "file",
"platform": match.group(1),
"hash": match.group(2).lower()}
match = re.match(r"([ST]) (\d{4}-\d{2}-\d{2}) ([a-fA-F\d]+)\s*$", line)
if not match:
raise Exception("%s:%d:E syntax error: " % (snapshotfile, n))
return {"type": "snapshot",
"date": match.group(2),
"rev": match.group(3)}
def partial_snapshot_name(date, rev, platform):
return ("rust-stage0-%s-%s-%s.tar.bz2" %
(date, rev, platform))
def full_snapshot_name(date, rev, platform, hsh):
return ("rust-stage0-%s-%s-%s-%s.tar.bz2" %
(date, rev, platform, hsh))
def get_kernel(triple):
t = triple.split('-')
if len(t) == 2:
os_name = t[1]
else:
os_name = t[2]
if os_name == "windows":
return "winnt"
if os_name == "darwin":
return "macos"
if os_name == "freebsd":
return "freebsd"
if os_name == "dragonfly":
return "dragonfly"
if os_name == "bitrig":
return "bitrig"
if os_name == "openbsd":
return "openbsd"
return "linux"
def get_cpu(triple):
arch = triple.split('-')[0]
if arch == "i686":
return "i386"
return arch
def get_platform(triple):
return "%s-%s" % (get_kernel(triple), get_cpu(triple))
def cmd_out(cmdline):
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
return scrub(p.communicate()[0].strip())
def local_rev_info(field):
return cmd_out(["git", "--git-dir=" + os.path.join(src_dir, ".git"),
"log", "-n", "1",
"--format=%%%s" % field, "HEAD"])
def local_rev_full_sha():
return local_rev_info("H").split()[0]
def local_rev_short_sha():
return local_rev_info("h").split()[0]
def local_rev_committer_date():
return local_rev_info("ci")
def get_url_to_file(u, f):
# no security issue, just to stop partial download leaving a stale file
tmpf = f + '.tmp'
returncode = -1
if distutils.spawn.find_executable("curl"):
returncode = subprocess.call(["curl", "-o", tmpf, u])
elif distutils.spawn.find_executable("wget"):
returncode = subprocess.call(["wget", "-O", tmpf, u])
if returncode != 0:
try:
os.unlink(tmpf)
except OSError:
pass
raise Exception("failed to fetch url")
os.rename(tmpf, f)
def snap_filename_hash_part(snap):
match = re.match(r".*([a-fA-F\d]{40}).tar.bz2$", snap)
if not match:
raise Exception("unable to find hash in filename: " + snap)
return match.group(1)
def hash_file(x):
    h = sha_func()
    # read in binary mode and close the handle deterministically
    with open(x, "rb") as f:
        h.update(f.read())
    return scrub(h.hexdigest())
def get_winnt_runtime_deps(platform):
"""Returns a list of paths of Rust's system runtime dependencies"""
if platform == "winnt-x86_64":
deps = winnt_runtime_deps_64
else:
deps = winnt_runtime_deps_32
runtime_deps = []
path_dirs = os.environ["PATH"].split(os.pathsep)
for name in deps:
for dir in path_dirs:
filepath = os.path.join(dir, name)
if os.path.isfile(filepath):
runtime_deps.append(filepath)
break
else:
raise Exception("Could not find runtime dependency: %s" % name)
return runtime_deps
def make_snapshot(stage, triple):
kernel = get_kernel(triple)
platform = get_platform(triple)
rev = local_rev_short_sha()
date = local_rev_committer_date().split()[0]
file0 = partial_snapshot_name(date, rev, platform)
def in_tar_name(fn):
cs = re.split(r"[\\/]", fn)
if len(cs) >= 2:
return os.sep.join(cs[-2:])
tar = tarfile.open(file0, "w:bz2")
for name in snapshot_files[kernel]:
dir = stage
if stage == "stage1" and re.match(r"^lib/(lib)?std.*", name):
dir = "stage0"
fn_glob = os.path.join(triple, dir, name)
matches = glob.glob(fn_glob)
if not matches:
raise Exception("Not found file with name like " + fn_glob)
if len(matches) == 1:
tar.add(matches[0], "rust-stage0/" + in_tar_name(matches[0]))
else:
raise Exception("Found stale files: \n %s\n"
"Please make a clean build." % "\n ".join(matches))
if kernel == "winnt":
for path in get_winnt_runtime_deps(platform):
tar.add(path, "rust-stage0/bin/" + os.path.basename(path))
tar.add(os.path.join(os.path.dirname(__file__), "third-party"),
"rust-stage0/bin/third-party")
tar.close()
h = hash_file(file0)
file1 = full_snapshot_name(date, rev, platform, h)
shutil.move(file0, file1)
return file1
def curr_snapshot_rev():
i = 0
found_snap = False
date = None
rev = None
f = open(snapshotfile)
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if not parsed:
continue
if parsed["type"] == "snapshot":
date = parsed["date"]
rev = parsed["rev"]
found_snap = True
break
if not found_snap:
raise Exception("no snapshot entries in file")
return (date, rev)
def determine_curr_snapshot(triple):
i = 0
platform = get_platform(triple)
found_file = False
found_snap = False
hsh = None
date = None
rev = None
f = open(snapshotfile)
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if not parsed:
continue
if found_snap and parsed["type"] == "file":
if parsed["platform"] == platform:
hsh = parsed["hash"]
found_file = True
break
elif parsed["type"] == "snapshot":
date = parsed["date"]
rev = parsed["rev"]
found_snap = True
if not found_snap:
raise Exception("no snapshot entries in file")
if not found_file:
raise Exception("no snapshot file found for platform %s, rev %s" %
(platform, rev))
return full_snapshot_name(date, rev, platform, hsh)
|
apache-2.0
|
bollu/sandhi
|
modules/gr36/gnuradio-core/src/python/gnuradio/gr/qa_vector_sink_source.py
|
18
|
2014
|
#!/usr/bin/env python
#
# Copyright 2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
class test_vector_sink_source(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001(self):
src_data = [float(x) for x in range(16)]
expected_result = tuple(src_data)
src = gr.vector_source_f(src_data)
dst = gr.vector_sink_f()
self.tb.connect(src, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_002(self):
src_data = [float(x) for x in range(16)]
expected_result = tuple(src_data)
src = gr.vector_source_f(src_data, False, 2)
dst = gr.vector_sink_f(2)
self.tb.connect(src, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_003(self):
src_data = [float(x) for x in range(16)]
expected_result = tuple(src_data)
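        # 16 items cannot be split into vectors of length 3, so the source
        # block refuses to instantiate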
self.assertRaises(ValueError, lambda : gr.vector_source_f(src_data, False, 3))
if __name__ == '__main__':
gr_unittest.run(test_vector_sink_source, "test_vector_sink_source.xml")
|
gpl-3.0
|
magicnote/yohe
|
phpfmt.py
|
1
|
41900
|
import csv
import os
import os.path
import shutil
import sublime
import sublime_plugin
import subprocess
import time
import sys
from os.path import dirname, realpath
dist_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, dist_dir)
from diff_match_patch.python3.diff_match_patch import diff_match_patch
def print_debug(*msg):
if getSetting(sublime.active_window().active_view(), sublime.load_settings('yohe.sublime-settings'), "debug", False):
print(msg)
def getSetting( view, settings, key, default ):
local = 'phpfmt.' + key
return view.settings().get( local, settings.get( key, default ) )
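# A view or project can override any key by prefixing it with "phpfmt.";
# e.g. '"phpfmt.psr2": true' in the project settings (illustrative) takes
# precedence over "psr2" in yohe.sublime-settings.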
def dofmt(eself, eview, sgter = None, src = None, force = False):
if int(sublime.version()) < 3000:
print_debug("phpfmt: ST2 not supported")
return False
self = eself
view = eview
s = sublime.load_settings('yohe.sublime-settings')
additional_extensions = getSetting( view, s, "additional_extensions", [])
autoimport = getSetting( view, s, "autoimport", True)
debug = getSetting( view, s, "debug", False)
enable_auto_align = getSetting( view, s, "enable_auto_align", False)
ignore_list = getSetting( view, s, "ignore_list", "")
indent_with_space = getSetting( view, s, "indent_with_space", False)
psr1 = getSetting( view, s, "psr1", False)
psr1_naming = getSetting( view, s, "psr1_naming", psr1)
psr2 = getSetting( view, s, "psr2", False)
smart_linebreak_after_curly = getSetting( view, s, "smart_linebreak_after_curly", True)
skip_if_ini_missing = getSetting( view, s, "skip_if_ini_missing", False)
visibility_order = getSetting( view, s, "visibility_order", False)
yoda = getSetting( view, s, "yoda", False)
readini = getSetting( view, s, "readini", False)
passes = getSetting( view, s, "passes", [])
excludes = getSetting( view, s, "excludes", [])
php_bin = getSetting( view, s, "php_bin", "php")
formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "yohe", "fmt.phar")
config_file = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "yohe", "php.tools.ini")
uri = view.file_name()
dirnm, sfn = os.path.split(uri)
ext = os.path.splitext(uri)[1][1:]
if force is False and "php" != ext and not ext in additional_extensions:
print_debug("phpfmt: not a PHP file")
return False
if "" != ignore_list:
if type(ignore_list) is not list:
ignore_list = ignore_list.split(" ")
for v in ignore_list:
pos = uri.find(v)
if -1 != pos and v != "":
print_debug("phpfmt: skipping file")
return False
if not os.path.isfile(php_bin) and not php_bin == "php":
print_debug("Can't find PHP binary file at "+php_bin)
sublime.error_message("Can't find PHP binary file at "+php_bin)
# Look for oracle.sqlite
if dirnm != "":
        oracleDirNm = dirnm
        # seed oracleFname so the root-directory edge case cannot leave it unbound
        oracleFname = oracleDirNm+os.path.sep+"oracle.sqlite"
while oracleDirNm != "/":
oracleFname = oracleDirNm+os.path.sep+"oracle.sqlite"
if os.path.isfile(oracleFname):
break
origOracleDirNm = oracleDirNm
oracleDirNm = os.path.dirname(oracleDirNm)
if origOracleDirNm == oracleDirNm:
break
if not os.path.isfile(oracleFname):
print_debug("phpfmt (oracle file): not found")
oracleFname = None
else:
print_debug("phpfmt (oracle file): "+oracleFname)
if readini:
iniDirNm = dirnm
while iniDirNm != "/":
iniFname = iniDirNm+os.path.sep+".php.tools.ini"
if os.path.isfile(iniFname):
break
originiDirNm = iniDirNm
iniDirNm = os.path.dirname(iniDirNm)
if originiDirNm == iniDirNm:
break
if os.path.isfile(iniFname):
print_debug("phpfmt (ini file): "+iniFname)
config_file = iniFname
elif skip_if_ini_missing:
print_debug("phpfmt (ini file): not found - skipping")
return False
else:
oracleFname = None
cmd_ver = [php_bin, '-v'];
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
res, err = p.communicate()
print_debug("phpfmt (php_ver) cmd:\n", cmd_ver)
print_debug("phpfmt (php_ver) out:\n", res.decode('utf-8'))
print_debug("phpfmt (php_ver) err:\n", err.decode('utf-8'))
    php_out = res.decode('utf-8')
    php_err = err.decode('utf-8')
    if any(v in php_out or v in php_err
           for v in ('PHP 5.3', 'PHP 5.4', 'PHP 5.5', 'PHP 5.6')):
        s = debugEnvironment(php_bin, formatter_path)
        sublime.message_dialog('Warning.\nPHP 7.0 or newer is required.\nPlease, upgrade your local PHP installation.\nDebug information:'+s)
        return False
s = debugEnvironment(php_bin, formatter_path)
print_debug(s)
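    # Lint first with "php -l" (skipped when the AutoSemicolon pass is on);
    # the formatter below only runs if the buffer parses cleanly.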
lintret = 1
if "AutoSemicolon" in passes:
lintret = 0
else:
cmd_lint = [php_bin,"-ddisplay_errors=1","-l"];
if src is None:
cmd_lint.append(uri)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_lint, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_lint, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False)
else:
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_lint, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_lint, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
p.stdin.write(src.encode('utf-8'))
lint_out, lint_err = p.communicate()
lintret = p.returncode
if(lintret==0):
cmd_fmt = [php_bin]
if not debug:
cmd_fmt.append("-ddisplay_errors=stderr")
if psr1:
cmd_fmt.append("-dshort_open_tag=On")
cmd_fmt.append(formatter_path)
cmd_fmt.append("--config="+config_file)
if psr1:
cmd_fmt.append("--psr1")
if psr1_naming:
cmd_fmt.append("--psr1-naming")
if psr2:
cmd_fmt.append("--psr2")
if indent_with_space is True:
cmd_fmt.append("--indent_with_space")
elif indent_with_space > 0:
cmd_fmt.append("--indent_with_space="+str(indent_with_space))
if enable_auto_align:
cmd_fmt.append("--enable_auto_align")
if visibility_order:
cmd_fmt.append("--visibility_order")
if smart_linebreak_after_curly:
cmd_fmt.append("--smart_linebreak_after_curly")
if yoda:
cmd_fmt.append("--yoda")
if sgter is not None:
cmd_fmt.append("--setters_and_getters="+sgter)
cmd_fmt.append("--constructor="+sgter)
if autoimport is True and oracleFname is not None:
cmd_fmt.append("--oracleDB="+oracleFname)
if len(passes) > 0:
cmd_fmt.append("--passes="+','.join(passes))
if len(excludes) > 0:
cmd_fmt.append("--exclude="+','.join(excludes))
if debug:
cmd_fmt.append("-v")
if sgter is None:
cmd_fmt.append("-o=-")
if src is None:
cmd_fmt.append(uri)
else:
cmd_fmt.append("-")
print_debug("cmd_fmt: ", cmd_fmt)
if src is None:
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_fmt, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_fmt, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False)
else:
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_fmt, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_fmt, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
if src is not None:
p.stdin.write(src.encode('utf-8'))
res, err = p.communicate()
print_debug("p:\n", p.returncode)
print_debug("err:\n", err.decode('utf-8'))
if p.returncode != 0:
return ''
if sgter is not None:
sublime.set_timeout(revert_active_window, 50)
return res.decode('utf-8')
else:
sublime.status_message("phpfmt: format failed - syntax errors found")
print_debug("lint error: ", lint_out)
def dogeneratephpdoc(eself, eview):
self = eself
view = eview
s = sublime.load_settings('yohe.sublime-settings')
additional_extensions = s.get("additional_extensions", [])
autoimport = s.get("autoimport", True)
debug = s.get("debug", False)
enable_auto_align = s.get("enable_auto_align", False)
ignore_list = s.get("ignore_list", "")
indent_with_space = s.get("indent_with_space", False)
psr1 = s.get("psr1", False)
psr1_naming = s.get("psr1_naming", psr1)
psr2 = s.get("psr2", False)
smart_linebreak_after_curly = s.get("smart_linebreak_after_curly", True)
visibility_order = s.get("visibility_order", False)
yoda = s.get("yoda", False)
passes = s.get("passes", [])
php_bin = s.get("php_bin", "php")
formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "yohe", "fmt.phar")
config_file = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "yohe", "php.tools.ini")
uri = view.file_name()
dirnm, sfn = os.path.split(uri)
ext = os.path.splitext(uri)[1][1:]
if "php" != ext and not ext in additional_extensions:
print_debug("phpfmt: not a PHP file")
sublime.status_message("phpfmt: not a PHP file")
return False
if not os.path.isfile(php_bin) and not php_bin == "php":
print_debug("Can't find PHP binary file at "+php_bin)
sublime.error_message("Can't find PHP binary file at "+php_bin)
print_debug("phpfmt:", uri)
if enable_auto_align:
print_debug("auto align: enabled")
else:
print_debug("auto align: disabled")
cmd_lint = [php_bin,"-l",uri];
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_lint, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_lint, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False)
lint_out, lint_err = p.communicate()
if(p.returncode==0):
cmd_fmt = [php_bin]
if not debug:
cmd_fmt.append("-ddisplay_errors=stderr")
cmd_fmt.append(formatter_path)
cmd_fmt.append("--config="+config_file)
if psr1:
cmd_fmt.append("--psr1")
if psr1_naming:
cmd_fmt.append("--psr1-naming")
if psr2:
cmd_fmt.append("--psr2")
if indent_with_space:
cmd_fmt.append("--indent_with_space")
elif indent_with_space > 0:
cmd_fmt.append("--indent_with_space="+str(indent_with_space))
if enable_auto_align:
cmd_fmt.append("--enable_auto_align")
if visibility_order:
cmd_fmt.append("--visibility_order")
passes.append("GeneratePHPDoc")
if len(passes) > 0:
cmd_fmt.append("--passes="+','.join(passes))
cmd_fmt.append(uri)
uri_tmp = uri + "~"
print_debug("cmd_fmt: ", cmd_fmt)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_fmt, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_fmt, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False)
res, err = p.communicate()
print_debug("err:\n", err.decode('utf-8'))
sublime.set_timeout(revert_active_window, 50)
else:
print_debug("lint error: ", lint_out)
def doreordermethod(eself, eview):
self = eself
view = eview
s = sublime.load_settings('yohe.sublime-settings')
additional_extensions = s.get("additional_extensions", [])
autoimport = s.get("autoimport", True)
debug = s.get("debug", False)
enable_auto_align = s.get("enable_auto_align", False)
ignore_list = s.get("ignore_list", "")
indent_with_space = s.get("indent_with_space", False)
psr1 = s.get("psr1", False)
psr1_naming = s.get("psr1_naming", psr1)
psr2 = s.get("psr2", False)
smart_linebreak_after_curly = s.get("smart_linebreak_after_curly", True)
visibility_order = s.get("visibility_order", False)
yoda = s.get("yoda", False)
passes = s.get("passes", [])
php_bin = s.get("php_bin", "php")
formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "fmt.phar")
config_file = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "php.tools.ini")
uri = view.file_name()
dirnm, sfn = os.path.split(uri)
ext = os.path.splitext(uri)[1][1:]
if "php" != ext and not ext in additional_extensions:
print_debug("phpfmt: not a PHP file")
sublime.status_message("phpfmt: not a PHP file")
return False
if not os.path.isfile(php_bin) and not php_bin == "php":
print_debug("Can't find PHP binary file at "+php_bin)
sublime.error_message("Can't find PHP binary file at "+php_bin)
print_debug("phpfmt:", uri)
if enable_auto_align:
print_debug("auto align: enabled")
else:
print_debug("auto align: disabled")
cmd_lint = [php_bin,"-l",uri];
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_lint, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_lint, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False)
lint_out, lint_err = p.communicate()
if(p.returncode==0):
cmd_fmt = [php_bin]
if not debug:
cmd_fmt.append("-ddisplay_errors=stderr")
cmd_fmt.append(formatter_path)
cmd_fmt.append("--config="+config_file)
if psr1:
cmd_fmt.append("--psr1")
if psr1_naming:
cmd_fmt.append("--psr1-naming")
if psr2:
cmd_fmt.append("--psr2")
if indent_with_space:
cmd_fmt.append("--indent_with_space")
elif indent_with_space > 0:
cmd_fmt.append("--indent_with_space="+str(indent_with_space))
if enable_auto_align:
cmd_fmt.append("--enable_auto_align")
if visibility_order:
cmd_fmt.append("--visibility_order")
passes.append("OrganizeClass")
if len(passes) > 0:
cmd_fmt.append("--passes="+','.join(passes))
cmd_fmt.append(uri)
uri_tmp = uri + "~"
print_debug("cmd_fmt: ", cmd_fmt)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_fmt, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_fmt, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dirnm, shell=False)
res, err = p.communicate()
print_debug("err:\n", err.decode('utf-8'))
sublime.set_timeout(revert_active_window, 50)
else:
print_debug("lint error: ", lint_out)
def debugEnvironment(php_bin, formatter_path):
ret = ""
cmd_ver = [php_bin,"-v"];
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
res, err = p.communicate()
ret += ("phpfmt (php version):\n"+res.decode('utf-8'))
if err.decode('utf-8'):
ret += ("phpfmt (php version) err:\n"+err.decode('utf-8'))
ret += "\n"
cmd_ver = [php_bin,"-m"];
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
res, err = p.communicate()
if res.decode('utf-8').find("tokenizer") != -1:
ret += ("phpfmt (php tokenizer) found\n")
else:
ret += ("phpfmt (php tokenizer):\n"+res.decode('utf-8'))
if err.decode('utf-8'):
ret += ("phpfmt (php tokenizer) err:\n"+err.decode('utf-8'))
ret += "\n"
cmd_ver = [php_bin,formatter_path,"--version"];
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_ver, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
res, err = p.communicate()
ret += ("phpfmt (fmt.phar version):\n"+res.decode('utf-8'))
if err.decode('utf-8'):
ret += ("phpfmt (fmt.phar version) err:\n"+err.decode('utf-8'))
ret += "\n"
return ret
def revert_active_window():
sublime.active_window().active_view().run_command("revert")
sublime.active_window().active_view().run_command("phpcs_sniff_this_file")
def lookForOracleFile(view):
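    # Walk up from the file's directory towards the filesystem root and
    # report whether an oracle.sqlite index exists in any ancestor directory.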
uri = view.file_name()
oracleDirNm, sfn = os.path.split(uri)
originalDirNm = oracleDirNm
while oracleDirNm != "/":
oracleFname = oracleDirNm+os.path.sep+"oracle.sqlite"
if os.path.isfile(oracleFname):
return True
origOracleDirNm = oracleDirNm
oracleDirNm = os.path.dirname(oracleDirNm)
if origOracleDirNm == oracleDirNm:
return False
return False
def outputToPanel(name, eself, eedit, message):
eself.output_view = eself.view.window().get_output_panel(name)
eself.view.window().run_command("show_panel", {"panel": "output."+name})
eself.output_view.set_read_only(False)
eself.output_view.insert(eedit, eself.output_view.size(), message)
eself.output_view.set_read_only(True)
def hidePanel(name, eself, eedit):
eself.output_view = eself.view.window().get_output_panel(name)
eself.view.window().run_command("hide_panel", {"panel": "output."+name})
class phpfmt(sublime_plugin.EventListener):
def on_pre_save(self, view):
s = sublime.load_settings('yohe.sublime-settings')
format_on_save = s.get("format_on_save", True)
if format_on_save:
view.run_command('php_fmt')
class AnalyseThisCommand(sublime_plugin.TextCommand):
def run(self, edit):
if not lookForOracleFile(self.view):
sublime.active_window().active_view().run_command("build_oracle")
return False
lookTerm = (self.view.substr(self.view.word(self.view.sel()[0].a)))
s = sublime.load_settings('yohe.sublime-settings')
php_bin = s.get("php_bin", "php")
oraclePath = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "oracle.php")
uri = self.view.file_name()
dirNm, sfn = os.path.split(uri)
ext = os.path.splitext(uri)[1][1:]
oracleDirNm = dirNm
while oracleDirNm != "/":
oracleFname = oracleDirNm+os.path.sep+"oracle.sqlite"
if os.path.isfile(oracleFname):
break
origOracleDirNm = oracleDirNm
oracleDirNm = os.path.dirname(oracleDirNm)
if origOracleDirNm == oracleDirNm:
break
cmdOracle = [php_bin]
cmdOracle.append(oraclePath)
cmdOracle.append("introspect")
cmdOracle.append(lookTerm)
print_debug(cmdOracle)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=oracleDirNm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=oracleDirNm, shell=False)
res, err = p.communicate()
print_debug("phpfmt (introspect): "+res.decode('utf-8'))
print_debug("phpfmt (introspect) err: "+err.decode('utf-8'))
outputToPanel("phpfmtintrospect", self, edit, "Analysis:\n"+res.decode('utf-8'));
lastCalltip = ""
class CalltipCommand(sublime_plugin.TextCommand):
def run(self, edit):
global lastCalltip
uri = self.view.file_name()
dirnm, sfn = os.path.split(uri)
ext = os.path.splitext(uri)[1][1:]
s = sublime.load_settings('yohe.sublime-settings')
additional_extensions = s.get("additional_extensions", [])
if "php" != ext and not ext in additional_extensions:
return False
if not lookForOracleFile(self.view):
return False
lookTerm = (self.view.substr(self.view.word(self.view.sel()[0].a)))
if lastCalltip == lookTerm:
return False
lastCalltip = lookTerm
php_bin = s.get("php_bin", "php")
oraclePath = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "oracle.php")
uri = self.view.file_name()
dirNm, sfn = os.path.split(uri)
ext = os.path.splitext(uri)[1][1:]
oracleDirNm = dirNm
while oracleDirNm != "/":
oracleFname = oracleDirNm+os.path.sep+"oracle.sqlite"
if os.path.isfile(oracleFname):
break
origOracleDirNm = oracleDirNm
oracleDirNm = os.path.dirname(oracleDirNm)
if origOracleDirNm == oracleDirNm:
break
cmdOracle = [php_bin]
cmdOracle.append(oraclePath)
cmdOracle.append("calltip")
cmdOracle.append(lookTerm)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=oracleDirNm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=oracleDirNm, shell=False)
res, err = p.communicate()
output = res.decode('utf-8')
self.view.set_status("phpfmt", output)
class DebugEnvCommand(sublime_plugin.TextCommand):
def run(self, edit):
s = sublime.load_settings('yohe.sublime-settings')
php_bin = s.get("php_bin", "php")
formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "fmt.phar")
s = debugEnvironment(php_bin, formatter_path)
sublime.message_dialog(s)
class FmtNowCommand(sublime_plugin.TextCommand):
def run(self, edit):
vsize = self.view.size()
src = self.view.substr(sublime.Region(0, vsize))
if not src.strip():
return
src = dofmt(self, self.view, None, src, True)
if src is False or src == "":
return False
_, err = merge(self.view, vsize, src, edit)
print_debug(err)
class TogglePassMenuCommand(sublime_plugin.TextCommand):
def run(self, edit):
s = sublime.load_settings('yohe.sublime-settings')
php_bin = s.get("php_bin", "php")
formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "fmt.phar")
cmd_passes = [php_bin, formatter_path, '--list-simple']
print_debug(cmd_passes)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_passes, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_passes, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
out, err = p.communicate()
descriptions = out.decode("utf-8").strip().split(os.linesep)
def on_done(i):
if i >= 0:
s = sublime.load_settings('yohe.sublime-settings')
passes = s.get('passes', [])
chosenPass = descriptions[i].split(' ')
option = chosenPass[0]
passDesc = option
if option in passes:
passes.remove(option)
msg = "phpfmt: "+passDesc+" disabled"
print_debug(msg)
sublime.status_message(msg)
else:
passes.append(option)
msg = "phpfmt: "+passDesc+" enabled"
print_debug(msg)
sublime.status_message(msg)
s.set('passes', passes)
sublime.save_settings('yohe.sublime-settings')
self.view.window().show_quick_panel(descriptions, on_done, sublime.MONOSPACE_FONT)
class ToggleExcludeMenuCommand(sublime_plugin.TextCommand):
def run(self, edit):
s = sublime.load_settings('yohe.sublime-settings')
php_bin = s.get("php_bin", "php")
formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "fmt.phar")
cmd_passes = [php_bin, formatter_path, '--list-simple']
print_debug(cmd_passes)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmd_passes, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmd_passes, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
out, err = p.communicate()
descriptions = out.decode("utf-8").strip().split(os.linesep)
def on_done(i):
if i >= 0:
s = sublime.load_settings('yohe.sublime-settings')
excludes = s.get('excludes', [])
chosenPass = descriptions[i].split(' ')
option = chosenPass[0]
passDesc = option
if option in excludes:
excludes.remove(option)
msg = "phpfmt: "+passDesc+" disabled"
print_debug(msg)
sublime.status_message(msg)
else:
excludes.append(option)
msg = "phpfmt: "+passDesc+" enabled"
print_debug(msg)
sublime.status_message(msg)
s.set('excludes', excludes)
sublime.save_settings('yohe.sublime-settings')
self.view.window().show_quick_panel(descriptions, on_done, sublime.MONOSPACE_FONT)
class ToggleCommand(sublime_plugin.TextCommand):
def run(self, edit, option):
s = sublime.load_settings('yohe.sublime-settings')
options = {
"autocomplete":"autocomplete",
"autoimport":"dependency autoimport",
"enable_auto_align":"auto align",
"format_on_save":"format on save",
"psr1":"PSR1",
"psr1_naming":"PSR1 Class and Method Naming",
"psr2":"PSR2",
"readini":"look for .php.tools.ini",
"smart_linebreak_after_curly":"smart linebreak after curly",
"skip_if_ini_missing":"skip if ini file is missing",
"visibility_order":"visibility order",
"yoda":"yoda mode",
}
s = sublime.load_settings('yohe.sublime-settings')
value = s.get(option, False)
if value:
s.set(option, False)
msg = "phpfmt: "+options[option]+" disabled"
print_debug(msg)
sublime.status_message(msg)
else:
s.set(option, True)
msg = "phpfmt: "+options[option]+" enabled"
print_debug(msg)
sublime.status_message(msg)
sublime.save_settings('yohe.sublime-settings')
class UpdatePhpBinCommand(sublime_plugin.TextCommand):
def run(self, edit):
def execute(text):
s = sublime.load_settings('yohe.sublime-settings')
s.set("php_bin", text)
s = sublime.load_settings('yohe.sublime-settings')
self.view.window().show_input_panel('php binary path:', s.get("php_bin", ""), execute, None, None)
class OrderMethodCommand(sublime_plugin.TextCommand):
def run(self, edit):
doreordermethod(self, self.view)
class GeneratePhpdocCommand(sublime_plugin.TextCommand):
def run(self, edit):
dogeneratephpdoc(self, self.view)
class SgterSnakeCommand(sublime_plugin.TextCommand):
def run(self, edit):
dofmt(self, self.view, 'snake')
class SgterCamelCommand(sublime_plugin.TextCommand):
def run(self, edit):
dofmt(self, self.view, 'camel')
class SgterGoCommand(sublime_plugin.TextCommand):
def run(self, edit):
dofmt(self, self.view, 'golang')
class BuildOracleCommand(sublime_plugin.TextCommand):
def run(self, edit):
def buildDB():
if self.msgFile is not None:
self.msgFile.window().run_command('close_file')
s = sublime.load_settings('yohe.sublime-settings')
php_bin = s.get("php_bin", "php")
oraclePath = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "oracle.php")
cmdOracle = [php_bin]
cmdOracle.append(oraclePath)
cmdOracle.append("flush")
cmdOracle.append(self.dirNm)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.dirNm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.dirNm, shell=False)
res, err = p.communicate()
print_debug("phpfmt (oracle): "+res.decode('utf-8'))
print_debug("phpfmt (oracle) err: "+err.decode('utf-8'))
sublime.status_message("phpfmt (oracle): done")
#sublime.set_timeout_async(self.long_command, 0)
def askForDirectory(text):
self.dirNm = text
sublime.set_timeout_async(buildDB, 0)
view = self.view
s = sublime.load_settings('yohe.sublime-settings')
php_bin = s.get("php_bin", "php")
uri = view.file_name()
oracleDirNm, sfn = os.path.split(uri)
originalDirNm = oracleDirNm
while oracleDirNm != "/":
oracleFname = oracleDirNm+os.path.sep+"oracle.sqlite"
if os.path.isfile(oracleFname):
break
origOracleDirNm = oracleDirNm
oracleDirNm = os.path.dirname(oracleDirNm)
if origOracleDirNm == oracleDirNm:
break
self.msgFile = None
if not os.path.isfile(oracleFname):
print_debug("phpfmt (oracle file): not found -- dialog")
self.msgFile = self.view.window().open_file(os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "message"))
self.msgFile.set_read_only(True)
self.view.window().show_input_panel('location:', originalDirNm, askForDirectory, None, None)
else:
print_debug("phpfmt (oracle file): "+oracleFname)
print_debug("phpfmt (oracle dir): "+oracleDirNm)
self.dirNm = oracleDirNm
sublime.set_timeout_async(buildDB, 0)
class IndentWithSpacesCommand(sublime_plugin.TextCommand):
def run(self, edit):
def setIndentWithSpace(text):
s = sublime.load_settings('yohe.sublime-settings')
v = text.strip()
if not v:
v = False
else:
v = int(v)
s.set("indent_with_space", v)
sublime.save_settings('yohe.sublime-settings')
sublime.status_message("phpfmt (indentation): done")
sublime.active_window().active_view().run_command("fmt_now")
s = sublime.load_settings('yohe.sublime-settings')
spaces = s.get("indent_with_space", 4)
if not spaces:
spaces = ""
spaces = str(spaces)
self.view.window().show_input_panel('how many spaces? (leave it empty to return to tabs)', spaces, setIndentWithSpace, None, None)
class PHPFmtComplete(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
s = sublime.load_settings('yohe.sublime-settings')
autocomplete = s.get("autocomplete", False)
if autocomplete is False:
return []
pos = locations[0]
scopes = view.scope_name(pos).split()
if not ('source.php.embedded.block.html' in scopes or 'source.php' in scopes):
return []
print_debug("phpfmt (autocomplete): "+prefix);
comps = []
uri = view.file_name()
dirNm, sfn = os.path.split(uri)
ext = os.path.splitext(uri)[1][1:]
oracleDirNm = dirNm
while oracleDirNm != "/":
oracleFname = oracleDirNm+os.path.sep+"oracle.sqlite"
if os.path.isfile(oracleFname):
break
origOracleDirNm = oracleDirNm
oracleDirNm = os.path.dirname(oracleDirNm)
if origOracleDirNm == oracleDirNm:
break
if not os.path.isfile(oracleFname):
sublime.status_message("phpfmt: autocomplete database not found")
return []
if prefix in "namespace":
ns = dirNm.replace(oracleDirNm, '').replace('/','\\')
if ns.startswith('\\'):
ns = ns[1:]
comps.append((
'%s \t %s \t %s' % ("namespace", ns, "namespace"),
'%s %s;\n${0}' % ("namespace", ns),
))
if prefix in "class":
print_debug("class guess")
className = sfn.split(".")[0]
comps.append((
'%s \t %s \t %s' % ("class", className, "class"),
'%s %s {\n\t${0}\n}\n' % ("class", className),
))
php_bin = s.get("php_bin", "php")
oraclePath = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "oracle.php")
cmdOracle = [php_bin]
cmdOracle.append(oraclePath)
cmdOracle.append("autocomplete")
cmdOracle.append(prefix)
print_debug(cmdOracle)
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=oracleDirNm, shell=False, startupinfo=startupinfo)
else:
p = subprocess.Popen(cmdOracle, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=oracleDirNm, shell=False)
res, err = p.communicate()
print_debug("phpfmt (autocomplete) err: "+err.decode('utf-8'))
f = res.decode('utf-8').split('\n')
reader = csv.reader(f, delimiter=',')
for row in reader:
if len(row) > 0:
if "class" == row[3]:
comps.append((
'%s \t %s \t %s' % (row[1], row[0], "class"),
'%s(${0})' % (row[1]),
))
comps.append((
'%s \t %s \t %s' % (row[0], row[0], "class"),
'%s(${0})' % (row[0]),
))
if "method" == row[3]:
comps.append((
'%s \t %s \t %s' % (row[1], row[2], "method"),
'%s' % (row[0].replace('$','\$')),
))
return comps
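# Editor's note: each completion built above is a Sublime Text
# (trigger, contents) tuple, e.g. ("getName \t User \t method", "getName(${0})");
# the ${0} snippet field marks where the caret lands after insertion.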
s = sublime.load_settings('yohe.sublime-settings')
version = s.get('version', 1)
s.set('version', version)
sublime.save_settings('yohe.sublime-settings')
if version == 2:
# Convert to version 3
print_debug("Convert to version 3")
s.set('version', 3)
sublime.save_settings('yohe.sublime-settings')
if version == 3:
# Convert to version 4
print_debug("Convert to version 4")
s.set('version', 4)
passes = s.get('passes', [])
passes.append("ReindentSwitchBlocks")
s.set('passes', passes)
sublime.save_settings('yohe.sublime-settings')
# def selfupdate():
# s = sublime.load_settings('yohe.sublime-settings')
# php_bin = s.get("php_bin", "php")
# formatter_path = os.path.join(dirname(realpath(sublime.packages_path())), "Packages", "phpfmt", "fmt.phar")
# print_debug("Selfupdate")
# cmd_update = [php_bin, formatter_path, '--selfupdate']
# if os.name == 'nt':
# startupinfo = subprocess.STARTUPINFO()
# startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# p = subprocess.Popen(cmd_update, shell=False, startupinfo=startupinfo)
# else:
# p = subprocess.Popen(cmd_update, shell=False)
# sublime.set_timeout(selfupdate, 3000)
def _ct_poller():
s = sublime.load_settings('yohe.sublime-settings')
if s.get("calltip", False):
try:
view = sublime.active_window().active_view()
view.run_command('calltip')
except Exception:
pass
sublime.set_timeout(_ct_poller, 5000)
_ct_poller()
class PhpFmtCommand(sublime_plugin.TextCommand):
def run(self, edit):
vsize = self.view.size()
src = self.view.substr(sublime.Region(0, vsize))
if not src.strip():
return
src = dofmt(self, self.view, None, src)
if src is False or src == "":
return False
_, err = merge(self.view, vsize, src, edit)
print_debug(err)
class MergeException(Exception):
pass
def _merge(view, size, text, edit):
def ss(start, end):
return view.substr(sublime.Region(start, end))
dmp = diff_match_patch()
diffs = dmp.diff_main(ss(0, size), text, False)
dmp.diff_cleanupEfficiency(diffs)
i = 0
dirty = False
for d in diffs:
k, s = d
l = len(s)
if k == 0:
# match
if ss(i, i+l) != s:
raise MergeException('mismatch', dirty)
i += l
else:
dirty = True
if k > 0:
# insert
view.insert(edit, i, s)
i += l
else:
# delete
if ss(i, i+l) != s:
raise MergeException('mismatch', dirty)
view.erase(edit, sublime.Region(i, i+l))
return dirty
def merge(view, size, text, edit):
vs = view.settings()
ttts = vs.get("translate_tabs_to_spaces")
vs.set("translate_tabs_to_spaces", False)
origin_src = view.substr(sublime.Region(0, view.size()))
if not origin_src.strip():
vs.set("translate_tabs_to_spaces", ttts)
return (False, '')
try:
dirty = False
err = ''
if size < 0:
size = view.size()
dirty = _merge(view, size, text, edit)
except MergeException as ex:
dirty = True
err = "Could not merge changes into the buffer, edit aborted: %s" % ex[0]
view.replace(edit, sublime.Region(0, view.size()), origin_src)
except Exception as ex:
err = "error: %s" % ex
finally:
vs.set("translate_tabs_to_spaces", ttts)
return (dirty, err)
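# --- Editor's sketch (not part of the original plugin) ---
# A minimal illustration of the merge strategy implemented by _merge above,
# assuming the diff_match_patch module this plugin already imports. The buffer
# is diffed against the formatted source and only the changed spans are
# replayed, so cursor positions, folds and undo history survive the reformat.
def _demo_diff(old_text, new_text):
    dmp = diff_match_patch()  # same class _merge instantiates
    diffs = dmp.diff_main(old_text, new_text, False)
    dmp.diff_cleanupEfficiency(diffs)
    # Each tuple is (op, text): op == 0 keeps text, 1 inserts it, -1 deletes it.
    return diffs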
|
mit
|
ywcui1990/nupic.research
|
htmresearch/support/nlp_model_test_helpers.py
|
9
|
8428
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
helpStr = """
Methods and data for running NLP model API tests. The intent here is
to ensure that changes to the models do not decrease their classification
accuracies (see NLP_MODEL_ACCURACIES below). Three tests are supported:
hello classification: Very simple, hello world classification test. There are
two categories that can be discriminated using bag of words. The training
set is 8 docs, and the test set is an additional 2 (i.e. 10 total) -- first
is an incorrectly labeled version of a training sample, second is
semantically similar to one of the training samples.
simple queries: Qualitative test where we query a trained model to see which
data samples are the most and least similar.
simple labels: Less simple classification test. The dataset used here must be
specified in the command line args.
"""
import argparse
import numpy
from prettytable import PrettyTable
from textwrap import TextWrapper
from tqdm import tqdm
from htmresearch.frameworks.nlp.classification_model import ClassificationModel
from htmresearch.frameworks.nlp.model_factory import (
createModel, getNetworkConfig)
# There should be one "htm" model for each htm config entry.
NLP_MODEL_TYPES = [
"docfp",
"cioword",
"htm",
"htm",
"htm",
"keywords"]
# Network models use 4k retina.
HTM_CONFIGS = [
("HTM_sensor_knn", "../data/network_configs/sensor_knn.json"),
("HTM_sensor_simple_tp_knn", "../data/network_configs/sensor_simple_tp_knn.json"),
("HTM_sensor_tm_knn", "../data/network_configs/sensor_tm_knn.json"),
]
# Some values of k we know work well.
K_VALUES = { "keywords": 21, "docfp": 1}
NLP_MODEL_ACCURACIES = {
"hello_classification": {
"docfp": 90.0,
"cioword": 90.0,
"HTM_sensor_knn": 80.0,
"HTM_sensor_simple_tp_knn": 90.0,
"HTM_sensor_tm_knn": 90.0,
"keywords": 80.0,
},
"simple_queries": {
"docfp": "good but not great",
"cioword": "passable",
"HTM_sensor_knn": "passable",
"HTM_sensor_simple_tp_knn": "idk, no results yet!",
"HTM_sensor_tm_knn": "idk, no results yet!",
"keywords": "passable",
},
"simple_labels": {
"docfp": 100.0,
"cioword": 100.0,
"HTM_sensor_knn": 66.2,
"HTM_sensor_simple_tp_knn": 99.7,
"HTM_sensor_tm_knn": 0.0,
"keywords": 16.9,
},
}
_WRAPPER = TextWrapper(width=80)
def executeModelLifecycle(args, trainingData, labelRefs):
""" Execute model lifecycle: create a model, train it, save it, reload it.
@param args (argparse) Arguments used in classification model API experiments.
@param trainingData (dict) Keys are document numbers, values are three-tuples
of the document (str), labels (list), and document ID (int).
@param labelRefs (list) Label names (str) corresponding to label indices.
@return (two-tuple) Original and new models.
"""
model = instantiateModel(args)
model = trainModel(model, trainingData, labelRefs, args.verbosity)
model.save(args.modelDir)
newModel = ClassificationModel.load(args.modelDir)
return model, newModel
def instantiateModel(args):
"""
Set some specific arguments and return an instance of the model we will use.
"""
args.networkConfig = getNetworkConfig(args.networkConfigPath)
args.k = K_VALUES.get(args.modelName, 1)
return createModel(**vars(args))
def trainModel(model, trainingData, labelRefs, verbosity=0):
"""
Train the given model on trainingData. Return the trained model instance.
"""
modelName = repr(model).split()[0].split(".")[-1]
print
print "===================Training {} on sample text================".format(
modelName)
if verbosity > 0:
printTemplate = PrettyTable(["ID", "Document", "Label"])
printTemplate.align = "l"
printTemplate.header_style = "upper"
for (document, labels, docId) in tqdm(trainingData):
if verbosity > 0:
docStr = unicode(document, errors="ignore")
printTemplate.add_row(
[docId, _WRAPPER.fill(docStr), labelRefs[labels[0]]])
model.trainDocument(document, labels, docId)
if verbosity > 0:
print printTemplate
return model
def testModel(model, testData, labelRefs, docCategoryMap=None, verbosity=0):
"""
Test the given model on testData, print out and return accuracy percentage.
Accuracy is calculated as follows. Each token in a document votes for a single
category; it's possible for a token to contribute no votes. The document is
classified with the category that received the most votes. Note that it is
possible for a document to receive no votes, in which case it is counted as a
misclassification.
"""
modelName = repr(model).split()[0].split(".")[-1]
print
print "===================Testing {} on sample text==================".format(
modelName)
if verbosity > 0:
print
printTemplate = PrettyTable(
["ID", "Document", "Actual Label(s)", "Predicted Label"])
printTemplate.align = "l"
printTemplate.header_style = "upper"
numCorrect = 0
labelRefs.append("none")
for (document, labels, docId) in tqdm(testData):
categoryVotes, _, _ = model.inferDocument(document)
if categoryVotes.sum() > 0:
# We will count classification as correct if the best category is any
# one of the categories associated with this docId
predicted = categoryVotes.argmax()
if predicted in docCategoryMap[docId]:
numCorrect += 1
else:
# No classification possible for this doc
predicted = -1
if verbosity > 0:
docStr = unicode(document, errors="ignore")
printTemplate.add_row(
[docId,
_WRAPPER.fill(docStr),
[labelRefs[l] for l in labels],
labelRefs[predicted]]
)
accuracyPct = numCorrect * 100.0 / len(testData)
if verbosity > 0:
print printTemplate
print
print "Total correct =", numCorrect, "out of", len(testData), "documents"
print "Accuracy =", accuracyPct, "%"
return accuracyPct
def printSummary(testName, accuracies):
""" Print comparison of the new acuracies against the current values.
@param testName (str) One of the NLP_MODEL_ACCURACIES keys.
@param accuracies (dict) Keys are model names, values are accuracy percents.
"""
try:
currentAccuracies = NLP_MODEL_ACCURACIES[testName]
except KeyError as e:
print "No accuracy values for test '{}'".format(testName)
raise
printTemplate = PrettyTable(["NLP Model", "Current Accuracy", "New Accuracy"])
printTemplate.align = "l"
printTemplate.header_style = "upper"
for modelName, accuracyPct in accuracies.iteritems():
currentPct = currentAccuracies.get(modelName, "which model?")
printTemplate.add_row([modelName, currentPct, accuracyPct])
print
print "Results summary:"
print printTemplate
def assertResults(testName, accuracies):
""" Assert the new acuracies against the current values.
@param testName (str) One of the NLP_MODEL_ACCURACIES keys.
@param accuracies (dict) Keys are model names, values are accuracy percents.
"""
try:
currentAccuracies = NLP_MODEL_ACCURACIES[testName]
except KeyError as e:
print "No accuracy values for test '{}'".format(testName)
raise e
for modelName, accuracyPct in accuracies.iteritems():
currentPct = currentAccuracies.get(modelName, 0.0)
assert accuracyPct >= currentPct, \
"{} does not pass the test! Current accuracy is {}, new is {}.".format(
modelName, currentPct, accuracyPct)
|
agpl-3.0
|
rhdedgar/openshift-tools
|
openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/lib_yaml_editor/build/src/yedit.py
|
9
|
6339
|
# pylint: skip-file
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
def __init__(self, filename=None, content=None, content_type='yaml'):
self.content = content
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
if self.filename and not self.content:
self.load(content_type=self.content_type)
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def remove_entry(data, key):
''' remove data at location key '''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None):
''' Set an item in a nested dict/list using key notation a.b.c
d = {'a': {'b': 'c'}}
add_entry(d, 'a.d', 'e') sets d['a']['d'] = 'e'
'''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
curr_data = data
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data:
data = data[dict_key]
continue
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for add
# expected list entry
if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return curr_data
@staticmethod
def get_entry(data, key):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
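# Editor's sketch: with data = {'a': {'b': ['x', 'y']}}, Yedit.get_entry(data,
# 'a.b[1]') yields key_indexes [('', 'a'), ('', 'b'), ('1', '')], walks the
# dict keys then the list index, and returns 'y'; any step that fails to
# match returns None.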
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
with open(self.filename, 'w') as yfd:
yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
def read(self):
''' read the file and return its raw contents '''
# check if it exists
if not self.exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' load the file contents as yaml (or json) and return them '''
contents = self.read()
if not contents:
return None
# check if it is yaml
try:
if content_type == 'yaml':
self.yaml_dict = yaml.load(contents)
elif content_type == 'json':
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as _:
# Error loading yaml or json
return None
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
return entry
def delete(self, key):
''' remove key from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if not entry:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, key)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def put(self, key, value):
''' put key, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
result = Yedit.add_entry(self.yaml_dict, key, value)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def create(self, key, value):
''' create a yaml file '''
if not self.exists():
self.yaml_dict = {key: value}
return (True, self.yaml_dict)
return (False, self.yaml_dict)
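# Editor's usage sketch (assuming PyYAML and the re/os imports this
# pylint-skipped fragment expects from the surrounding ansible module):
#   yed = Yedit('/tmp/example.yml', content={'a': {'b': 'c'}})
#   yed.get('a.b')       # -> 'c'
#   yed.put('a.d', 'e')  # -> (True, {'a': {'b': 'c', 'd': 'e'}})
#   yed.write()          # safe_dump the dict back to /tmp/example.yml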
|
apache-2.0
|
mozilla/django-jobvite
|
django_jobvite/models.py
|
1
|
1868
|
from django.db import models
class Category(models.Model):
name = models.CharField(max_length=100)
slug = models.CharField(max_length=100, unique=True)
description = models.TextField(blank=True)
def __unicode__(self):
return self.name
def to_dict(self):
fields = self._meta.fields
d = {self.id: {}}
for field in fields:
if field.primary_key:
continue
d[self.id][field.name] = getattr(self, field.name)
return d
class Position(models.Model):
job_id = models.CharField(max_length=25)
title = models.CharField(max_length=100)
requisition_id = models.PositiveIntegerField()
category = models.ForeignKey(Category, null=True, blank=True)
job_type = models.CharField(max_length=255)
location = models.CharField(max_length=150, null=True, blank=True)
date = models.CharField(max_length=100)
detail_url = models.URLField()
apply_url = models.URLField()
description = models.TextField()
brief_description = models.TextField(null=True, blank=True)
location_filter = models.CharField(max_length=255, blank=True, default='')
def __unicode__(self):
return u"%s - %s" % (self.job_id, self.title)
@models.permalink
def get_absolute_url(self):
return ('django_jobvite_position', (), {
'job_id': self.job_id,
})
def to_dict(self):
"""Return the model as a dictionary keyed on ``job_id``."""
fields = self._meta.fields
d = {self.job_id: {}}
for field in fields:
if field.primary_key:
continue
if field.name == 'job_id':
continue
d[self.job_id][field.name] = getattr(self, field.name)
# ``category`` is a nullable ForeignKey above, so there is at most one name.
d[self.job_id]['categories'] = [self.category.name] if self.category else []
return d
|
bsd-3-clause
|
CristianBB/SickRage
|
lib/github/PaginatedList.py
|
23
|
7862
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Bill Mill <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 davidbrai <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class PaginatedListBase:
def __init__(self):
self.__elements = list()
def __getitem__(self, index):
assert isinstance(index, (int, slice))
if isinstance(index, (int, long)):
self.__fetchToIndex(index)
return self.__elements[index]
else:
return self._Slice(self, index)
def __iter__(self):
for element in self.__elements:
yield element
while self._couldGrow():
newElements = self._grow()
for element in newElements:
yield element
def _isBiggerThan(self, index):
return len(self.__elements) > index or self._couldGrow()
def __fetchToIndex(self, index):
while len(self.__elements) <= index and self._couldGrow():
self._grow()
def _grow(self):
newElements = self._fetchNextPage()
self.__elements += newElements
return newElements
class _Slice:
def __init__(self, theList, theSlice):
self.__list = theList
self.__start = theSlice.start or 0
self.__stop = theSlice.stop
self.__step = theSlice.step or 1
def __iter__(self):
index = self.__start
while not self.__finished(index):
if self.__list._isBiggerThan(index):
yield self.__list[index]
index += self.__step
else:
return
def __finished(self, index):
return self.__stop is not None and index >= self.__stop
class PaginatedList(PaginatedListBase):
"""
This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
You can simply enumerate through instances of this class::
for repo in user.get_repos():
print repo.name
You can also index them or take slices::
second_repo = user.get_repos()[1]
first_repos = user.get_repos()[:10]
If you want to iterate in reversed order, just do::
for repo in user.get_repos().reversed:
print repo.name
And if you really need it, you can explicitly access a specific page::
some_repos = user.get_repos().get_page(0)
some_other_repos = user.get_repos().get_page(3)
"""
def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None):
PaginatedListBase.__init__(self)
self.__requester = requester
self.__contentClass = contentClass
self.__firstUrl = firstUrl
self.__firstParams = firstParams or ()
self.__nextUrl = firstUrl
self.__nextParams = firstParams or {}
self.__headers = headers
if self.__requester.per_page != 30:
self.__nextParams["per_page"] = self.__requester.per_page
self._reversed = False
self.__totalCount = None
@property
def totalCount(self):
if not self.__totalCount:
self._grow()
return self.__totalCount
def _getLastPageUrl(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=self.__nextParams,
headers=self.__headers
)
links = self.__parseLinkHeader(headers)
lastUrl = links.get("last")
return lastUrl
@property
def reversed(self):
r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
r.__reverse()
return r
def __reverse(self):
self._reversed = True
lastUrl = self._getLastPageUrl()
if lastUrl:
self.__nextUrl = lastUrl
def _couldGrow(self):
return self.__nextUrl is not None
def _fetchNextPage(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__nextUrl,
parameters=self.__nextParams,
headers=self.__headers
)
data = data if data else []
self.__nextUrl = None
if len(data) > 0:
links = self.__parseLinkHeader(headers)
if self._reversed:
if "prev" in links:
self.__nextUrl = links["prev"]
elif "next" in links:
self.__nextUrl = links["next"]
self.__nextParams = None
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
content = [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data if element is not None
]
if self._reversed:
return content[::-1]
return content
def __parseLinkHeader(self, headers):
links = {}
if "link" in headers:
linkHeaders = headers["link"].split(", ")
for linkHeader in linkHeaders:
(url, rel) = linkHeader.split("; ")
url = url[1:-1]
rel = rel[5:-1]
links[rel] = url
return links
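# Editor's sketch: a header value such as
#   '<https://api.github.com/user/repos?page=2>; rel="next", '
#   '<https://api.github.com/user/repos?page=9>; rel="last"'
# parses to {"next": "...page=2", "last": "...page=9"}, which is what
# _fetchNextPage and __reverse consume above.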
def get_page(self, page):
params = dict(self.__firstParams)
if page != 0:
params["page"] = page + 1
if self.__requester.per_page != 30:
params["per_page"] = self.__requester.per_page
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=params,
headers=self.__headers
)
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
return [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data
]
|
gpl-3.0
|
jindongh/boto
|
tests/integration/sts/test_session_token.py
|
101
|
3405
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests for Session Tokens
"""
import unittest
import os
from boto.exception import BotoServerError
from boto.sts.connection import STSConnection
from boto.sts.credentials import Credentials
from boto.s3.connection import S3Connection
class SessionTokenTest(unittest.TestCase):
sts = True
def test_session_token(self):
print('--- running Session Token tests ---')
c = STSConnection()
# Create a session token
token = c.get_session_token()
# Save session token to a file
token.save('token.json')
# Now load up a copy of that token
token_copy = Credentials.load('token.json')
assert token_copy.access_key == token.access_key
assert token_copy.secret_key == token.secret_key
assert token_copy.session_token == token.session_token
assert token_copy.expiration == token.expiration
assert token_copy.request_id == token.request_id
os.unlink('token.json')
assert not token.is_expired()
# Try using the session token with S3
s3 = S3Connection(aws_access_key_id=token.access_key,
aws_secret_access_key=token.secret_key,
security_token=token.session_token)
buckets = s3.get_all_buckets()
print('--- tests completed ---')
def test_assume_role_with_web_identity(self):
c = STSConnection(anon=True)
arn = 'arn:aws:iam::000240903217:role/FederatedWebIdentityRole'
wit = 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
try:
creds = c.assume_role_with_web_identity(
role_arn=arn,
role_session_name='guestuser',
web_identity_token=wit,
provider_id='www.amazon.com',
)
except BotoServerError as err:
self.assertEqual(err.status, 403)
self.assertTrue('Not authorized' in err.body)
def test_decode_authorization_message(self):
c = STSConnection()
try:
creds = c.decode_authorization_message('b94d27b9934')
except BotoServerError as err:
self.assertEqual(err.status, 400)
self.assertIn('InvalidAuthorizationMessageException', err.body)
|
mit
|
DonBeo/statsmodels
|
statsmodels/base/data.py
|
6
|
21327
|
"""
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
from statsmodels.compat.python import reduce, iteritems, lmap, zip, range
from statsmodels.compat.numpy import np_matrix_rank
import numpy as np
from pandas import DataFrame, Series, TimeSeries, isnull
from statsmodels.tools.decorators import (resettable_cache, cache_readonly,
cache_writable)
import statsmodels.tools.data as data_util
from statsmodels.tools.sm_exceptions import MissingDataError
def _asarray_2dcolumns(x):
if np.asarray(x).ndim > 1 and np.asarray(x).squeeze().ndim == 1:
return
def _asarray_2d_null_rows(x):
"""
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
#Have to have the asarrays because isnull doesn't account for array-like
#input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
return np.any(isnull(x), axis=1)[:, None]
def _nan_rows(*arrs):
"""
Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array-like.
"""
if len(arrs) == 1:
arrs += ([[False]],)
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
x_is_boolean_array = hasattr(x, 'dtype') and x.dtype == bool and x
return np.logical_or(_asarray_2d_null_rows(x),
(x_is_boolean_array | _asarray_2d_null_rows(y)))
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze()
class ModelData(object):
"""
Class responsible for handling input data and extracting metadata into the
appropriate form
"""
_param_names = None
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
if 'design_info' in kwargs:
self.design_info = kwargs.pop('design_info')
if 'formula' in kwargs:
self.formula = kwargs.pop('formula')
if missing != 'none':
arrays, nan_idx = self.handle_missing(endog, exog, missing,
**kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
self.endog, self.exog = self._convert_endog_exog(self.endog,
self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
self.orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
# this has side-effects, attaches k_constant and const_idx
self._handle_constant(hasconst)
self._check_integrity()
self._cache = resettable_cache()
def __getstate__(self):
from copy import copy
d = copy(self.__dict__)
if "design_info" in d:
del d["design_info"]
d["restore_design_info"] = True
return d
def __setstate__(self, d):
if "restore_design_info" in d:
# NOTE: there may be a more performant way to do this
from patsy import dmatrices
depth = 1 # hmm, have to have same eval env in calling ns
data = d['orig_endog'].join(d['orig_exog'])
_, design = dmatrices(d['formula'], data, eval_env=depth,
return_type='dataframe')
self.design_info = design.design_info
del d["restore_design_info"]
self.__dict__.update(d)
def _handle_constant(self, hasconst):
if hasconst is not None:
if hasconst:
self.k_constant = 1
self.const_idx = None
else:
self.k_constant = 0
self.const_idx = None
elif self.exog is None:
self.const_idx = None
self.k_constant = 0
else:
# detect where the constant is
check_implicit = False
const_idx = np.where(self.exog.ptp(axis=0) == 0)[0].squeeze()
self.k_constant = const_idx.size
if self.k_constant == 1:
if self.exog[:, const_idx].mean() != 0:
self.const_idx = const_idx
else:
# we only have a zero column and no other constant
check_implicit = True
elif self.k_constant > 1:
# we have more than one constant column
# look for ones
values = [] # keep values if we need != 0
for idx in const_idx:
value = self.exog[:, idx].mean()
if value == 1:
self.k_constant = 1
self.const_idx = idx
break
values.append(value)
else:
# we didn't break, no column of ones
pos = (np.array(values) != 0)
if pos.any():
# take the first nonzero column
self.k_constant = 1
self.const_idx = const_idx[pos.argmax()]
else:
# only zero columns
check_implicit = True
elif self.k_constant == 0:
check_implicit = True
else:
# shouldn't be here
pass
if check_implicit:
# look for implicit constant
# Compute rank of augmented matrix
augmented_exog = np.column_stack(
(np.ones(self.exog.shape[0]), self.exog))
rank_augm = np_matrix_rank(augmented_exog)
rank_orig = np_matrix_rank(self.exog)
self.k_constant = int(rank_orig == rank_augm)
self.const_idx = None
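# Editor's sketch of the detection above: for exog columns [x, ones, zeros],
# ptp(axis=0) == 0 flags both constant columns, the column with mean 1 wins
# (k_constant=1, const_idx points at the ones column), and the rank-based
# implicit-constant check is skipped; a lone zero column instead triggers the
# augmented-rank comparison.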
@classmethod
def _drop_nans(cls, x, nan_mask):
return x[nan_mask]
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
return x[nan_mask][:, nan_mask]
@classmethod
def handle_missing(cls, endog, exog, missing, **kwargs):
"""
This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones.
"""
none_array_names = []
# patsy's already dropped NaNs in y/X
missing_idx = kwargs.pop('missing_idx', None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
none_array_names += ['exog']
elif exog is not None:
combined = (endog, exog)
combined_names = ['endog', 'exog']
else:
combined = (endog,)
combined_names = ['endog']
none_array_names += ['exog']
# deal with other arrays
combined_2d = ()
combined_2d_names = []
if len(kwargs):
for key, value_array in iteritems(kwargs):
if value_array is None or value_array.ndim == 0:
none_array_names += [key]
continue
# grab 1d arrays
if value_array.ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
elif value_array.squeeze().ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
# grab 2d arrays that are _assumed_ to be symmetric
elif value_array.ndim == 2:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
raise ValueError("Arrays with more than 2 dimensions "
"aren't yet handled")
if missing_idx is not None:
nan_mask = missing_idx
updated_row_mask = None
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra arrays given to model.")
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
combined_2d_nans = _nan_rows(*combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra 2d arrays given to model.")
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
updated_row_mask = combined_2d_nans[~nan_mask]
nan_mask |= combined_2d_nans
else:
nan_mask = _nan_rows(*combined)
if combined_2d:
nan_mask = _nan_rows(*(nan_mask[:, None],) + combined_2d)
if not np.any(nan_mask): # no missing don't do anything
combined = dict(zip(combined_names, combined))
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
if missing_idx is not None:
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
return combined, []
elif missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
elif missing == 'drop':
nan_mask = ~nan_mask
drop_nans = lambda x: cls._drop_nans(x, nan_mask)
drop_nans_2d = lambda x: cls._drop_nans_2d(x, nan_mask)
combined = dict(zip(combined_names, lmap(drop_nans, combined)))
if missing_idx is not None:
if updated_row_mask is not None:
updated_row_mask = ~updated_row_mask
# update endog/exog with this new information
endog = cls._drop_nans(endog, updated_row_mask)
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
if combined_2d:
combined.update(dict(zip(combined_2d_names,
lmap(drop_nans_2d, combined_2d))))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
return combined, np.where(~nan_mask)[0].tolist()
else:
raise ValueError("missing option %s not understood" % missing)
def _convert_endog_exog(self, endog, exog):
# for consistent outputs if endog is (n,1)
yarr = self._get_yarr(endog)
xarr = None
if exog is not None:
xarr = self._get_xarr(exog)
if xarr.ndim == 1:
xarr = xarr[:, None]
if xarr.ndim != 2:
raise ValueError("exog is not 1d or 2d")
return yarr, xarr
@cache_writable()
def ynames(self):
endog = self.orig_endog
ynames = self._get_names(endog)
if not ynames:
ynames = _make_endog_names(self.endog)
if len(ynames) == 1:
return ynames[0]
else:
return list(ynames)
@cache_writable()
def xnames(self):
exog = self.orig_exog
if exog is not None:
xnames = self._get_names(exog)
if not xnames:
xnames = _make_exog_names(self.exog)
return list(xnames)
return None
@property
def param_names(self):
# for handling names of 'extra' parameters in summary, etc.
return self._param_names or self.xnames
@param_names.setter
def param_names(self, values):
self._param_names = values
@cache_readonly
def row_labels(self):
exog = self.orig_exog
if exog is not None:
row_labels = self._get_row_labels(exog)
else:
endog = self.orig_endog
row_labels = self._get_row_labels(endog)
return row_labels
def _get_row_labels(self, arr):
return None
def _get_names(self, arr):
if isinstance(arr, DataFrame):
return list(arr.columns)
elif isinstance(arr, Series):
if arr.name:
return [arr.name]
else:
return
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
def _get_yarr(self, endog):
if data_util._is_structured_ndarray(endog):
endog = data_util.struct_to_ndarray(endog)
endog = np.asarray(endog)
if len(endog) == 1: # never squeeze to a scalar
if endog.ndim == 1:
return endog
elif endog.ndim > 1:
return np.asarray([endog.squeeze()])
return endog.squeeze()
def _get_xarr(self, exog):
if data_util._is_structured_ndarray(exog):
exog = data_util.struct_to_ndarray(exog)
return np.asarray(exog)
def _check_integrity(self):
if self.exog is not None:
if len(self.exog) != len(self.endog):
raise ValueError("endog and exog matrices are different sizes")
def wrap_output(self, obj, how='columns', names=None):
if how == 'columns':
return self.attach_columns(obj)
elif how == 'rows':
return self.attach_rows(obj)
elif how == 'cov':
return self.attach_cov(obj)
elif how == 'dates':
return self.attach_dates(obj)
elif how == 'columns_eq':
return self.attach_columns_eq(obj)
elif how == 'cov_eq':
return self.attach_cov_eq(obj)
elif how == 'generic_columns':
return self.attach_generic_columns(obj, names)
elif how == 'generic_columns_2d':
return self.attach_generic_columns_2d(obj, names)
else:
return obj
def attach_columns(self, result):
return result
def attach_columns_eq(self, result):
return result
def attach_cov(self, result):
return result
def attach_cov_eq(self, result):
return result
def attach_rows(self, result):
return result
def attach_dates(self, result):
return result
def attach_generic_columns(self, result, *args, **kwargs):
return result
def attach_generic_columns_2d(self, result, *args, **kwargs):
return result
class PatsyData(ModelData):
def _get_names(self, arr):
return arr.design_info.column_names
class PandasData(ModelData):
"""
Data handling class which knows how to reattach pandas metadata to model
results
"""
def _convert_endog_exog(self, endog, exog=None):
#TODO: remove this when we handle dtype systematically
endog = np.asarray(endog)
exog = exog if exog is None else np.asarray(exog)
if endog.dtype == object or exog is not None and exog.dtype == object:
raise ValueError("Pandas data cast to numpy dtype of object. "
"Check input data with np.asarray(data).")
return super(PandasData, self)._convert_endog_exog(endog, exog)
@classmethod
def _drop_nans(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans(x, nan_mask)
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask].ix[:, nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans_2d(x, nan_mask)
def _check_integrity(self):
endog, exog = self.orig_endog, self.orig_exog
# exog can be None and we could be upcasting one or the other
if (exog is not None and
(hasattr(endog, 'index') and hasattr(exog, 'index')) and
not self.orig_endog.index.equals(self.orig_exog.index)):
raise ValueError("The indices for endog and exog are not aligned")
super(PandasData, self)._check_integrity()
def _get_row_labels(self, arr):
try:
return arr.index
except AttributeError:
# if we've gotten here it's because endog is pandas and
# exog is not, so just return the row labels from endog
return self.orig_endog.index
def attach_generic_columns(self, result, names):
# get the attribute to use
column_names = getattr(self, names, None)
return Series(result, index=column_names)
def attach_generic_columns_2d(self, result, rownames, colnames=None):
colnames = colnames or rownames
rownames = getattr(self, rownames, None)
colnames = getattr(self, colnames, None)
return DataFrame(result, index=rownames, columns=colnames)
def attach_columns(self, result):
# this can either be a 1d array or a scalar
# don't squeeze because it might be a 2d row array
# if it needs a squeeze, the bug is elsewhere
if result.ndim <= 1:
return Series(result, index=self.param_names)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.param_names)
def attach_columns_eq(self, result):
return DataFrame(result, index=self.xnames, columns=self.ynames)
def attach_cov(self, result):
return DataFrame(result, index=self.param_names,
columns=self.param_names)
def attach_cov_eq(self, result):
return DataFrame(result, index=self.ynames, columns=self.ynames)
def attach_rows(self, result):
# assumes if len(row_labels) > len(result) it's bc it was truncated
# at the front, for AR lags, for example
if result.squeeze().ndim == 1:
return Series(result, index=self.row_labels[-len(result):])
else: # this is for VAR results, may not be general enough
return DataFrame(result, index=self.row_labels[-len(result):],
columns=self.ynames)
def attach_dates(self, result):
return TimeSeries(result, index=self.predict_dates)
def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
ynames = ['y']
else: # for VAR
ynames = ['y%d' % (i+1) for i in range(endog.shape[1])]
return ynames
def _make_exog_names(exog):
exog_var = exog.var(0)
if (exog_var == 0).any():
# assumes one constant in first or last position
# avoid exception if more than one constant
const_idx = exog_var.argmin()
exog_names = ['x%d' % i for i in range(1, exog.shape[1])]
exog_names.insert(const_idx, 'const')
else:
exog_names = ['x%d' % i for i in range(1, exog.shape[1]+1)]
return exog_names
def handle_missing(endog, exog=None, missing='none', **kwargs):
klass = handle_data_class_factory(endog, exog)
if missing == 'none':
ret_dict = dict(endog=endog, exog=exog)
ret_dict.update(kwargs)
return ret_dict, None
return klass.handle_missing(endog, exog, missing=missing, **kwargs)
def handle_data_class_factory(endog, exog):
"""
Given endog and exog, return the ModelData subclass that can handle them.
"""
if data_util._is_using_ndarray_type(endog, exog):
klass = ModelData
elif data_util._is_using_pandas(endog, exog):
klass = PandasData
elif data_util._is_using_patsy(endog, exog):
klass = PatsyData
# keep this check last
elif data_util._is_using_ndarray(endog, exog):
klass = ModelData
else:
raise ValueError('unrecognized data structures: %s / %s' %
(type(endog), type(exog)))
return klass
def handle_data(endog, exog, missing='none', hasconst=None, **kwargs):
# deal with lists and tuples up-front
if isinstance(endog, (list, tuple)):
endog = np.asarray(endog)
if isinstance(exog, (list, tuple)):
exog = np.asarray(exog)
klass = handle_data_class_factory(endog, exog)
return klass(endog, exog=exog, missing=missing, hasconst=hasconst,
**kwargs)
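# Editor's usage sketch: handle_data dispatches on input type, e.g.
#   import numpy as np
#   data = handle_data(np.array([1., 2., 3.]), np.array([[1.], [2.], [3.]]))
#   type(data).__name__  # -> 'ModelData'; pandas inputs yield PandasData and
#                        #    patsy design matrices yield PatsyData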
|
bsd-3-clause
|
nrhine1/scikit-learn
|
examples/ensemble/plot_bias_variance.py
|
357
|
7324
|
"""
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
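    # (The double loop below averages squared differences over every pair of
    # training set i and noise realization j, estimating the expected error
    # E[(y - yhat(x))^2] pointwise in x.)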
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
    plt.plot(X_test, y_bias, "b", label="$bias^2(x)$")
    plt.plot(X_test, y_var, "g", label="$variance(x)$")
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
|
bsd-3-clause
|
ztemt/NX507J_Lollipop_kernel
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
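# A test specification consists of colon-separated lines of the form
#   <command>:<opcode>:<thread-id>:<data>
# For example (hypothetical lines): "C: locknowait: 0: 0" issues the
# locknowait command to thread 0, while "W: locked: 0: 0" waits until
# thread 0 reports the locked state.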
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
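# Worked example (hypothetical status value): for a thread-state field
# carrying the digits "123", analyse("123", test_opcodes["blocked"], "1")
# picks digit 1 counting from the right (123 / 10**1 % 10 == 2), compares it
# for equality against the expected state 2, and returns 1 (match).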
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
transientskp/tkp
|
tkp/telescope/lofar/noise.py
|
3
|
5683
|
"""
functions for calculating theoretical noise levels of LOFAR equipment.
For more information about the math used here read the `sensitivity of the
LOFAR array page
<http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/sensitivity-lofar-array/sensiti>`_.
To check the values calculated here one can use this `LOFAR image noise
calculator <http://www.astron.nl/~heald/test/sens.php>`_.
"""
import math
import logging
import warnings
import scipy.constants
import scipy.interpolate
from tkp.telescope.lofar import antennaarrays
logger = logging.getLogger(__name__)
ANTENNAE_PER_TILE = 16
TILES_PER_CORE_STATION = 24
TILES_PER_REMOTE_STATION = 48
TILES_PER_INTL_STATION = 96
def noise_level(freq_eff, bandwidth, tau_time, antenna_set, Ncore, Nremote,
Nintl):
"""
Returns the theoretical noise level (in Jy) given the supplied array
antenna_set.
:param bandwidth: in Hz
:param tau_time: in seconds
:param inner: in case of LBA, inner or outer
:param antenna_set: LBA_INNER, LBA_OUTER, LBA_SPARSE, LBA or HBA
"""
if antenna_set.startswith("LBA"):
ds_core = antennaarrays.core_dipole_distances[antenna_set]
Aeff_core = sum([Aeff_dipole(freq_eff, x) for x in ds_core])
ds_remote = antennaarrays.remote_dipole_distances[antenna_set]
Aeff_remote = sum([Aeff_dipole(freq_eff, x) for x in ds_remote])
ds_intl = antennaarrays.intl_dipole_distances[antenna_set]
Aeff_intl = sum([Aeff_dipole(freq_eff, x) for x in ds_intl])
else:
Aeff_core = ANTENNAE_PER_TILE * TILES_PER_CORE_STATION * \
Aeff_dipole(freq_eff)
Aeff_remote = ANTENNAE_PER_TILE * TILES_PER_REMOTE_STATION * \
Aeff_dipole(freq_eff)
Aeff_intl = ANTENNAE_PER_TILE * TILES_PER_INTL_STATION * \
Aeff_dipole(freq_eff)
# c = core, r = remote, i = international
# so for example cc is core-core baseline
Ssys_c = system_sensitivity(freq_eff, Aeff_core)
Ssys_r = system_sensitivity(freq_eff, Aeff_remote)
Ssys_i = system_sensitivity(freq_eff, Aeff_intl)
baselines_cc = (Ncore * (Ncore - 1)) / 2
baselines_rr = (Nremote * (Nremote - 1)) / 2
baselines_ii = (Nintl * (Nintl - 1)) / 2
baselines_cr = (Ncore * Nremote)
baselines_ci = (Ncore * Nintl)
baselines_ri = (Nremote * Nintl)
#baselines_total = baselines_cc + baselines_rr + baselines_ii +\
# baselines_cr + baselines_ci + baselines_ri
# baseline noise, for example cc is core-core
temp_cc = Ssys_c
temp_rr = Ssys_r
temp_ii = Ssys_i
#temp_cr = math.sqrt(SEFD_cc) * math.sqrt(SEFD_rr)
#temp_ci = math.sqrt(SEFD_cc) * math.sqrt(SEFD_ii)
#temp_ri = math.sqrt(SEFD_rr) * math.sqrt(SEFD_ii)
# The noise level in a LOFAR image
t_cc = baselines_cc / (temp_cc * temp_cc)
    t_rr = baselines_rr / (temp_rr * temp_rr)
t_ii = baselines_ii / (temp_ii * temp_ii)
t_cr = baselines_cr / (temp_cc * temp_rr)
t_ci = baselines_ci / (temp_cc * temp_ii)
t_ri = baselines_ri / (temp_rr * temp_ii)
# factor for increase of noise due to the weighting scheme
W = 1 # taken from PHP script
image_sens = W / math.sqrt(4 * bandwidth * tau_time *
(t_cc + t_rr + t_ii + t_cr + t_ci + t_ri))
return image_sens
def Aeff_dipole(freq_eff, distance=None):
"""
The effective area of each dipole in the array is determined by its
distance to the nearest dipole (d) within the full array.
:param freq_eff: Frequency
:param distance: Distance to nearest dipole, only required for LBA.
"""
wavelength = scipy.constants.c/freq_eff
if wavelength > 3: # LBA dipole
if not distance:
msg = "Distance to nearest dipole required for LBA noise calculation"
logger.error(msg)
warnings.warn(msg)
distance = 1
return min(pow(wavelength, 2) / 3, (math.pi * pow(distance, 2)) / 4)
else: # HBA dipole
return min(pow(wavelength, 2) / 3, 1.5625)
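# Worked example: at 60 MHz the wavelength is ~5 m (> 3 m, so the LBA branch
# applies), and for a dipole 1 m from its nearest neighbour
# Aeff_dipole(60e6, 1.0) == min(~8.32, math.pi / 4) ~= 0.785 m^2.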
def system_sensitivity(freq_eff, Aeff):
"""
Returns the SEFD of a system, given the freq_eff and effective
    collecting area. Returns the SEFD in Janskys.
"""
wavelength = scipy.constants.c / freq_eff
# Ts0 = 60 +/- 20 K for Galactic latitudes between 10 and 90 degrees.
Ts0 = 60
# system efficiency factor (~ 1.0)
n = 1
# For all LOFAR frequencies the sky brightness temperature is dominated by
# the Galactic radiation, which depends strongly on the wavelength
Tsky = Ts0 * wavelength ** 2.55
#The instrumental noise temperature follows from measurements or simulations
# This is a quick & dirty approach based roughly on Fig 5 here
# <http://www.skatelescope.org/uploaded/59513_113_Memo_Nijboer.pdf>
sensitivities = [
(0, 0),
(10e6, 0.1 * Tsky),
(40e6, 0.7 * Tsky),
(50e6, 0.85 * Tsky),
(55e6, 0.9 * Tsky),
(60e6, 0.85 * Tsky),
(70e6, 0.6 * Tsky),
(80e6, 0.3 * Tsky),
(90e6, 0 * Tsky),
(110e6, 0 * Tsky),
(120e6, 200),
(300e6, 200)
]
x, y = zip(*sensitivities)
sensitivity = scipy.interpolate.interp1d(x, y, kind='linear')
Tinst = sensitivity(freq_eff)
Tsys = Tsky + Tinst
# SEFD or system sensitivity
S = (2 * n * scipy.constants.k / Aeff) * Tsys
# S is in Watts per square metre per Hertz. One Jansky = 10**-26 Watts/sq
# metre/Hz
return S * 10**26
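# A minimal usage sketch (station counts are hypothetical; assumes the
# antennaarrays module is importable):
#
#   sigma = noise_level(freq_eff=150e6, bandwidth=3.66e6, tau_time=3600,
#                       antenna_set="HBA", Ncore=40, Nremote=9, Nintl=0)
#
# giving the expected image noise in Jy for a one-hour HBA observation.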
|
bsd-2-clause
|
Almad/django-sane-testing
|
djangosanetesting/runnercompat.py
|
1
|
16346
|
import sys
import signal
import unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
#This module was taken from Django 1.2.4 source code and embedded for backward
#compatibility.
#
#Copyright (c) Django Software Foundation
#Distributed under BSD license
try:
all
except NameError:
from django.utils.itercompat import all
# The module name for tests outside models.py
TEST_MODULE = 'tests'
class DjangoTestRunner(unittest.TextTestRunner):
def run(self, *args, **kwargs):
"""
Runs the test suite after registering a custom signal handler
that triggers a graceful exit when Ctrl-C is pressed.
"""
        self._keyboard_interrupt_intercepted = False
        self._default_keyboard_interrupt_handler = signal.signal(signal.SIGINT,
            self._keyboard_interrupt_handler)
try:
result = super(DjangoTestRunner, self).run(*args, **kwargs)
finally:
signal.signal(signal.SIGINT, self._default_keyboard_interrupt_handler)
return result
def _keyboard_interrupt_handler(self, signal_number, stack_frame):
"""
Handles Ctrl-C by setting a flag that will stop the test run when
the currently running test completes.
"""
self._keyboard_interrupt_intercepted = True
sys.stderr.write(" <Test run halted by Ctrl-C> ")
# Set the interrupt handler back to the default handler, so that
# another Ctrl-C press will trigger immediate exit.
signal.signal(signal.SIGINT, self._default_keyboard_interrupt_handler)
def _makeResult(self):
result = super(DjangoTestRunner, self)._makeResult()
failfast = self.failfast
def stoptest_override(func):
def stoptest(test):
# If we were set to failfast and the unit test failed,
# or if the user has typed Ctrl-C, report and quit
if (failfast and not result.wasSuccessful()) or \
self._keyboard_interrupt_intercepted:
result.stop()
func(test)
return stoptest
setattr(result, 'stopTest', stoptest_override(result.stopTest))
return result
def get_tests(app_module):
try:
app_path = app_module.__name__.split('.')[:-1]
test_module = __import__('.'.join(app_path + [TEST_MODULE]), {}, {}, TEST_MODULE)
except ImportError, e:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
import os.path
from imp import find_module
try:
mod = find_module(TEST_MODULE, [os.path.dirname(app_module.__file__)])
except ImportError:
# 'tests' module doesn't exist. Move on.
test_module = None
else:
# The module exists, so there must be an import error in the
# test module itself. We don't need the module; so if the
# module was a single file module (i.e., tests.py), close the file
# handle returned by find_module. Otherwise, the test module
# is a directory, and there is nothing to close.
if mod[0]:
mod[0].close()
raise
return test_module
def build_suite(app_module):
"Create a complete Django test suite for the provided application module"
from django.test import _doctest as doctest
from django.test.testcases import OutputChecker, DocTestRunner
doctestOutputChecker = OutputChecker()
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module))
try:
suite.addTest(doctest.DocTestSuite(app_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module))
try:
suite.addTest(doctest.DocTestSuite(test_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""Construct a test case with the specified label. Label should be of the
form model.TestClass or model.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
from django.db.models import get_app
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, unittest.TestCase):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(TestClass)
except TypeError:
raise ValueError("Test label '%s' does not refer to a test class" % label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
from django.test import _doctest as doctest
from django.test.testcases import OutputChecker, DocTestRunner
doctestOutputChecker = OutputChecker()
tests = []
for module in app_module, test_module:
try:
doctests = doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
classes is a sequence of types
    All tests of type classes[0] are placed first, then tests of type classes[1], etc.
Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count+1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i+1])
return bins[0]
def dependency_ordered(test_databases, dependencies):
"""Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
while test_databases:
changed = False
deferred = []
while test_databases:
signature, (db_name, aliases) = test_databases.pop()
dependencies_satisfied = True
for alias in aliases:
if alias in dependencies:
if all(a in resolved_databases for a in dependencies[alias]):
# all dependencies for this alias are satisfied
dependencies.pop(alias)
resolved_databases.add(alias)
else:
dependencies_satisfied = False
else:
resolved_databases.add(alias)
if dependencies_satisfied:
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
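# For example (hypothetical aliases): with
#   test_databases = [('sig_a', ('a_db', ['a'])), ('sig_b', ('b_db', ['b']))]
#   dependencies = {'a': ['b']}
# the 'a' entry is deferred until 'b' has been resolved, so the returned
# order is the 'b' entry followed by the 'a' entry.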
class DjangoTestSuiteRunner(object):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
from django.test.utils import setup_test_environment
setup_test_environment()
settings.DEBUG = False
def build_suite(self, test_labels, extra_tests=None, **kwargs):
        from django.db.models import get_app, get_apps
from django.test.testcases import TestCase
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, (TestCase,))
def setup_databases(self, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
for alias in connections:
connection = connections[alias]
if connection.settings_dict['TEST_MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], [])
)
item[1].append(alias)
if 'TEST_DEPENDENCIES' in connection.settings_dict:
dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS:
dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(test_databases.items(), dependencies):
# Actually create the database for the first connection
connection = connections[aliases[0]]
old_names.append((connection, db_name, True))
test_db_name = connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
for alias in aliases[1:]:
connection = connections[alias]
if db_name:
old_names.append((connection, db_name, False))
connection.settings_dict['NAME'] = test_db_name
else:
# If settings_dict['NAME'] isn't defined, we have a backend where
# the name isn't important -- e.g., SQLite, which uses :memory:.
# Force create the database instead of assuming it's a duplicate.
old_names.append((connection, db_name, True))
connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']
return old_names, mirrors
def run_suite(self, suite, **kwargs):
return DjangoTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
from django.db import connections
old_names, mirrors = old_config
# Point all the mirrors back to the originals
for alias, old_name in mirrors:
connections[alias].settings_dict['NAME'] = old_name
# Destroy all the non-mirror databases
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
else:
connection.settings_dict['NAME'] = old_name
def teardown_test_environment(self, **kwargs):
from django.test.utils import teardown_test_environment
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
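# A minimal usage sketch (app labels are hypothetical):
#
#   runner = DjangoTestSuiteRunner(verbosity=1, interactive=False)
#   failures = runner.run_tests(['myapp', 'otherapp.SomeTestCase'])
#
# run_tests returns the number of failed/errored tests, suitable for use as
# an exit status.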
|
bsd-3-clause
|
KaranToor/MA450
|
google-cloud-sdk/.install/.backup/platform/gsutil/gslib/addlhelp/anon.py
|
10
|
2147
|
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help text for anonymous access."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
gsutil users can access publicly readable data without obtaining
credentials. For example, the gs://uspto-pair bucket contains a number
of publicly readable objects, so any user can run the following command
without first obtaining credentials:
gsutil ls gs://uspto-pair/applications/0800401*
Users can similarly download objects they find via the above gsutil ls
command.
See "gsutil help acls" for more details about data protection.
<B>Configuring/Using Credentials via Cloud SDK Distribution of gsutil</B>
If a user without credentials attempts to access protected data using gsutil,
they will be prompted to run gcloud init to obtain credentials.
<B>Configuring/Using Credentials via Standalone gsutil Distribution</B>
If a user without credentials attempts to access protected data using gsutil,
they will be prompted to run gsutil config to obtain credentials.
""")
class CommandOptions(HelpProvider):
"""Additional help text for anonymous access."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='anon',
help_name_aliases=['anonymous', 'public'],
help_type='additional_help',
help_one_line_summary='Accessing Public Data Without Credentials',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
apache-2.0
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/trial/_dist/__init__.py
|
57
|
1941
|
# -*- test-case-name: twisted.trial._dist.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This package implements the distributed Trial test runner:
- The L{twisted.trial._dist.disttrial} module implements a test runner which
runs in a manager process and can launch additional worker processes in
which to run tests and gather up results from all of them.
- The L{twisted.trial._dist.options} module defines command line options used
to configure the distributed test runner.
- The L{twisted.trial._dist.managercommands} module defines AMP commands
which are sent from worker processes back to the manager process to report
the results of tests.
- The L{twisted.trial._dist.workercommands} module defines AMP commands which
are sent from the manager process to the worker processes to control the
execution of tests there.
- The L{twisted.trial._dist.distreporter} module defines a proxy for
L{twisted.trial.itrial.IReporter} which enforces the typical requirement
that results be passed to a reporter for only one test at a time, allowing
  any reporter to be used despite disttrial's simultaneously running
tests.
- The L{twisted.trial._dist.workerreporter} module implements a
L{twisted.trial.itrial.IReporter} which is used by worker processes and
reports results back to the manager process using AMP commands.
- The L{twisted.trial._dist.workertrial} module is a runnable script which is
the main point for worker processes.
- The L{twisted.trial._dist.worker} module defines the manager's AMP
protocol for accepting results from worker processes and a process protocol
for use running workers as local child processes (as opposed to
distributing them to another host).
@since: 12.3
"""
# File descriptors numbers used to set up pipes with the worker.
_WORKER_AMP_STDIN = 3
_WORKER_AMP_STDOUT = 4
|
gpl-3.0
|
peervalhoegen/SudoQ
|
utilities/compareStringFiles.py
|
2
|
1120
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#usage: python comparebla.py english foreign ignore
import sys
import datetime
import re
from collections import defaultdict
englishLinesRaw = open(sys.argv[1]).readlines()
#fill a dictionary variable_name:string_value for english
only_english_strings = [l.strip() for l in filter(lambda x: '<string' in x, englishLinesRaw)]
engDic = {}
p_value = re.compile('>(.*)<')
for line in only_english_strings:
key = line.split('"')[1]
value = p_value.search(line).group(1)
engDic[key]=value
#delete all keys found in foreign
foreignLinesRaw = open(sys.argv[2]).readlines()
only_foreign_strings = [l.strip() for l in filter(lambda x: '<string' in x, foreignLinesRaw)]
for l in only_foreign_strings:
key = l.split('"')[1]
if key not in engDic:
print l
#print engDic
print key
    engDic.pop(key, None)  # tolerate keys present only in the foreign file
#delete all keys that are international e.g. ✔ or otherwise never altered in foreign localization
ignored = map(lambda x: x.strip(), open(sys.argv[3]).readlines())
for s in ignored:
    engDic.pop(s, None)  # ignore-list entries may be absent from the dict
#unique keys remain
for k,v in engDic.iteritems():
print k.ljust(60)+v
|
gpl-3.0
|
sunze/py_flask
|
venv/lib/python3.4/site-packages/mako/ext/pygmentplugin.py
|
60
|
4540
|
# ext/pygmentplugin.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer, Python3Lexer
from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \
include, using
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Other
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
from mako import compat
class MakoLexer(RegexLexer):
name = 'Mako'
aliases = ['mako']
filenames = ['*.mao']
tokens = {
'root': [
(r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)',
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
(r'(<%)([\w\.\:]+)',
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w\.\:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
(r'(<%(?:!?))(.*?)(%>)(?s)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=%(?!%)|\#\#) | # an eval or comment line
(?=\#\*) | # multiline comment
(?=</?%) | # a python block
# call start or end
(?=\$\{) | # a substitution
(?<=\n)(?=\s*%) |
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)
''', bygroups(Other, Operator)),
(r'\s+', Text),
],
'ondeftags': [
(r'<%', Comment.Preproc),
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
include('tag'),
],
'tag': [
(r'((?:\w+)\s*=)\s*(".*?")',
bygroups(Name.Attribute, String)),
(r'/?\s*>', Comment.Preproc, '#pop'),
(r'\s+', Text),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class MakoHtmlLexer(DelegatingLexer):
name = 'HTML+Mako'
aliases = ['html+mako']
def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options)
class MakoXmlLexer(DelegatingLexer):
name = 'XML+Mako'
aliases = ['xml+mako']
def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options)
class MakoJavascriptLexer(DelegatingLexer):
name = 'JavaScript+Mako'
aliases = ['js+mako', 'javascript+mako']
def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
name = 'CSS+Mako'
aliases = ['css+mako']
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted',
linenos=True)
def syntax_highlight(filename='', language=None):
mako_lexer = MakoLexer()
if compat.py3k:
python_lexer = Python3Lexer()
else:
python_lexer = PythonLexer()
if filename.startswith('memory:') or language == 'mako':
return lambda string: highlight(string, mako_lexer,
pygments_html_formatter)
return lambda string: highlight(string, python_lexer,
pygments_html_formatter)
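# A minimal usage sketch (the template string is hypothetical):
#
#   render = syntax_highlight(language='mako')
#   html = render("hello ${name}")
#
# The returned callable highlights the string with MakoLexer and the
# HtmlFormatter configured above ('syntax-highlighted' CSS class, line
# numbers enabled).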
|
mit
|
Intel-Corporation/tensorflow
|
tensorflow/contrib/data/python/ops/counter.py
|
22
|
1757
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Counter Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.framework import dtypes
from tensorflow.python.util import deprecation
@deprecation.deprecated(None, "Use `tf.data.experimental.Counter(...)`.")
def Counter(start=0, step=1, dtype=dtypes.int64):
"""Creates a `Dataset` that counts from `start` in steps of size `step`.
For example:
```python
Dataset.count() == [0, 1, 2, ...)
Dataset.count(2) == [2, 3, ...)
Dataset.count(2, 5) == [2, 7, 12, ...)
Dataset.count(0, -1) == [0, -1, -2, ...)
Dataset.count(10, -1) == [10, 9, ...)
```
Args:
start: (Optional.) The starting value for the counter. Defaults to 0.
step: (Optional.) The step size for the counter. Defaults to 1.
dtype: (Optional.) The data type for counter elements. Defaults to
`tf.int64`.
Returns:
A `Dataset` of scalar `dtype` elements.
"""
return counter.Counter(start, step, dtype)
|
apache-2.0
|
zhaohuaw/stock-logistics-warehouse
|
__unported__/stock_inventory_with_location/__openerp__.py
|
10
|
1555
|
# -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
{
"name" : "Move Inventory Extended",
"version" : "1.0",
"author" : "Julius Network Solutions,Odoo Community Association (OCA)",
"description" : """
Presentation:
This module uses the product's real location, if it exists, instead of the inventory location.
""",
"website" : "http://www.julius.fr",
"depends" : [
"stock",
"stock_tracking_extended",
],
"category" : "Customs/Stock",
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [],
'test': [],
'installable': False,
'active': False,
'certificate': '',
}
|
agpl-3.0
|
rhelmer/socorro
|
socorro/unittest/processor/test_breakpad_pipe_to_json.py
|
11
|
15280
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from nose.tools import eq_, ok_
import socorro.processor.breakpad_pipe_to_json as bpj
from socorro.lib.util import DotDict
from socorro.unittest.testbase import TestCase
canonical_json_dump = {
#"status": ,
"system_info": {
'os': 'Windows NT',
'os_ver': '5.1.2600 Service Pack 2',
"cpu_arch": 'x86',
"cpu_info": 'GenuineIntel family 6 model 22 stepping 1',
"cpu_count": 4
},
"crash_info": {
"type": 'EXCEPTION_ACCESS_VIOLATION_READ',
"crash_address": '0x676c',
"crashing_thread": 0
},
"main_module": 0,
"modules": [
{
"filename": 'firefox.exe',
"version": '24.0.0.4925',
"debug_file": 'firefox.pdb',
"debug_id": '9FFDDF56AADE45988C759EF5ABAE53862',
"base_addr": '0x00400000',
"end_addr": '0x004e0fff',
},
{
"filename": 'nss3.dll',
"version": '24.0.0.4925',
"debug_file": 'nss3.pdb',
"debug_id": '30EAD90FEEBD495398D46EFA41814E261',
"base_addr": '0x00a00000',
"end_addr": '0x00bb5fff',
},
{
"filename": 'mozjs.dll',
"debug_file": 'mozjs.pdb',
"debug_id": 'CC7AA5DA1FB144C4B40C2DF1B08709232',
"base_addr": '0x00bd0000',
"end_addr": '0x00ef9fff',
},
{
"filename": 'mozalloc.dll',
"version": '24.0.0.4925',
"debug_file": 'mozalloc.pdb',
"debug_id": 'F4C1BFD2BA3A487CA37EBF3D7E543F7B1',
"base_addr": '0x01000000',
"end_addr": '0x01005fff',
},
{
"filename": 'gkmedias.dll',
"version": '24.0.0.4925',
"debug_file": 'gkmedias.pdb',
"debug_id": '02FE96BEFEAE4570AA12E766CF2C8A361',
"base_addr": '0x01010000',
"end_addr": '0x01337fff',
},
],
"thread_count": 2,
"threads": [
{
"frame_count": 13,
"frames": [
{
"frame": 0,
"module": "mozjs.dll",
"function": "bogus_sig_1",
"file": "jsinferinlines.h:17666746e8cc",
"line": 1321,
},
{
"frame": 1,
"module": "mozjs.dll",
"function": "bogus_sig_2",
"file": "jsobj.cpp:17666746e8cc",
"line": 1552,
},
{
"frame": 2,
"module": "mozjs.dll",
"function": "bogus_sig_3",
"file": "CodeGenerator.cpp:17666746e8cc",
"line": 3119,
},
{
"frame": 3,
"module": "mozjs.dll",
"module_offset": "0xcc9d0",
},
{
"frame": 4,
"offset": "0x80b6fe0",
},
{
"frame": 5,
"offset": "0x3cf5ee6",
},
{
"frame": 6,
"module": "mozjs.dll",
"function": "bogus_sig_7",
"file": "BaselineJIT.cpp:17666746e8cc",
"line": 105,
},
{
"frame": 7,
"module": "mozjs.dll",
"function": "bogus_sig_8",
"file": "BaselineCompiler-shared.cpp:17666746e8cc",
"line": 71,
},
{
"frame": 8,
"module": "mozjs.dll",
"function": "bogus_sig_9",
"file": "Ion.cpp:17666746e8cc",
"line": 1708,
},
{
"frame": 9,
"module": "mozjs.dll",
"function": "bogus_sig_10",
"file": "Interpreter.cpp:17666746e8cc",
"line": 2586,
},
{
"frame": 10,
"module": "mozjs.dll",
"function": "bogus_sig_11",
"file": "Interpreter.cpp:17666746e8cc",
"line": 438,
},
{
"frame": 11,
"module": "mozjs.dll",
"function": "bogus_sig_12",
"file": "Interpreter.cpp:17666746e8cc",
"line": 622,
},
{
"frame": 12,
"module": "mozjs.dll",
"function": "bogus_sig_13",
"file": "Interpreter.cpp:17666746e8cc",
"line": 659,
},
]
},
{
"frame_count": 2,
"frames": [
{
"frame": 0,
"module": "lars_crash.dll",
"function": "ha_ha",
"file": "no source",
"line": 0,
},
{
"frame": 1,
"module": "lars_crash.dll",
"function": "ha_ha2",
"file": "no source",
"line": 0,
},
]
}
],
"crashing_thread": {
"threads_index": 0,
"total_frames": 13,
"frames": [
{
"frame": 0,
"module": "mozjs.dll",
"function": "bogus_sig_1",
"file": "jsinferinlines.h:17666746e8cc",
"line": 1321,
},
{
"frame": 1,
"module": "mozjs.dll",
"function": "bogus_sig_2",
"file": "jsobj.cpp:17666746e8cc",
"line": 1552,
},
{
"frame": 2,
"module": "mozjs.dll",
"function": "bogus_sig_3",
"file": "CodeGenerator.cpp:17666746e8cc",
"line": 3119,
},
{
"frame": 3,
"module": "mozjs.dll",
"module_offset": "0xcc9d0",
},
{
"frame": 4,
"offset": "0x80b6fe0",
},
{
"frame": 5,
"offset": "0x3cf5ee6",
},
{
"frame": 6,
"module": "mozjs.dll",
"function": "bogus_sig_7",
"file": "BaselineJIT.cpp:17666746e8cc",
"line": 105,
},
{
"frame": 7,
"module": "mozjs.dll",
"function": "bogus_sig_8",
"file": "BaselineCompiler-shared.cpp:17666746e8cc",
"line": 71,
},
{
"frame": 8,
"module": "mozjs.dll",
"function": "bogus_sig_9",
"file": "Ion.cpp:17666746e8cc",
"line": 1708,
},
{
"frame": 9,
"module": "mozjs.dll",
"function": "bogus_sig_10",
"file": "Interpreter.cpp:17666746e8cc",
"line": 2586,
},
]
}
}
class TestCase(TestCase):
def test_get(self):
a_list = ['a', 'b', 'c']
eq_(bpj._get(a_list, 0, None), 'a')
eq_(bpj._get(a_list, 1, None), 'b')
eq_(bpj._get(a_list, 2, None), 'c')
eq_(bpj._get(a_list, 3, None), None)
def test_get_int(self):
a_list = ['a', '1', 'c']
eq_(bpj._get_int(a_list, 0, None), None)
eq_(bpj._get_int(a_list, 1, None), 1)
eq_(bpj._get_int(a_list, 2, None), None)
eq_(bpj._get_int(a_list, 3, None), None)
def test_extract_OS_info(self):
info = ['OS', 'Windows NT', '5.1.2600 Service Pack 2']
d = DotDict()
bpj._extract_OS_info(info, d)
ok_('system_info' in d)
eq_(
d.system_info,
{
'os': 'Windows NT',
'os_ver': '5.1.2600 Service Pack 2'
}
)
def test_extract_OS_info_fail(self):
info = ['OS',]
d = DotDict()
bpj._extract_OS_info(info, d)
ok_('system_info' in d)
eq_(d.system_info, {})
def test_extract_CPU_info(self):
info = ['CPU', 'x86', 'GenuineIntel family 6 model 22 stepping 1', 1]
d = DotDict()
bpj._extract_CPU_info(info, d)
ok_('system_info' in d)
eq_(
d.system_info,
{
"cpu_arch": 'x86',
"cpu_info": 'GenuineIntel family 6 model 22 stepping 1',
"cpu_count": 1
}
)
def test_extract_OS_and_CPU_info(self):
info = ['OS', 'Windows NT', '5.1.2600 Service Pack 2']
d = DotDict()
bpj._extract_OS_info(info, d)
info = ['CPU', 'x86', 'GenuineIntel family 6 model 22 stepping 1', 1]
bpj._extract_CPU_info(info, d)
ok_('system_info' in d)
eq_(
d.system_info,
{
'os': 'Windows NT',
'os_ver': '5.1.2600 Service Pack 2',
"cpu_arch": 'x86',
"cpu_info": 'GenuineIntel family 6 model 22 stepping 1',
"cpu_count": 1
}
)
def test_extract_crash_info(self):
info = ['Crash', 'EXCEPTION_ACCESS_VIOLATION_READ', '0x676c', 1]
d = DotDict()
crashing_thread = bpj._extract_crash_info(info, d)
ok_('crash_info' in d)
eq_(
d.crash_info,
{
"type": 'EXCEPTION_ACCESS_VIOLATION_READ',
"crash_address": '0x676c',
"crashing_thread": 1
}
)
eq_(crashing_thread, 1)
def test_extract_module_info(self):
info = ['Module', 'firefox.exe', '24.0.0.4925', 'firefox.pdb',
'9FFDDF56AADE45988C759EF5ABAE53862', '0x00400000',
'0x004e0fff', '1']
d = DotDict()
bpj._extract_module_info(info, d, 17)
ok_('modules' in d)
ok_(len(d.modules), 1)
eq_(d.main_module, 17)
eq_(
d.modules[0],
{
"filename": 'firefox.exe',
"version": '24.0.0.4925',
"debug_file": 'firefox.pdb',
"debug_id": '9FFDDF56AADE45988C759EF5ABAE53862',
"base_addr": '0x00400000',
"end_addr": '0x004e0fff',
}
)
def test_extract_module_info_not_main(self):
info = ['Module', 'firefloosy.exe', '24.0.0.4925', 'firefox.pdb',
'9FFDDF56AADE45988C759EF5ABAE53862', '0x00400000',
'0x004e0fff', '0']
d = DotDict()
bpj._extract_module_info(info, d, 17)
ok_('modules' in d)
ok_(len(d.modules), 1)
ok_('main_module' not in d)
eq_(
d.modules[0],
{
"filename": 'firefloosy.exe',
"version": '24.0.0.4925',
"debug_file": 'firefox.pdb',
"debug_id": '9FFDDF56AADE45988C759EF5ABAE53862',
"base_addr": '0x00400000',
"end_addr": '0x004e0fff',
}
)
def test_extract_frame_inf(self):
info = ['0', '12', 'msvcr100.dll', '_callthreadstartex',
'f:\\src\\threadex.c', '314', '0x6']
d = DotDict()
bpj._extract_frame_info(info, d)
ok_('threads' in d)
eq_(len(d.threads), 1)
eq_(
d.threads[0],
{
"frame_count": 1,
"frames": [
{
"frame": 12,
"module": 'msvcr100.dll',
"function": '_callthreadstartex',
"file": 'f:\\src\\threadex.c',
"line": 314,
}
]
}
)
def test_extract_frame_info_frames_missing(self):
info = ['4', '12', 'msvcr100.dll', '_callthreadstartex',
'f:\\src\\threadex.c', '314', '0x6']
d = DotDict()
bpj._extract_frame_info(info, d)
ok_('threads' in d)
eq_(len(d.threads), 5)
eq_(
d.threads[4],
{
"frame_count": 1,
"frames": [
{
"frame": 12,
"module": 'msvcr100.dll',
"function": '_callthreadstartex',
"file": 'f:\\src\\threadex.c',
"line": 314,
}
]
}
)
def test_pipe_dump_to_json_dump(self):
pipe_dump = [
"OS|Windows NT|5.1.2600 Service Pack 2",
"CPU|x86|GenuineIntel family 6 model 22 stepping 1|4",
"Crash|EXCEPTION_ACCESS_VIOLATION_READ|0x676c|0",
"Module|firefox.exe|24.0.0.4925|firefox.pdb|9FFDDF56AADE45988C759EF5ABAE53862|0x00400000|0x004e0fff|1",
"Module|nss3.dll|24.0.0.4925|nss3.pdb|30EAD90FEEBD495398D46EFA41814E261|0x00a00000|0x00bb5fff|0",
"Module|mozjs.dll||mozjs.pdb|CC7AA5DA1FB144C4B40C2DF1B08709232|0x00bd0000|0x00ef9fff|0",
"Module|mozalloc.dll|24.0.0.4925|mozalloc.pdb|F4C1BFD2BA3A487CA37EBF3D7E543F7B1|0x01000000|0x01005fff|0",
"Module|gkmedias.dll|24.0.0.4925|gkmedias.pdb|02FE96BEFEAE4570AA12E766CF2C8A361|0x01010000|0x01337fff|0",
"",
"0|0|mozjs.dll|bogus_sig_1|jsinferinlines.h:17666746e8cc|1321|0x0",
"0|1|mozjs.dll|bogus_sig_2|jsobj.cpp:17666746e8cc|1552|0x2d",
"0|2|mozjs.dll|bogus_sig_3|CodeGenerator.cpp:17666746e8cc|3119|0x13",
"0|3|mozjs.dll||||0xcc9d0",
"0|4|||||0x80b6fe0",
"0|5|||||0x3cf5ee6",
"0|6|mozjs.dll|bogus_sig_7|BaselineJIT.cpp:17666746e8cc|105|0x20",
"0|7|mozjs.dll|bogus_sig_8|BaselineCompiler-shared.cpp:17666746e8cc|71|0x3d",
"0|8|mozjs.dll|bogus_sig_9|Ion.cpp:17666746e8cc|1708|0x1b",
"0|9|mozjs.dll|bogus_sig_10|Interpreter.cpp:17666746e8cc|2586|0x26",
"0|10|mozjs.dll|bogus_sig_11|Interpreter.cpp:17666746e8cc|438|0x9",
"0|11|mozjs.dll|bogus_sig_12|Interpreter.cpp:17666746e8cc|622|0x37",
"0|12|mozjs.dll|bogus_sig_13|Interpreter.cpp:17666746e8cc|659|0x1b",
"1|0|lars_crash.dll|ha_ha|no source|0|0x3|0x2|0x1",
"1|1|lars_crash.dll|ha_ha2|no source|0|0x5|0x1|0x3",
]
json_dump = bpj.pipe_dump_to_json_dump(pipe_dump)
        eq_(json_dump, canonical_json_dump)
|
mpl-2.0
|
grundprinzip/Impala
|
tests/comparison/common.py
|
3
|
11914
|
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from copy import deepcopy
# The model related modules (types.py, query.py, etc) are interconnected by circular
# imports which cause problems for the python import system. This module is intended to
# be the first of the circular modules imported. To be importable, no direct references
# are made to the other modules from this module's namespace. Instead, other modules are
# lazily imported using the following function. Keep in mind that python "globals" are
# module local; there is no such thing as a cross-module global.
__ALREADY_IMPORTED = False
def get_import(name):
global __ALREADY_IMPORTED
if not __ALREADY_IMPORTED:
from tests.comparison.types import (
Boolean,
Char,
DataType,
Float,
Int,
JOINABLE_TYPES,
Number,
Timestamp)
from tests.comparison.funcs import AggFunc, AnalyticFunc, Func
from tests.comparison.query import InlineView, Subquery, WithClauseInlineView
for key, value in locals().items():
globals()[key] = value
__ALREADY_IMPORTED = True
return globals()[name]
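# For example, a caller resolves a lazily imported name at use time:
#
#   Int = get_import('Int')
#
# The first call performs the imports above once; later calls simply read
# the cached name back out of this module's globals.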
class ValExpr(object):
  '''This is a class that represents a generic expr that results in a scalar.'''
@property
def type(self):
'''Returns the type that this expr evaluates to. The type may be Int or Char but
       never BigInt or String. Valid return values are the set defined in types.TYPES.
'''
return self.exact_type.get_generic_type()
@property
def exact_type(self):
'''Return the actual type of the val expr. For example "type" could return Int and
"exact_type" could return TinyInt.
'''
pass
@property
def base_type(self):
    '''Returns the lowest type in the type hierarchy that is not DataType. For non-
       numeric types, the return value will be the same as self.type. Numeric types
will return Number whereas self.type may be Decimal, Int, or Float.
'''
return self.type.get_base_type()
@property
def is_func(self):
'''Evaluates to True if this expr is an instance of a function.'''
return isinstance(self, get_import('Func'))
@property
def is_agg(self):
'''Evaluates to True if this expr is an instance of an aggregate function.'''
return isinstance(self, get_import('AggFunc'))
@property
def is_analytic(self):
'''Evaluates to True if this expr is an instance of an analytic function.'''
return isinstance(self, get_import('AnalyticFunc'))
@property
def contains_agg(self):
'''Evaluates to True if this expression is an aggregate function or contains an
aggregate function.
'''
if self.is_agg:
return True
return any(isinstance(arg, ValExpr) and arg.contains_agg for arg in self.args)
@property
def contains_analytic(self):
'''Evaluates to True if this expression is an analytic function or contains an
analytic function.
'''
if self.is_analytic:
return True
if self.is_func:
for arg in self.args:
if isinstance(arg, ValExpr) and arg.contains_analytic:
return True
@property
def contains_subquery(self):
'''Evaluates to True if this expression is a subquery or contains a subquery.'''
if self.is_subquery:
return True
if self.is_func:
for arg in self.args:
if isinstance(arg, ValExpr) and arg.contains_subquery:
return True
@property
def is_col(self):
return isinstance(self, Column)
@property
def is_constant(self):
return isinstance(self, get_import('DataType'))
@property
def is_subquery(self):
return isinstance(self, get_import('Subquery'))
@property
def returns_boolean(self):
return issubclass(self.type, get_import('Boolean'))
@property
def returns_int(self):
return issubclass(self.type, get_import('Int'))
@property
def returns_float(self):
return issubclass(self.type, get_import('Float'))
@property
def returns_char(self):
return issubclass(self.type, get_import('Char'))
@property
def returns_timestamp(self):
return issubclass(self.type, get_import('Timestamp'))
def iter_exprs(self, filter=None):
'''Return an iterator over all exprs that this expr contains as a function argument
including this expr itself.
'''
if not filter or filter(self):
yield self
def count_col_refs(self):
'''Return a dict with Columns as keys and the number of times the column was used
in this expr as values.
'''
col_ref_counts = defaultdict(int)
if self.is_func:
for arg in self.args:
if isinstance(arg, ValExpr):
for col, count in arg.count_col_refs().iteritems():
col_ref_counts[col] += count
elif self.is_col:
col_ref_counts[self] += 1
return col_ref_counts
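# For example (hypothetical objects): for a ValExpr equivalent to
# f(col_a, col_a, col_b), count_col_refs() returns {col_a: 2, col_b: 1}.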
class Column(ValExpr):
'''A representation of a column. All TableExprs will have Columns. So a Column
may belong to an InlineView as well as a standard Table.
This class is used in two ways:
       1) As a piece of metadata in a table definition. In this usage the col isn't
          intended to represent a val.
2) As an expr in a query, for example an item being selected or as part of
a JOIN condition. In this usage the col is more like a val, which is why
it implements/extends ValExpr.
'''
def __init__(self, owner, name, exact_type):
self.owner = owner
self.name = name
self._exact_type = exact_type
@property
def exact_type(self):
return self._exact_type
@exact_type.setter
def exact_type(self, exact_type):
self._exact_type = exact_type
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if not isinstance(other, Column):
return False
if self is other:
return True
return self.name == other.name and self.owner.identifier == other.owner.identifier
def __repr__(self):
return '%s<name: %s, type: %s>' % (
type(self).__name__, self.name, self.type.__name__)
def __deepcopy__(self, memo):
# Don't return a deep copy of owner, since that is a circular reference
return Column(self.owner, self.name, self.exact_type)
class ValExprList(list):
'''A list of ValExprs'''
@property
def by_type(self):
return get_import('DataType').group_by_type(self)
class TableExpr(object):
'''This class represents something that a query may use to SELECT from or JOIN on.'''
def identifier(self):
'''Returns either a table name or alias if one has been declared.'''
pass
def cols(self):
pass
@property
def unique_cols(self):
'''Returns a list of lists of Cols that in combination define a unique set of values
within the table. The returned list could be thought of as a list of uniqueness
constraints (though there may be no actual constraints or any other type of
enforcement).
'''
return ValExprList()
@property
def joinable_cols(self):
'''Returns a list of Cols that are of a type that is allowed in a JOIN. This is
mostly an Impala specific thing since Impala requires at least one equality based
join and not all types are allowed in equality comparisons. Also Boolean is
excluded because of low cardinality.
'''
joinable_types = tuple(get_import('JOINABLE_TYPES'))
return ValExprList(col for col in self.cols if issubclass(col.type, joinable_types))
@property
def col_types(self):
'''Returns a Set containing the various column types that this TableExpr contains.'''
return set(self.cols_by_type)
def is_visible(self):
'''If False is returned, columns from this TableExpr may only be used in JOIN
conditions. This is intended to be used to identify ANTI and SEMI joined table
exprs.
'''
pass
@property
def cols_by_type(self):
'''Group cols of the same type into lists and return a dict of the results.'''
return get_import('DataType').group_by_type(self.cols)
@property
def joinable_cols_by_type(self):
return get_import('DataType').group_by_type(self.joinable_cols)
@property
def is_table(self):
return isinstance(self, Table)
@property
def is_inline_view(self):
return isinstance(self, get_import('InlineView'))
@property
def is_with_clause_inline_view(self):
return isinstance(self, get_import('WithClauseInlineView'))
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.identifier == other.identifier
class Table(TableExpr):
'''Represents a standard database table.'''
def __init__(self, name):
self.name = name
self._cols = ValExprList()
self._unique_cols = ValExprList()
self.alias = None
self.is_visible = True # tables used in SEMI or ANTI JOINs are invisible
@property
def identifier(self):
return self.alias or self.name
@property
def cols(self):
return self._cols
@cols.setter
def cols(self, cols):
self._cols = cols
@property
def unique_cols(self):
return self._unique_cols
@unique_cols.setter
def unique_cols(self, unique_cols):
self._unique_cols = unique_cols
def __repr__(self):
return 'Table<name: %s, cols: %s>' \
% (self.name, ', '.join([str(col) for col in self.cols]))
def __deepcopy__(self, memo):
other = Table(self.name)
other.alias = self.alias
other.is_visible = self.is_visible
cols_by_name = dict()
# Copy the cols and set their owner to the copy of the TableExpr
for col in self._cols:
col = deepcopy(col, memo)
col.owner = other
cols_by_name[col.name] = col
other._cols = ValExprList(cols_by_name[col.name] for col in self._cols)
other._unique_cols = ValExprList()
for col_combo in self._unique_cols:
other_col_combo = set()
for col in col_combo:
if col.name in cols_by_name:
col = cols_by_name[col.name]
else:
col = deepcopy(col, memo)
col.owner = other
other_col_combo.add(col)
other.unique_cols.append(other_col_combo)
return other
class TableExprList(list):
'''A list of TableExprs.'''
@property
def cols(self):
'''Return a list of all the Columns contained in all the TableExprs.'''
return ValExprList(col for table_expr in self for col in table_expr.cols)
@property
def joinable_cols_by_type(self):
cols_by_type = defaultdict(ValExprList)
for table_expr in self:
for type_, cols in table_expr.joinable_cols_by_type.iteritems():
cols_by_type[type_].extend(cols)
return cols_by_type
@property
def cols_by_type(self):
cols_by_type = defaultdict(ValExprList)
for table_expr in self:
for type_, cols in table_expr.cols_by_type.iteritems():
cols_by_type[type_].extend(cols)
return cols_by_type
@property
def col_types(self):
return tuple(self.cols_by_type)
@property
def by_col_type(self):
'''Return a dict with keys being column types and values being lists of TableExprs
that have at least one Column of that type.
'''
table_exprs_by_type = defaultdict(TableExprList)
for table_expr in self:
for col_type in table_expr.col_types:
table_exprs_by_type[col_type].append(table_expr)
return table_exprs_by_type
|
apache-2.0
|
lahosken/pants
|
tests/python/pants_test/engine/legacy/test_build_ignore_integration.py
|
8
|
4320
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import tempfile
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine
class IgnorePatternsPantsIniIntegrationTest(PantsRunIntegrationTest):
"""Tests the functionality of the build_ignore_patterns option in pants.ini ."""
@ensure_engine
def test_build_ignore_patterns_pants_ini(self):
def output_to_list(output_filename):
with open(output_filename, 'r') as results_file:
return set([line.rstrip() for line in results_file.readlines()])
tempdir = tempfile.mkdtemp()
tmp_output = os.path.join(tempdir, 'minimize-output1.txt')
run_result = self.run_pants(['minimize',
'testprojects::',
'--quiet',
'--minimize-output-file={0}'.format(tmp_output)])
self.assert_success(run_result)
results = output_to_list(tmp_output)
self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:ten-thousand',
results)
self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:once-upon-a-time',
results)
self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:lesser-of-two',
results)
self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:there-was-a-duck',
results)
tmp_output = os.path.join(tempdir, 'minimize-output2.txt')
run_result = self.run_pants(['minimize',
'testprojects::',
'--quiet',
'--minimize-output-file={0}'.format(tmp_output)],
config={
'DEFAULT': {
'build_ignore': [
'testprojects/src/java/org/pantsbuild/testproject/phrases'
]
}
})
self.assert_success(run_result)
results = output_to_list(tmp_output)
self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:ten-thousand',
results)
self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:once-upon-a-time',
results)
self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:lesser-of-two',
results)
self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:there-was-a-duck',
results)
@ensure_engine
def test_build_ignore_dependency(self):
run_result = self.run_pants(['-q',
'list',
'testprojects/tests/python/pants::'],
config={
'DEFAULT': {
'build_ignore': [
'testprojects/src/'
]
}
})
self.assert_failure(run_result)
# Error message complains dependency dir has no BUILD files.
self.assertIn('testprojects/src/thrift/org/pantsbuild/constants_only', run_result.stderr_data)
@ensure_engine
def test_build_ignore_dependency_success(self):
run_result = self.run_pants(['-q',
'list',
'testprojects/tests/python/pants::'],
config={
'DEFAULT': {
'build_ignore': [
'testprojects/src/antlr'
]
}
})
self.assert_success(run_result)
self.assertIn('testprojects/tests/python/pants/constants_only:constants_only', run_result.stdout_data)
|
apache-2.0
|
waytai/django
|
django/contrib/gis/utils/ogrinspect.py
|
391
|
9090
|
"""
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.utils import six
from django.utils.six.moves import zip
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
"""
Given a DataSource, generates a dictionary that may be used
for invoking the LayerMapping utility.
Keyword Arguments:
`geom_name` => The name of the geometry field to use for the model.
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
"""
if isinstance(data_source, six.string_types):
# Instantiating the DataSource from the string.
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Creating the dictionary.
_mapping = {}
# Generating the field name for each field in the layer.
for field in data_source[layer_key].fields:
mfield = field.lower()
if mfield[-1:] == '_':
mfield += 'field'
_mapping[mfield] = field
gtype = data_source[layer_key].geom_type
if multi_geom:
gtype.to_multi()
_mapping[geom_name] = str(gtype).upper()
return _mapping
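# A minimal usage sketch (hypothetical shapefile with fields NAME and POP_ and
# polygon geometries): mapping('/path/to/world.shp') lowercases each field
# name, appends 'field' to names ending in an underscore, and records the
# geometry type, returning a dict equal to
#   {'name': 'NAME', 'pop_field': 'POP_', 'geom': 'POLYGON'}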
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name, this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
...will print the model definition to stdout
or put this in a Python script and redirect the output to a new
model file:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True))
Required Arguments
`datasource` => string or DataSource object to file pointer
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model field name for the geometry field.
Otherwise will default to `geom`
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
`__unicode__`/`__str__` function (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants only
specific fields to have `blank=True`, then a list/tuple of OGR field
names may be used.
`null` => Boolean (default: False) - When set to True all generated
model fields will have `null=True`. If the user wants only specific
fields to have `null=True`, then a list/tuple of OGR field
names may be used.
Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
"""
return '\n'.join(s for s in _ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
multi_geom=False, name_field=None, imports=True,
decimal=False, blank=False, null=False):
"""
Helper routine for `ogrinspect` that generates GeoDjango models corresponding
to the given data source. See the `ogrinspect` docstring for more details.
"""
# Getting the DataSource
if isinstance(data_source, six.string_types):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Getting the layer corresponding to the layer key and getting
# a string listing of all OGR fields in the Layer.
layer = data_source[layer_key]
ogr_fields = layer.fields
# Creating lists from the `null`, `blank`, and `decimal`
# keyword arguments.
def process_kwarg(kwarg):
if isinstance(kwarg, (list, tuple)):
return [s.lower() for s in kwarg]
elif kwarg:
return [s.lower() for s in ogr_fields]
else:
return []
null_fields = process_kwarg(null)
blank_fields = process_kwarg(blank)
decimal_fields = process_kwarg(decimal)
# Gets the `null` and `blank` keywords for the given field name.
def get_kwargs_str(field_name):
kwlist = []
if field_name.lower() in null_fields:
kwlist.append('null=True')
if field_name.lower() in blank_fields:
kwlist.append('blank=True')
if kwlist:
return ', ' + ', '.join(kwlist)
else:
return ''
# For those wishing to disable the imports.
if imports:
yield '# This is an auto-generated Django model module created by ogrinspect.'
yield 'from django.contrib.gis.db import models'
yield ''
yield 'class %s(models.Model):' % model_name
for field_name, width, precision, field_type in zip(
ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
# The model field name.
mfield = field_name.lower()
if mfield[-1:] == '_':
mfield += 'field'
# Getting the keyword args string.
kwargs_str = get_kwargs_str(field_name)
if field_type is OFTReal:
# By default OFTReals are mapped to `FloatField`, however, they
# may also be mapped to `DecimalField` if specified in the
# `decimal` keyword.
if field_name.lower() in decimal_fields:
yield ' %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
mfield, width, precision, kwargs_str
)
else:
yield ' %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger:
yield ' %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger64:
yield ' %s = models.BigIntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTString:
yield ' %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
elif field_type is OFTDate:
yield ' %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTDateTime:
yield ' %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTTime:
yield ' %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
else:
raise TypeError('Unknown field type %s in %s' % (field_type, mfield))
# TODO: Autodetection of multigeometry types (see #7218).
gtype = layer.geom_type
if multi_geom:
gtype.to_multi()
geom_field = gtype.django
# Setting up the SRID keyword string.
if srid is None:
if layer.srs is None:
srid_str = 'srid=-1'
else:
srid = layer.srs.srid
if srid is None:
srid_str = 'srid=-1'
elif srid == 4326:
# WGS84 is already the default.
srid_str = ''
else:
srid_str = 'srid=%s' % srid
else:
srid_str = 'srid=%s' % srid
yield ' %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
if name_field:
yield ''
yield ' def __%s__(self): return self.%s' % (
'str' if six.PY3 else 'unicode', name_field)
|
bsd-3-clause
|
StephenKing/summerschool-2015-ryu
|
ryu/tests/unit/lib/test_import_module.py
|
47
|
2443
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nose.tools import eq_
from ryu.utils import import_module
import ryu.tests.unit.lib.test_mod.fuga.mod
class Test_import_module(unittest.TestCase):
""" Test case for ryu.utils.import_module
"""
def setUp(self):
pass
def tearDown(self):
pass
@staticmethod
def _my_import(name):
mod = __import__(name)
components = name.split('.')
for c in components[1:]:
mod = getattr(mod, c)
return mod
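# Illustrative sketch (using a stdlib module rather than the test fixtures):
# _my_import walks getattr() down the dotted path so the leaf module is
# returned instead of the top-level package, e.g.
#   Test_import_module._my_import('os.path') is os.path  -> True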
def test_import_module_with_same_basename(self):
fuga = import_module('ryu.tests.unit.lib.test_mod.fuga.mod')
eq_("this is fuga", fuga.name)
hoge = import_module('ryu.tests.unit.lib.test_mod.hoge.mod')
eq_("this is hoge", hoge.name)
def test_import_module_by_filename(self):
fuga = import_module('./lib/test_mod/fuga/mod.py')
eq_("this is fuga", fuga.name)
hoge = import_module('./lib/test_mod/hoge/mod.py')
eq_("this is hoge", hoge.name)
def test_import_same_module1(self):
fuga1 = import_module('./lib/test_mod/fuga/mod.py')
eq_("this is fuga", fuga1.name)
eq_(ryu.tests.unit.lib.test_mod.fuga.mod, fuga1)
def test_import_same_module2(self):
fuga1 = import_module('./lib/test_mod/fuga/mod.py')
eq_("this is fuga", fuga1.name)
fuga2 = import_module('ryu.tests.unit.lib.test_mod.fuga.mod')
eq_("this is fuga", fuga2.name)
eq_(fuga1, fuga2)
def test_import_same_module3(self):
fuga1 = import_module('./lib/test_mod/fuga/mod.py')
eq_("this is fuga", fuga1.name)
fuga3 = self._my_import('ryu.tests.unit.lib.test_mod.fuga.mod')
eq_("this is fuga", fuga3.name)
eq_(fuga1, fuga3)
|
apache-2.0
|
atsolakid/edx-platform
|
openedx/core/djangoapps/user_api/legacy_urls.py
|
146
|
1461
|
"""
Defines the URL routes for this app.
"""
from django.conf import settings
from django.conf.urls import include, patterns, url
from rest_framework import routers
from . import views as user_api_views
from .models import UserPreference
USER_API_ROUTER = routers.DefaultRouter()
USER_API_ROUTER.register(r'users', user_api_views.UserViewSet)
USER_API_ROUTER.register(r'user_prefs', user_api_views.UserPreferenceViewSet)
urlpatterns = patterns(
'',
url(r'^v1/', include(USER_API_ROUTER.urls)),
url(
r'^v1/preferences/(?P<pref_key>{})/users/$'.format(UserPreference.KEY_REGEX),
user_api_views.PreferenceUsersListView.as_view()
),
url(
r'^v1/forum_roles/(?P<name>[a-zA-Z]+)/users/$',
user_api_views.ForumRoleUsersListView.as_view()
),
url(
r'^v1/preferences/email_opt_in/$',
user_api_views.UpdateEmailOptInPreference.as_view(),
name="preferences_email_opt_in"
),
)
if settings.FEATURES.get('ENABLE_COMBINED_LOGIN_REGISTRATION'):
urlpatterns += patterns(
'',
url(r'^v1/account/login_session/$', user_api_views.LoginSessionView.as_view(),
name="user_api_login_session"),
url(r'^v1/account/registration/$', user_api_views.RegistrationView.as_view(),
name="user_api_registration"),
url(r'^v1/account/password_reset/$', user_api_views.PasswordResetView.as_view(),
name="user_api_password_reset"),
)
|
agpl-3.0
|
gotostack/iSwift
|
iswift/azureurls.py
|
1
|
2538
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import include
from django.conf.urls.defaults import patterns
from django.conf.urls.defaults import url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', 'iswift.azureviews.home', name='index'),
url(r'^usermain/$', 'iswift.azureviews.usermain'),
url(r'^login/$', 'iswift.azureviews.home'),
url(r'^logout/', 'iswift.azureviews.logout_view'),
url(r'^mng', include('iswift.supermanager.urls')),
url(r'^mng', include('iswift.companymanager.urls')),
url(r'', include('iswift.footerpages.urls')),
url(r'^auth/$', 'iswift.azureviews.authlogin'),
url(r'^auth/', include('openstack_auth.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^(?P<container>.*)/objectlist/$',
'iswift.azureviews.container_objects',
name='container_objectlist'),
url(r'^(?P<container>.*)/objectlist/'
'(?P<containername>.*)/(?P<objects>.*)/objectstat/$',
'iswift.azureviews.objects_stat',
name='objects_stat'),
url(r'^download/(?P<containernames>.*)/(?P<objects>.*)/$',
'iswift.azureviews.download_object',
name='download_object'),
url(r'^container/create/$',
'iswift.azureviews.create_container'),
url(r'^(?P<container>.*)/objectcreate/',
'iswift.azureviews.upload_a_object'),
url(r'^(?P<container_name>.+?)/(?P<subfolder_path>(.+/)+)?upload$',
'iswift.azureviews.upload_a_object',
name='object_upload'),
)
# Development static app and project media serving using the staticfiles app.
# urlpatterns += static(settings.STATIC_URL ,
# document_root = settings.STATIC_ROOT )
# Convenience function for serving user-uploaded media during
# development. Only active if DEBUG==True and the URL prefix is a local
# path. Production media should NOT be served by Django.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
apache-2.0
|
Xero-Hige/LuGus-VHDL
|
VGASimulator/simulator.py
|
1
|
1204
|
#!/usr/local/bin/python
from screen import Screen
from vga_controller import VGAController
from reader import Reader
from translator import Translator
from time import sleep
import config
def clean():
reader.clean()
vga_controller.save()
def loop():
info = reader.readline()
while (info):
values = info.split(config.DELIMITER)
if (len(values) < 5):
print "Values not matching, expecting x y r g b"
info = reader.readline()
continue
vga_controller.update(int(values[0]),
int(values[1]),
translator.bin_to_int(values[2]),
translator.bin_to_int(values[3]),
translator.bin_to_int(values[4]))
info = reader.readline()
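# Illustrative input format (the actual separator is whatever config.DELIMITER
# is set to). With a space delimiter, a line such as
#   10 20 0101 0110 0011
# updates pixel (x=10, y=20) with r/g/b decoded from the binary strings.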
# #Start a screen
# screen = Screen(config.VISIBLE_HEIGHT,config.VISIBLE_WIDTH)
# screen.start()
# Start the vgaController
vga_controller = VGAController([config.VISIBLE_WIDTH, config.VISIBLE_HEIGHT])
# Start a reader
reader = Reader()
reader.initialize()
# Start translator
translator = Translator()
print "Starting"
try:
loop()
except Exception as e:
print e
clean()
|
gpl-3.0
|
tjth/lotterycoin
|
qa/rpc-tests/bipdersig.py
|
66
|
3130
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the BIP66 changeover logic
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class BIP66Test(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=2"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=3"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
self.nodes[1].generate(100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=2 blocks")
# Mine 750 new-version blocks
for i in xrange(15):
self.nodes[2].generate(50)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 850):
raise AssertionError("Failed to mine 750 version=3 blocks")
# TODO: check that new DERSIG rules are not enforced
# Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 851):
raise AssertionError("Failed to mine a version=3 blocks")
# TODO: check that new DERSIG rules are enforced
# Mine 198 new-version blocks
for i in xrange(2):
self.nodes[2].generate(99)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1049):
raise AssertionError("Failed to mine 198 version=3 blocks")
# Mine 1 old-version block
self.nodes[1].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1050):
raise AssertionError("Failed to mine a version=2 block after 949 version=3 blocks")
# Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Failed to mine a version=3 block")
# Mine 1 old-version block
try:
self.nodes[1].generate(1)
raise AssertionError("Succeeded to mine a version=2 block after 950 version=3 blocks")
except JSONRPCException:
pass
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Accepted a version=2 block after 950 version=3 blocks")
# Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1052):
raise AssertionError("Failed to mine a version=3 block")
if __name__ == '__main__':
BIP66Test().main()
|
mit
|
Peratham/tweater
|
py/nltk/metrics/scores.py
|
4
|
7976
|
# Natural Language Toolkit: Evaluation
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import sys
import math
import random
try:
from scipy.stats.stats import betai
except ImportError:
betai = None
from nltk.util import LazyConcatenation, LazyMap
from itertools import izip
from nltk.probability import FreqDist
def accuracy(reference, test):
"""
Given a list of reference values and a corresponding list of test
values, return the fraction of corresponding values that are
equal. In particular, return the fraction of indices
C{0 <= i < len(test)} such that C{test[i] == reference[i]}.
@type reference: C{list}
@param reference: An ordered list of reference values.
@type test: C{list}
@param test: A list of values to compare against the corresponding
reference values.
@raise ValueError: If C{reference} and C{test} do not have the
same length.
"""
if len(reference) != len(test):
raise ValueError("Lists must have the same length.")
num_correct = 0
for x, y in izip(reference, test):
if x == y:
num_correct += 1
return float(num_correct) / len(reference)
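# Worked example (illustrative): with reference = ['a', 'b', 'c', 'd'] and
# test = ['a', 'x', 'c', 'd'], three of the four positions match, so
# accuracy(reference, test) returns 3.0 / 4 = 0.75.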
def precision(reference, test):
"""
Given a set of reference values and a set of test values, return
the fraction of test values that appear in the reference set.
In particular, return |C{reference}S{cap}C{test}|/|C{test}|.
If C{test} is empty, then return C{None}.
@type reference: C{Set}
@param reference: A set of reference values.
@type test: C{Set}
@param test: A set of values to compare against the reference set.
@rtype: C{float} or C{None}
"""
if (not hasattr(reference, 'intersection') or
not hasattr(test, 'intersection')):
raise TypeError('reference and test should be sets')
if len(test) == 0:
return None
else:
return float(len(reference.intersection(test)))/len(test)
def recall(reference, test):
"""
Given a set of reference values and a set of test values, return
the fraction of reference values that appear in the test set.
In particular, return |C{reference}S{cap}C{test}|/|C{reference}|.
If C{reference} is empty, then return C{None}.
@type reference: C{Set}
@param reference: A set of reference values.
@type test: C{Set}
@param test: A set of values to compare against the reference set.
@rtype: C{float} or C{None}
"""
if (not hasattr(reference, 'intersection') or
not hasattr(test, 'intersection')):
raise TypeError('reference and test should be sets')
if len(reference) == 0:
return None
else:
return float(len(reference.intersection(test)))/len(reference)
def f_measure(reference, test, alpha=0.5):
"""
Given a set of reference values and a set of test values, return
the f-measure of the test values, when compared against the
reference values. The f-measure is the harmonic mean of the
L{precision} and L{recall}, weighted by C{alpha}. In particular,
given the precision M{p} and recall M{r} defined by:
- M{p} = |C{reference}S{cap}C{test}|/|C{test}|
- M{r} = |C{reference}S{cap}C{test}|/|C{reference}|
The f-measure is:
- 1/(C{alpha}/M{p} + (1-C{alpha})/M{r})
If either C{reference} or C{test} is empty, then C{f_measure}
returns C{None}.
@type reference: C{Set}
@param reference: A set of reference values.
@type test: C{Set}
@param test: A set of values to compare against the reference set.
@rtype: C{float} or C{None}
"""
p = precision(reference, test)
r = recall(reference, test)
if p is None or r is None:
return None
if p == 0 or r == 0:
return 0
return 1.0/(alpha/p + (1-alpha)/r)
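# Worked example (illustrative): if precision p = 0.5 and recall r = 0.25,
# then with the default alpha = 0.5 the f-measure is
#   1 / (0.5/0.5 + 0.5/0.25) = 1 / (1 + 2) = 1/3,
# i.e. the harmonic mean, which is pulled toward the smaller of p and r.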
def log_likelihood(reference, test):
"""
Given a list of reference values and a corresponding list of test
probability distributions, return the average log likelihood of
the reference values, given the probability distributions.
@param reference: A list of reference values
@type reference: C{list}
@param test: A list of probability distributions over values to
compare against the corresponding reference values.
@type test: C{list} of L{ProbDistI}
"""
if len(reference) != len(test):
raise ValueError("Lists must have the same length.")
# Return the average value of dist.logprob(val).
total_likelihood = sum(dist.logprob(val)
for (val, dist) in zip(reference, test))
return total_likelihood/len(reference)
def approxrand(a, b, **kwargs):
"""
Returns an approximate significance level between two lists of
independently generated test values.
Approximate randomization calculates significance by randomly drawing
from a sample of the possible permutations. At the limit of the number
of possible permutations, the significance level is exact. The
approximate significance level is the proportion of shuffles in which the
statistic of the permuted lists is at least as extreme as the actual
statistic of the unpermuted argument lists.
@return: a tuple containing an approximate significance level, the count
of the number of times the pseudo-statistic varied from the
actual statistic, and the number of shuffles
@rtype: C{tuple}
@param a: a list of test values
@type a: C{list}
@param b: another list of independently generated test values
@type b: C{list}
"""
shuffles = kwargs.get('shuffles', 999)
# there's no point in trying to shuffle beyond all possible permutations
shuffles = \
min(shuffles, reduce(lambda x, y: x * y, xrange(1, len(a) + len(b) + 1)))
stat = kwargs.get('statistic', lambda lst: float(sum(lst)) / len(lst))
verbose = kwargs.get('verbose', False)
if verbose:
print 'shuffles: %d' % shuffles
actual_stat = math.fabs(stat(a) - stat(b))
if verbose:
print 'actual statistic: %f' % actual_stat
print '-' * 60
c = 1e-100
lst = LazyConcatenation([a, b])
indices = range(len(a) + len(b))
for i in range(shuffles):
if verbose and i % 10 == 0:
print 'shuffle: %d' % i
random.shuffle(indices)
pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[:len(a)]))
pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a):]))
pseudo_stat = math.fabs(pseudo_stat_a - pseudo_stat_b)
if pseudo_stat >= actual_stat:
c += 1
if verbose and i % 10 == 0:
print 'pseudo-statistic: %f' % pseudo_stat
print 'significance: %f' % (float(c + 1) / (i + 1))
print '-' * 60
significance = float(c + 1) / (shuffles + 1)
if verbose:
print 'significance: %f' % significance
if betai:
for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:
print "prob(phi<=%f): %f" % (phi, betai(c, shuffles, phi))
return (significance, c, shuffles)
def demo():
print '-'*75
reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
test = 'DET VB VB DET NN NN NN IN DET NN'.split()
print 'Reference =', reference
print 'Test =', test
print 'Accuracy:', accuracy(reference, test)
print '-'*75
reference_set = set(reference)
test_set = set(test)
print 'Reference =', reference_set
print 'Test = ', test_set
print 'Precision:', precision(reference_set, test_set)
print ' Recall:', recall(reference_set, test_set)
print 'F-Measure:', f_measure(reference_set, test_set)
print '-'*75
if __name__ == '__main__':
demo()
|
gpl-3.0
|
nii-cloud/dodai-compute
|
nova/tests/test_quantum.py
|
3
|
12317
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova import ipv6
from nova import log as logging
from nova.network.quantum import manager as quantum_manager
from nova import test
from nova import utils
LOG = logging.getLogger('nova.tests.quantum_network')
# this class can be used for unit/functional testing on nova,
# as it does not actually make remote calls to the Quantum service
class FakeQuantumClientConnection(object):
def __init__(self):
self.nets = {}
def get_networks_for_tenant(self, tenant_id):
net_ids = []
for net_id, n in self.nets.items():
if n['tenant-id'] == tenant_id:
net_ids.append(net_id)
return net_ids
def create_network(self, tenant_id, network_name):
uuid = str(utils.gen_uuid())
self.nets[uuid] = {'net-name': network_name,
'tenant-id': tenant_id,
'ports': {}}
return uuid
def delete_network(self, tenant_id, net_id):
if self.nets[net_id]['tenant-id'] == tenant_id:
del self.nets[net_id]
def network_exists(self, tenant_id, net_id):
try:
return self.nets[net_id]['tenant-id'] == tenant_id
except KeyError:
return False
def _confirm_not_attached(self, interface_id):
for n in self.nets.values():
for p in n['ports'].values():
if p['attachment-id'] == interface_id:
raise Exception(_("interface '%s' is already attached" %
interface_id))
def create_and_attach_port(self, tenant_id, net_id, interface_id):
if not self.network_exists(tenant_id, net_id):
raise Exception(
_("network %(net_id)s does not exist for tenant %(tenant_id)s")
% locals())
self._confirm_not_attached(interface_id)
uuid = str(utils.gen_uuid())
self.nets[net_id]['ports'][uuid] = \
{"port-state": "ACTIVE",
"attachment-id": interface_id}
def detach_and_delete_port(self, tenant_id, net_id, port_id):
if not self.network_exists(tenant_id, net_id):
raise exception.NotFound(
_("network %(net_id)s does not exist "
"for tenant %(tenant_id)s" % locals()))
del self.nets[net_id]['ports'][port_id]
def get_port_by_attachment(self, tenant_id, attachment_id):
for net_id, n in self.nets.items():
if n['tenant-id'] == tenant_id:
for port_id, p in n['ports'].items():
if p['attachment-id'] == attachment_id:
return (net_id, port_id)
return (None, None)
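# Illustrative shape of the fake state (hypothetical ids): after
# create_network('t1', 'net-a') and create_and_attach_port('t1', net_id, 'if0'),
# self.nets maps the network uuid to
#   {'net-name': 'net-a', 'tenant-id': 't1',
#    'ports': {<port uuid>: {'port-state': 'ACTIVE', 'attachment-id': 'if0'}}}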
networks = [{'label': 'project1-net1',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:1db8::/64',
'gateway_v6': '2001:1db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': None,
'vpn_public_address': None,
'project_id': 'fake_project1',
'priority': 1},
{'label': 'project2-net1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:1db9::/64',
'gateway_v6': '2001:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': None,
'project_id': 'fake_project2',
'priority': 1},
{'label': "public",
'injected': False,
'multi_host': False,
'cidr': '10.0.0.0/24',
'cidr_v6': '2001:1dba::/64',
'gateway_v6': '2001:1dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '10.0.0.1',
'broadcast': '10.0.0.255',
'dns1': '10.0.0.1',
'dns2': '10.0.0.2',
'vlan': None,
'host': None,
'project_id': None,
'priority': 0},
{'label': "project2-net2",
'injected': False,
'multi_host': False,
'cidr': '9.0.0.0/24',
'cidr_v6': '2001:1dbb::/64',
'gateway_v6': '2001:1dbb::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '9.0.0.1',
'broadcast': '9.0.0.255',
'dns1': '9.0.0.1',
'dns2': '9.0.0.2',
'vlan': None,
'host': None,
'project_id': "fake_project2",
'priority': 2}]
# this is a base class to be used by all other Quantum Test classes
class QuantumTestCaseBase(object):
def test_create_and_delete_nets(self):
self._create_nets()
self._delete_nets()
def _create_nets(self):
for n in networks:
ctx = context.RequestContext('user1', n['project_id'])
self.net_man.create_networks(ctx,
label=n['label'], cidr=n['cidr'],
multi_host=n['multi_host'],
num_networks=1, network_size=256, cidr_v6=n['cidr_v6'],
gateway_v6=n['gateway_v6'], bridge=None,
bridge_interface=None, dns1=n['dns1'],
dns2=n['dns2'], project_id=n['project_id'],
priority=n['priority'])
def _delete_nets(self):
for n in networks:
ctx = context.RequestContext('user1', n['project_id'])
self.net_man.delete_network(ctx, n['cidr'])
def test_allocate_and_deallocate_instance_static(self):
self._create_nets()
project_id = "fake_project1"
ctx = context.RequestContext('user1', project_id)
instance_ref = db.api.instance_create(ctx,
{"project_id": project_id})
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
instance_type_id=instance_ref['instance_type_id'],
project_id=project_id)
self.assertEquals(len(nw_info), 2)
# we don't know which order the NICs will be in until we
# introduce the notion of priority
# v4 cidr
self.assertTrue(nw_info[0][0]['cidr'].startswith("10."))
self.assertTrue(nw_info[1][0]['cidr'].startswith("192."))
# v4 address
self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10."))
self.assertTrue(nw_info[1][1]['ips'][0]['ip'].startswith("192."))
# v6 cidr
self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:"))
self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:"))
# v6 address
self.assertTrue(
nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:"))
self.assertTrue(
nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:"))
self.net_man.deallocate_for_instance(ctx,
instance_id=instance_ref['id'],
project_id=project_id)
self._delete_nets()
def test_allocate_and_deallocate_instance_dynamic(self):
self._create_nets()
project_id = "fake_project2"
ctx = context.RequestContext('user1', project_id)
net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id)
requested_networks = [(net_id, None) for net_id in net_ids]
self.net_man.validate_networks(ctx, requested_networks)
instance_ref = db.api.instance_create(ctx,
{"project_id": project_id})
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
instance_type_id=instance_ref['instance_type_id'],
project_id=project_id,
requested_networks=requested_networks)
self.assertEquals(len(nw_info), 2)
# we don't know which order the NICs will be in until we
# introduce the notion of priority
# v4 cidr
self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or
nw_info[1][0]['cidr'].startswith("9."))
self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or
nw_info[1][0]['cidr'].startswith("192."))
# v4 address
self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or
nw_info[1][1]['ips'][0]['ip'].startswith("9."))
self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or
nw_info[1][1]['ips'][0]['ip'].startswith("192."))
# v6 cidr
self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or
nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:"))
self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or
nw_info[1][0]['cidr_v6'].startswith("2001:1db9:"))
# v6 address
self.assertTrue(
nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or
nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:"))
self.assertTrue(
nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or
nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:"))
self.net_man.deallocate_for_instance(ctx,
instance_id=instance_ref['id'],
project_id=project_id)
self._delete_nets()
def test_validate_bad_network(self):
ctx = context.RequestContext('user1', 'fake_project1')
self.assertRaises(exception.NetworkNotFound,
self.net_man.validate_networks, ctx, [("", None)])
class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase):
def setUp(self):
super(QuantumNovaIPAMTestCase, self).setUp()
self.net_man = quantum_manager.QuantumManager(
ipam_lib="nova.network.quantum.nova_ipam_lib",
q_conn=FakeQuantumClientConnection())
# Tests seem to create some networks by default, which
# we don't want. So we delete them.
ctx = context.RequestContext('user1', 'fake_project1').elevated()
for n in db.network_get_all(ctx):
db.network_delete_safe(ctx, n['id'])
# Other unit tests (e.g., test_compute.py) have a nasty
# habit of creating fixed IPs and not cleaning up, which
# can confuse these tests, so we remove all existing fixed
# ips before starting.
session = get_session()
result = session.query(models.FixedIp).all()
with session.begin():
for fip_ref in result:
session.delete(fip_ref)
|
apache-2.0
|
guillaumevincent/rangevoting
|
commands.py
|
2
|
1477
|
import logging
logger = logging.getLogger(__name__)
class RangeVoteCommandValidator:
def __init__(self, data):
self.data = data
def is_valid(self):
if 'question' not in self.data or 'choices' not in self.data:
logger.debug('RangeVoteCommandValidator : question or choices not in rangevote')
return False
if len(self.data['choices']) < 2:
logger.debug('RangeVoteCommandValidator : should have at least two choices in rangevote')
return False
return True
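# Usage sketch (hypothetical payloads):
#   RangeVoteCommandValidator({'question': 'Best?', 'choices': ['a', 'b']}).is_valid()
#   -> True
#   RangeVoteCommandValidator({'question': 'Best?', 'choices': ['a']}).is_valid()
#   -> False (fewer than two choices)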
class RangeVoteCommand:
def __init__(self, uuid, question, choices):
self.uuid = uuid
self.question = question
self.choices = choices
class CreateRangeVoteCommand(RangeVoteCommand):
pass
class UpdateRangeVoteCommand(RangeVoteCommand):
def __init__(self, uuid, question, choices, votes):
self.votes = votes
super().__init__(uuid, question, choices)
class VoteCommandValidator:
def __init__(self, data):
self.data = data
def is_valid(self):
if 'elector' not in self.data or 'opinions' not in self.data:
logger.debug('VoteCommandValidator : elector or opinions not in vote ({0})'.format(self.data))
return False
return True
class CreateVoteCommand:
def __init__(self, rangevote_id, elector, opinions):
self.rangevote_id = rangevote_id
self.elector = elector
self.opinions = opinions
|
mit
|
bilgili/Voreen
|
modules/python/ext/python27/modules/sysconfig.py
|
26
|
25574
|
"""Provide access to Python's configuration information.
"""
import sys
import os
from os.path import pardir, realpath
_INSTALL_SCHEMES = {
'posix_prefix': {
'stdlib': '{base}/lib/python{py_version_short}',
'platstdlib': '{platbase}/lib/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
'include': '{base}/include/python{py_version_short}',
'platinclude': '{platbase}/include/python{py_version_short}',
'scripts': '{base}/bin',
'data': '{base}',
},
'posix_home': {
'stdlib': '{base}/lib/python',
'platstdlib': '{base}/lib/python',
'purelib': '{base}/lib/python',
'platlib': '{base}/lib/python',
'include': '{base}/include/python',
'platinclude': '{base}/include/python',
'scripts': '{base}/bin',
'data' : '{base}',
},
'nt': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2_home': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'nt_user': {
'stdlib': '{userbase}/Python{py_version_nodot}',
'platstdlib': '{userbase}/Python{py_version_nodot}',
'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
'include': '{userbase}/Python{py_version_nodot}/Include',
'scripts': '{userbase}/Scripts',
'data' : '{userbase}',
},
'posix_user': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'osx_framework_user': {
'stdlib': '{userbase}/lib/python',
'platstdlib': '{userbase}/lib/python',
'purelib': '{userbase}/lib/python/site-packages',
'platlib': '{userbase}/lib/python/site-packages',
'include': '{userbase}/include',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
}
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
_INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError, var:
raise AttributeError('{%s}' % var)
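# Illustrative behaviour (hypothetical values):
#   _subst_vars('{base}/lib', {'base': '/usr'}) -> '/usr/lib'
# If a key is missing from local_vars, os.environ is consulted before an
# AttributeError is raised for the unresolved placeholder.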
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _INSTALL_SCHEMES[scheme].items():
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return env_base if env_base else joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
return env_base if env_base else \
joinuser("~", "Library", framework, "%d.%d"
% (sys.version_info[:2]))
return env_base if env_base else joinuser("~", ".local")
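# Illustrative defaults when PYTHONUSERBASE is unset: '%APPDATA%\Python' on
# Windows, '~/Library/<framework>/X.Y' for framework builds on OS X, and
# '~/.local' everywhere else.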
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with open(filename) as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
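# Illustrative sketch (hypothetical Makefile content): a file containing
#   CC = gcc
#   BASEFLAGS = -Wall
#   CFLAGS = $(BASEFLAGS) -O2
# would yield {'CC': 'gcc', 'BASEFLAGS': '-Wall', 'CFLAGS': '-Wall -O2'},
# with $(BASEFLAGS) resolved by the interpolation loop above.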
def _get_makefile_filename():
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
return os.path.join(get_path('platstdlib'), "config", "Makefile")
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = _get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError, e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError, e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
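# Illustrative sketch (hypothetical header content): the lines
#   #define HAVE_FOO 1
#   /* #undef HAVE_BAR */
# would produce {'HAVE_FOO': 1, 'HAVE_BAR': 0}: numeric values are coerced
# to int and #undef'd names are recorded as 0.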
def get_config_h_filename():
"""Returns the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Returns a tuple containing the schemes names."""
schemes = _INSTALL_SCHEMES.keys()
schemes.sort()
return tuple(schemes)
def get_path_names():
"""Returns a tuple containing the paths names."""
return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Returns a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
if expand:
return _expand_vars(scheme, vars)
else:
return _INSTALL_SCHEMES[scheme]
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Returns a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
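# Illustrative usage (the result depends on the running interpreter): on a
# POSIX build, get_path('purelib', vars={'base': '/opt/py'}) expands the
# 'posix_prefix' template to something like
# '/opt/py/lib/python2.7/site-packages'.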
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
import re
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search('-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
import re
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
            # We can't use "platform.architecture()[0]" because of a
            # bootstrap problem. We use a dict to get an error
            # if something suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxint]
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if 1:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
finally:
f.close()
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if (macrelease + '.') >= '10.4.' and \
'-arch' in get_config_vars().get('CFLAGS', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
                archs = re.findall(r'-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r"%(archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxint >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxint >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
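# Hedged usage sketch (not part of the original module): exercise the helpers
# defined above. Which configuration variables exist differs by platform, so
# the names queried below are only illustrative. Runs only when this file is
# executed directly.
if __name__ == '__main__':
    # Platform tag, e.g. 'linux-x86_64' or 'macosx-10.6-intel'.
    print get_platform()
    # A few commonly defined configuration variables; missing ones print None.
    for name in ('prefix', 'exec_prefix', 'SO'):
        print '%s = %r' % (name, get_config_var(name))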
|
gpl-2.0
|
JTCunning/sentry
|
src/sentry/api/endpoints/project_details.py
|
5
|
4612
|
from __future__ import absolute_import
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.decorators import sudo_required
from sentry.api.serializers import serialize
from sentry.models import AuditLogEntryEvent, Project, ProjectStatus
from sentry.tasks.deletion import delete_project
def clean_newline_inputs(value):
result = []
for v in value.split('\n'):
v = v.lower().strip()
if v:
result.append(v)
return result
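# Illustrative note (not in the original source): clean_newline_inputs
# lower-cases, strips, and drops blank entries, e.g.
#     clean_newline_inputs('Example.com\n\n *.Example.net ')
#     # -> ['example.com', '*.example.net']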
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('name', 'slug')
class ProjectDetailsEndpoint(ProjectEndpoint):
doc_section = DocSection.PROJECTS
def get(self, request, project):
"""
Retrieve a project
Return details on an individual project.
{method} {path}
"""
data = serialize(project, request.user)
data['options'] = {
'sentry:origins': '\n'.join(project.get_option('sentry:origins', None) or []),
'sentry:resolve_age': int(project.get_option('sentry:resolve_age', 0)),
'sentry:scrub_data': bool(project.get_option('sentry:scrub_data', True)),
'sentry:sensitive_fields': project.get_option('sentry:sensitive_fields', []),
}
return Response(data)
@sudo_required
def put(self, request, project):
"""
Update a project
Update various attributes and configurable settings for the given project.
{method} {path}
{{
"name": "My Project Name",
"options": {{
"sentry:origins": "*"
}}
}}
"""
serializer = ProjectSerializer(project, data=request.DATA, partial=True)
if serializer.is_valid():
project = serializer.save()
options = request.DATA.get('options', {})
if 'sentry:origins' in options:
project.update_option(
'sentry:origins',
clean_newline_inputs(options['sentry:origins'])
)
if 'sentry:resolve_age' in options:
project.update_option('sentry:resolve_age', int(options['sentry:resolve_age']))
if 'sentry:scrub_data' in options:
project.update_option('sentry:scrub_data', bool(options['sentry:scrub_data']))
if 'sentry:sensitive_fields' in options:
project.update_option(
'sentry:sensitive_fields',
[s.strip().lower() for s in options['sentry:sensitive_fields']]
)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_EDIT,
data=project.get_audit_log_data(),
)
data = serialize(project, request.user)
data['options'] = {
'sentry:origins': '\n'.join(project.get_option('sentry:origins', None) or []),
'sentry:resolve_age': int(project.get_option('sentry:resolve_age', 0)),
}
return Response(data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@sudo_required
def delete(self, request, project):
"""
Delete a project
Schedules a project for deletion.
{method} {path}
        **Note:** Deletion happens asynchronously and therefore is not immediate.
        However, once deletion has begun, the project's state changes and it will
        be hidden from most public views.
"""
if project.is_internal_project():
return Response('{"error": "Cannot remove projects internally used by Sentry."}',
status=status.HTTP_403_FORBIDDEN)
updated = Project.objects.filter(
id=project.id,
status=ProjectStatus.VISIBLE,
).update(status=ProjectStatus.PENDING_DELETION)
if updated:
delete_project.delay(object_id=project.id)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_REMOVE,
data=project.get_audit_log_data(),
)
return Response(status=204)
|
bsd-3-clause
|
tensorflow/tensorflow
|
tensorflow/python/framework/tensor_shape_test.py
|
9
|
21959
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for shape inference helper classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DimensionTest(test_util.TensorFlowTestCase):
def testDimension(self):
dim = tensor_shape.Dimension(12)
self.assertEqual(12, dim.value)
self.assertEqual(12, int(dim))
self.assertEqual(dim, tensor_shape.Dimension(12))
self.assertEqual(tensor_shape.Dimension(15),
dim + tensor_shape.Dimension(3))
self.assertEqual(tensor_shape.Dimension(15), dim + 3)
self.assertEqual(tensor_shape.Dimension(15), 3 + dim)
self.assertEqual(tensor_shape.Dimension(9), dim - 3)
self.assertEqual(tensor_shape.Dimension(1), 13 - dim)
self.assertEqual(tensor_shape.Dimension(24),
dim * tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(24), dim * 2)
self.assertEqual(tensor_shape.Dimension(24), 2 * dim)
self.assertEqual([4] * 12, [4] * dim)
self.assertEqual(12 * [4], dim * [4])
self.assertEqual(tensor_shape.Dimension(24), 2 * dim)
self.assertEqual(
tensor_shape.Dimension(6), dim // tensor_shape.Dimension(2))
self.assertEqual(tensor_shape.Dimension(6), dim // 2)
self.assertEqual(tensor_shape.Dimension(0), 2 // dim)
self.assertEqual(tensor_shape.Dimension(12),
dim.merge_with(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(12))
self.assertLess(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
self.assertGreaterEqual(tensor_shape.Dimension(12),
tensor_shape.Dimension(12))
self.assertGreaterEqual(tensor_shape.Dimension(13),
tensor_shape.Dimension(12))
self.assertNotEqual(dim, (12,))
with self.assertRaises(ValueError):
dim.merge_with(tensor_shape.Dimension(13))
def testUnknownDimension(self):
dim = tensor_shape.Dimension(None)
self.assertIsNone(dim.value)
self.assertEqual(dim.value, tensor_shape.Dimension(None).value)
self.assertEqual(tensor_shape.Dimension(None).value,
(dim + tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
(dim * tensor_shape.Dimension(None)).value)
self.assertEqual(
tensor_shape.Dimension(None).value,
(dim // tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
dim.merge_with(tensor_shape.Dimension(None)).value)
self.assertIsNone(
tensor_shape.Dimension(None) < tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) <= tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) > tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) >= tensor_shape.Dimension(None))
def testKnownAndUnknownDimensions(self):
known = tensor_shape.Dimension(12)
unknown = tensor_shape.Dimension(None)
self.assertEqual(
tensor_shape.Dimension(None).value, (known + unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown + known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known * unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown * known).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (known // unknown).value)
self.assertEqual(
tensor_shape.Dimension(None).value, (unknown // known).value)
self.assertEqual(
tensor_shape.Dimension(12), known.merge_with(unknown))
self.assertEqual(
tensor_shape.Dimension(12), unknown.merge_with(known))
self.assertIsNone(tensor_shape.Dimension(12) < tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(12) <= tensor_shape.Dimension(None))
self.assertIsNone(tensor_shape.Dimension(12) > tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(12) >= tensor_shape.Dimension(None))
self.assertIsNone(tensor_shape.Dimension(None) < tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) <= tensor_shape.Dimension(12))
self.assertIsNone(tensor_shape.Dimension(None) > tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) >= tensor_shape.Dimension(12))
def testAsDimension(self):
self.assertEqual(tensor_shape.Dimension(12),
tensor_shape.as_dimension(tensor_shape.Dimension(12)))
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))
self.assertEqual(
tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(tensor_shape.Dimension(None)).value)
self.assertEqual(tensor_shape.Dimension(None).value,
tensor_shape.as_dimension(None).value)
def testEquality(self):
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertNotEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertIsNone(
tensor_shape.Dimension(12) == tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) == tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) == tensor_shape.Dimension(None))
# None indicates ambiguous comparison, but comparison vs the wrong type
# is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(None) == 12.99)
self.assertNotEqual(tensor_shape.Dimension(None), 12.99)
# pylint: disable=singleton-comparison, g-equals-none
self.assertIsNone(tensor_shape.Dimension(None) == None)
# pylint: enable=singleton-comparison, g-equals-none
self.assertNotEqual(tensor_shape.Dimension(12), 12.99)
def testInequality(self):
self.assertNotEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
self.assertEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(12) != tensor_shape.Dimension(None))
self.assertIsNone(
tensor_shape.Dimension(None) != tensor_shape.Dimension(12))
self.assertIsNone(
tensor_shape.Dimension(None) != tensor_shape.Dimension(None))
# None indicates ambiguous comparison, but comparison vs the wrong type
# is unambiguously False.
self.assertIsNotNone(tensor_shape.Dimension(None) != 12.99)
self.assertNotEqual(tensor_shape.Dimension(None), 12.99)
self.assertIsNone(tensor_shape.Dimension(None) != None) # pylint: disable=g-equals-none
self.assertNotEqual(tensor_shape.Dimension(12), 12.99)
def testIsCompatibleWithError(self):
with self.assertRaisesRegex(TypeError, "must be integer or None"):
tensor_shape.Dimension(42).is_compatible_with([])
with self.assertRaisesRegex(ValueError, "must be >= 0"):
tensor_shape.Dimension(42).is_compatible_with(-1)
def testMergeWithError(self):
with self.assertRaisesRegex(TypeError, "must be integer or None"):
tensor_shape.Dimension(42).merge_with([])
with self.assertRaisesRegex(ValueError, "must be >= 0"):
tensor_shape.Dimension(42).merge_with(-1)
def testRepr(self):
self.assertEqual(repr(tensor_shape.Dimension(7)), "Dimension(7)")
self.assertEqual(repr(tensor_shape.Dimension(None)), "Dimension(None)")
def testStr(self):
self.assertEqual(str(tensor_shape.Dimension(7)), "7")
self.assertEqual(str(tensor_shape.Dimension(None)), "?")
def testUnsupportedType(self):
with self.assertRaises(TypeError):
tensor_shape.Dimension(dtypes.string)
def testBool(self):
one = tensor_shape.Dimension(1)
zero = tensor_shape.Dimension(0)
has_none = tensor_shape.Dimension(None)
self.assertTrue(one)
self.assertFalse(zero)
self.assertFalse(has_none)
def testMod(self):
four = tensor_shape.Dimension(4)
nine = tensor_shape.Dimension(9)
self.assertEqual(nine % four, 1)
# test both __mod__ and __rmod__.
self.assertEqual(nine % 4, 1)
self.assertEqual(4 % nine, 4)
def testReduce(self):
dim = tensor_shape.Dimension(5)
ctor, args = dim.__reduce__()
self.assertEqual(ctor, tensor_shape.Dimension)
self.assertEqual(args, (5,))
reconstructed = ctor(*args)
self.assertEqual(reconstructed, dim)
def testDiv(self):
# Note: This test is related to GitHub issue 25790.
six = tensor_shape.Dimension(6)
two = tensor_shape.Dimension(2)
message = (r"unsupported operand type\(s\) for /: "
r"'Dimension' and 'Dimension', please use // instead")
with self.assertRaisesRegex(TypeError, message):
_ = six / two
message = (r"unsupported operand type\(s\) for /: "
r"'Dimension' and 'int', please use // instead")
with self.assertRaisesRegex(TypeError, message):
_ = six / 2
message = (r"unsupported operand type\(s\) for /: "
r"'int' and 'Dimension', please use // instead")
with self.assertRaisesRegex(TypeError, message):
_ = 6 / two
class ShapeTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testUnknownShape(self):
s = tensor_shape.TensorShape(None)
# pylint: disable=g-error-prone-assert-raises
with self.assertRaisesRegex(ValueError, "Shape .+ is not fully defined"):
s.assert_is_fully_defined()
# pylint: enable=g-error-prone-assert-raises
self.assertIsNone(s.rank)
with self.assertRaisesRegex(
ValueError, "Cannot take the length of shape with unknown rank."):
len(s)
self.assertFalse(s)
self.assertIsNone(s.dims)
with self.assertRaisesRegex(
ValueError, "Cannot iterate over a shape with unknown rank."):
for _ in tensor_shape.TensorShape(None):
pass
def testFullyDefinedShape(self):
s = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s.assert_is_fully_defined()
self.assertEqual(s.rank, 3)
self.assertLen(s, 3)
self.assertTrue(s)
s.assert_has_rank(3)
self.assertEqual([tensor_shape.Dimension(3),
tensor_shape.Dimension(4),
tensor_shape.Dimension(7)], s.dims)
self.assertEqual(tensor_shape.Dimension(3), s[0])
self.assertEqual(tensor_shape.Dimension(4), s[1])
self.assertEqual(tensor_shape.Dimension(7), s[2])
self.assertEqual([3, 4, 7], s.as_list())
s.assert_is_compatible_with([3, 4, 7])
s.assert_same_rank([6, 3, 7])
for d1, d2 in zip(s, [3, 4, 7]):
assert tensor_shape.dimension_value(d1) == d2
def testPartiallyDefinedShape(self):
s = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])
# pylint: disable=g-error-prone-assert-raises
with self.assertRaisesRegex(ValueError, "Shape .+ is not fully defined"):
s.assert_is_fully_defined()
# pylint: enable=g-error-prone-assert-raises
self.assertEqual(s.rank, 3)
self.assertLen(s, 3)
self.assertTrue(s)
s.assert_has_rank(3)
self.assertEqual(tensor_shape.Dimension(3), s[0])
self.assertEqual(tensor_shape.Dimension(None).value, s.dims[1].value)
self.assertEqual(tensor_shape.Dimension(7), s.dims[2])
s.assert_same_rank([6, 3, 7])
for d1, d2 in zip(s, [3, None, 7]):
assert tensor_shape.dimension_value(d1) == d2
def testMergeFullShapes(self):
self.assertEqual([3, 4, 7],
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape([3, 4, 7])).as_list())
with self.assertRaises(ValueError):
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape([6, 3, 7]))
def testMergePartialShapes(self):
s1 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)])
s2 = tensor_shape.TensorShape([tensor_shape.Dimension(
None), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
self.assertEqual([3, 4, 7], s1.merge_with(s2).as_list())
def testMergeFullAndUnknownShape(self):
self.assertEqual([3, 4, 7],
tensor_shape.TensorShape([3, 4, 7]).merge_with(
tensor_shape.TensorShape(None)).as_list())
def testSlice(self):
known = tensor_shape.TensorShape([0, 1, 2, 3, 4])
self.assertEqual(tensor_shape.Dimension(2), known[2])
tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(known[1:4])
unknown = tensor_shape.TensorShape(None)
self.assertEqual(
tensor_shape.Dimension(None).value,
tensor_shape.dimension_value(unknown[2]))
tensor_shape.TensorShape(
[None, None, None]).assert_is_compatible_with(unknown[1:4])
@parameterized.named_parameters(
("Concatenate", lambda x, y: x.concatenate(y)),
("Add", lambda x, y: x + y),
("RAdd", lambda x, y: y.__radd__(x)))
def testConcatenate(self, concatenate_fn):
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape([1, 2]),
tensor_shape.TensorShape([3, 4])))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape([1, 2]),
tensor_shape.TensorShape(None)))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape(None),
tensor_shape.TensorShape([3, 4])))
tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)))
@parameterized.named_parameters(
("Concatenate", lambda x, y: x.concatenate(y)),
("Add", lambda x, y: x + y))
def testConcatenateWithDimension(self, concatenate_fn):
tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(
concatenate_fn(
tensor_shape.TensorShape([1, 2]),
tensor_shape.Dimension(3)))
@parameterized.named_parameters(
("List", [3, 4, 5]),
("Tuple", (3, 4, 5)))
def testAdd_nonTensorShape(self, addend):
two = tensor_shape.TensorShape([2])
result = two + addend
self.assertIsInstance(result, tensor_shape.TensorShape)
tensor_shape.TensorShape([2, 3, 4, 5]).assert_is_compatible_with(result)
@parameterized.named_parameters(
("List", [2, 3, 4]),
("Tuple", (2, 3, 4)))
def testRAdd_nonTensorShape(self, addend):
five = tensor_shape.TensorShape([5])
result = addend + five
self.assertIsInstance(result, tensor_shape.TensorShape)
tensor_shape.TensorShape([2, 3, 4, 5]).assert_is_compatible_with(result)
def _testMostSpecificCompatibleShapeHelper(self, x, y, expected):
mcs = tensor_shape.TensorShape(x).most_specific_compatible_shape(
tensor_shape.TensorShape(y))
mcs_dims = mcs.dims
if expected is None or mcs_dims is None:
self.assertIs(expected, mcs_dims)
else:
self.assertEqual(expected, mcs.as_list())
def testMostSpecificCompatibleShape(self):
self._testMostSpecificCompatibleShapeHelper([1, 2], None, None)
self._testMostSpecificCompatibleShapeHelper(None, [1, 2], None)
self._testMostSpecificCompatibleShapeHelper([1, 2], [1, 2, 3, 4], None)
self._testMostSpecificCompatibleShapeHelper([1, 2, 3, 4], [1, 2], None)
self._testMostSpecificCompatibleShapeHelper([1, 2], [1, 2], [1, 2])
self._testMostSpecificCompatibleShapeHelper([None, 2, 3], [1, 1, 3],
[None, None, 3])
self._testMostSpecificCompatibleShapeHelper([1, 1, 3], [None, 2, 3],
[None, None, 3])
def testTruedivFails(self):
unknown = tensor_shape.Dimension(None)
self.assertEqual((unknown // unknown).value, None)
with self.assertRaisesRegex(TypeError, r"unsupported operand type"):
unknown / unknown # pylint: disable=pointless-statement
def testConvertFromProto(self):
def make_tensor_shape_proto(shape):
return tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape])
proto = make_tensor_shape_proto([])
self.assertEqual(tensor_shape.TensorShape([]),
tensor_shape.TensorShape(proto))
self.assertEqual(tensor_shape.TensorShape([]),
tensor_shape.as_shape(proto))
proto = make_tensor_shape_proto([1, 37, 42])
self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
tensor_shape.TensorShape(proto))
self.assertEqual(tensor_shape.TensorShape([1, 37, 42]),
tensor_shape.as_shape(proto))
partial_proto_shape = tensor_shape.as_shape(
make_tensor_shape_proto([-1, 37, 42]))
partial_shape = tensor_shape.TensorShape([None, 37, 42])
self.assertNotEqual(partial_proto_shape, partial_shape)
self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[0]), None)
self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[1]), 37)
self.assertEqual(tensor_shape.dimension_value(partial_proto_shape[2]), 42)
self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape))
def testStr(self):
self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
self.assertEqual(
"(None,)",
str(tensor_shape.unknown_shape(rank=1)).replace("?", "None"))
self.assertEqual(
"(None, None)",
str(tensor_shape.unknown_shape(rank=2)).replace("?", "None"))
self.assertEqual(
"(None, None, None)",
str(tensor_shape.unknown_shape(rank=3)).replace("?", "None"))
self.assertEqual(
"(32, None, 1, 9)",
str(tensor_shape.TensorShape([32, None, 1, 9])).replace("?", "None"))
self.assertEqual("()", str(tensor_shape.TensorShape([])))
self.assertEqual("(7,)", str(tensor_shape.TensorShape([7])))
self.assertEqual("(3, 8)", str(tensor_shape.TensorShape([3, 8])))
self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))
def testAsProto(self):
self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)
self.assertFalse(
tensor_shape.unknown_shape(rank=3).as_proto().unknown_rank)
self.assertFalse(
tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank)
self.assertFalse(
tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank)
def testEquality(self):
s1 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s2 = tensor_shape.TensorShape([tensor_shape.Dimension(
3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)])
s3 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
tensor_shape.Dimension(4), None])
self.assertEqual(s1, s2)
# Test with an unknown shape in s3
self.assertNotEqual(s1, s3)
# eq and neq are not symmetric for unknown shapes.
# pylint: disable=g-generic-assert
unk0 = tensor_shape.unknown_shape()
self.assertFalse(unk0 == s1)
self.assertFalse(s1 == unk0)
# pylint: enable=g-generic-assert
with self.assertRaises(ValueError):
_ = unk0 != s1
with self.assertRaises(ValueError):
_ = s1 != unk0
unk1 = tensor_shape.unknown_shape()
# pylint: disable=g-generic-assert
self.assertTrue(unk0 == unk1)
self.assertTrue(unk1 == unk0)
# pylint: enable=g-generic-assert
with self.assertRaises(ValueError):
_ = unk0 != unk1
with self.assertRaises(ValueError):
_ = unk1 != unk0
def testAsList(self):
with self.assertRaisesRegex(ValueError,
"not defined on an unknown TensorShape"):
tensor_shape.unknown_shape().as_list()
self.assertAllEqual([None, None], tensor_shape.unknown_shape(2).as_list())
self.assertAllEqual([2, None, 4], tensor_shape.TensorShape(
(2, None, 4)).as_list())
def testReduce(self):
shape = tensor_shape.TensorShape([2, 3])
ctor, args = shape.__reduce__()
self.assertEqual(ctor, tensor_shape.TensorShape)
self.assertEqual(args,
([tensor_shape.Dimension(2),
tensor_shape.Dimension(3)],))
reconstructed = ctor(*args)
self.assertEqual(reconstructed, shape)
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
jklenzing/pysat
|
pysat/utils/coords.py
|
2
|
16721
|
"""
pysat.coords - coordinate transformations for pysat
===================================================
pysat.coords contains a number of coordinate-transformation
functions used throughout the pysat package.
"""
import numpy as np
import pandas as pds
def adjust_cyclic_data(samples, high=2.0*np.pi, low=0.0):
"""Adjust cyclic values such as longitude to a different scale
Parameters
-----------
samples : array_like
Input array
    high : float or int
        Upper boundary of the cyclic range (default=2 pi)
    low : float or int
        Lower boundary of the cyclic range (default=0)
    Returns
    --------
    out_samples : array_like
        Input array with values adjusted to fall within the cyclic range
    """
out_samples = np.asarray(samples)
sample_range = high - low
out_samples[out_samples >= high] -= sample_range
out_samples[out_samples < low] += sample_range
return out_samples
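# Hedged usage sketch (not in the original module): wrapping longitudes onto
# the [-180, 180) range expected by update_longitude below.
#     lon = np.array([10.0, 270.0, -190.0])
#     adjust_cyclic_data(lon, high=180.0, low=-180.0)
#     # -> array([ 10., -90., 170.])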
def update_longitude(inst, lon_name=None, high=180.0, low=-180.0):
""" Update longitude to the desired range
Parameters
------------
inst : pysat.Instrument instance
instrument object to be updated
lon_name : string
        name of the longitude data
high : float
Highest allowed longitude value (default=180.0)
low : float
Lowest allowed longitude value (default=-180.0)
Returns
---------
updates instrument data in column 'lon_name'
"""
from pysat.utils.coords import adjust_cyclic_data
if lon_name not in inst.data.keys():
        raise ValueError('unknown longitude variable name')
new_lon = adjust_cyclic_data(inst[lon_name], high=high, low=low)
# Update based on data type
if inst.pandas_format:
inst[lon_name] = new_lon
else:
inst[lon_name].data = new_lon
return
def calc_solar_local_time(inst, lon_name=None, slt_name='slt'):
""" Append solar local time to an instrument object
Parameters
------------
inst : pysat.Instrument instance
instrument object to be updated
lon_name : string
        name of the longitude data key (assumes data are in degrees)
slt_name : string
name of the output solar local time data key (default='slt')
Returns
---------
updates instrument data in column specified by slt_name
"""
import datetime as dt
if lon_name not in inst.data.keys():
        raise ValueError('unknown longitude variable name')
    # Convert from numpy epoch nanoseconds to UT hours of day
ut_hr = list()
for nptime in inst.index.values.astype(int):
# Numpy times come out in nanoseconds and timestamp converts
# from seconds
dtime = dt.datetime.utcfromtimestamp(nptime * 1.0e-9)
ut_hr.append((dtime.hour * 3600.0 + dtime.minute * 60.0 +
dtime.second + dtime.microsecond * 1.0e-6) / 3600.0)
# Calculate solar local time
slt = np.array([t + inst[lon_name][i] / 15.0 for i, t in enumerate(ut_hr)])
# Ensure that solar local time falls between 0 and 24 hours
slt = np.mod(slt, 24.0)
# Add the solar local time to the instrument
if inst.pandas_format:
inst[slt_name] = pds.Series(slt, index=inst.data.index)
else:
data = inst.data.assign(pysat_slt=(inst.data.coords.keys(), slt))
data.rename({"pysat_slt": slt_name}, inplace=True)
inst.data = data
# Add units to the metadata
inst.meta[slt_name] = {inst.meta.units_label: 'h',
inst.meta.name_label: "Solar Local Time",
inst.meta.desc_label: "Solar local time",
inst.meta.plot_label: "SLT",
inst.meta.axis_label: "SLT",
inst.meta.scale_label: "linear",
inst.meta.min_label: 0.0,
inst.meta.max_label: 24.0,
inst.meta.fill_label: np.nan}
return
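# Worked example (illustrative, not from the original source): at 12:00 UT,
# longitude 90 E gives slt = 12.0 + 90.0 / 15.0 = 18.0 h, while 90 W gives
# 12.0 - 6.0 = 6.0 h; np.mod(slt, 24.0) keeps values in [0, 24).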
def scale_units(out_unit, in_unit):
"""Deprecated function, moved to pysat.utils._core"""
import warnings
from pysat import utils
    warnings.warn(' '.join(["utils.coords.scale_units is deprecated, use",
                            "pysat.utils.scale_units instead"]),
                  DeprecationWarning, stacklevel=2)
unit_scale = utils.scale_units(out_unit, in_unit)
return unit_scale
def geodetic_to_geocentric(lat_in, lon_in=None, inverse=False):
"""Converts position from geodetic to geocentric or vice-versa.
Parameters
----------
lat_in : float
latitude in degrees.
lon_in : float or NoneType
longitude in degrees. Remains unchanged, so does not need to be
included. (default=None)
inverse : bool
False for geodetic to geocentric, True for geocentric to geodetic.
(default=False)
Returns
-------
lat_out : float
latitude [degree] (geocentric/detic if inverse=False/True)
lon_out : float or NoneType
longitude [degree] (geocentric/detic if inverse=False/True)
rad_earth : float
Earth radius [km] (geocentric/detic if inverse=False/True)
Notes
-----
Uses WGS-84 values
References
----------
Based on J.M. Ruohoniemi's geopack and R.J. Barnes radar.pro
"""
rad_eq = 6378.1370 # WGS-84 semi-major axis
flat = 1.0 / 298.257223563 # WGS-84 flattening
rad_pol = rad_eq * (1.0 - flat) # WGS-84 semi-minor axis
# The ratio between the semi-major and minor axis is used several times
rad_ratio_sq = (rad_eq / rad_pol)**2
# Calculate the square of the second eccentricity (e')
eprime_sq = rad_ratio_sq - 1.0
# Calculate the tangent of the input latitude
tan_in = np.tan(np.radians(lat_in))
# If converting from geodetic to geocentric, take the inverse of the
# radius ratio
if not inverse:
rad_ratio_sq = 1.0 / rad_ratio_sq
# Calculate the output latitude
lat_out = np.degrees(np.arctan(rad_ratio_sq * tan_in))
# Calculate the Earth radius at this latitude
rad_earth = rad_eq / np.sqrt(1.0 + eprime_sq *
np.sin(np.radians(lat_out))**2)
# longitude remains unchanged
lon_out = lon_in
return lat_out, lon_out, rad_earth
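# Hedged sanity checks (not in the original source): geodetic and geocentric
# latitudes coincide at the equator and poles, where the returned radius
# reduces to the WGS-84 semi-major and semi-minor axes respectively.
#     geodetic_to_geocentric(0.0, lon_in=45.0)   # -> (0.0, 45.0, 6378.1370)
#     geodetic_to_geocentric(90.0)               # -> (90.0, None, ~6356.7523)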
def geodetic_to_geocentric_horizontal(lat_in, lon_in, az_in, el_in,
inverse=False):
"""Converts from local horizontal coordinates in a geodetic system to local
horizontal coordinates in a geocentric system
Parameters
----------
lat_in : float
latitude in degrees of the local horizontal coordinate system center
lon_in : float
longitude in degrees of the local horizontal coordinate system center
az_in : float
azimuth in degrees within the local horizontal coordinate system
el_in : float
elevation in degrees within the local horizontal coordinate system
inverse : bool
False for geodetic to geocentric, True for inverse (default=False)
Returns
-------
lat_out : float
latitude in degrees of the converted horizontal coordinate system
center
lon_out : float
longitude in degrees of the converted horizontal coordinate system
center
rad_earth : float
Earth radius in km at the geocentric/detic (False/True) location
az_out : float
azimuth in degrees of the converted horizontal coordinate system
el_out : float
elevation in degrees of the converted horizontal coordinate system
References
----------
Based on J.M. Ruohoniemi's geopack and R.J. Barnes radar.pro
"""
az = np.radians(az_in)
el = np.radians(el_in)
# Transform the location of the local horizontal coordinate system center
lat_out, lon_out, rad_earth = geodetic_to_geocentric(lat_in, lon_in,
inverse=inverse)
    # Calculate the deviation from vertical in radians
dev_vert = np.radians(lat_in - lat_out)
    # Calculate cartesian coordinates in the local system
x_local = np.cos(el) * np.sin(az)
y_local = np.cos(el) * np.cos(az)
z_local = np.sin(el)
# Now rotate system about the x axis to align local vertical vector
# with Earth radial vector
x_out = x_local
y_out = y_local * np.cos(dev_vert) + z_local * np.sin(dev_vert)
z_out = -y_local * np.sin(dev_vert) + z_local * np.cos(dev_vert)
# Transform the azimuth and elevation angles
az_out = np.degrees(np.arctan2(x_out, y_out))
el_out = np.degrees(np.arctan(z_out / np.sqrt(x_out**2 + y_out**2)))
return lat_out, lon_out, rad_earth, az_out, el_out
def spherical_to_cartesian(az_in, el_in, r_in, inverse=False):
"""Convert a position from spherical to cartesian, or vice-versa
Parameters
----------
az_in : float
azimuth/longitude in degrees or cartesian x in km (inverse=False/True)
el_in : float
elevation/latitude in degrees or cartesian y in km (inverse=False/True)
r_in : float
distance from origin in km or cartesian z in km (inverse=False/True)
    inverse : bool
False to go from spherical to cartesian and True for the inverse
Returns
-------
x_out : float
cartesian x in km or azimuth/longitude in degrees (inverse=False/True)
y_out : float
cartesian y in km or elevation/latitude in degrees (inverse=False/True)
z_out : float
cartesian z in km or distance from origin in km (inverse=False/True)
Notes
------
This transform is the same for local or global spherical/cartesian
transformations.
Returns elevation angle (angle from the xy plane) rather than zenith angle
(angle from the z-axis)
"""
if inverse:
# Cartesian to Spherical
xy_sq = az_in**2 + el_in**2
z_out = np.sqrt(xy_sq + r_in**2) # This is r
y_out = np.degrees(np.arctan2(np.sqrt(xy_sq), r_in)) # This is zenith
y_out = 90.0 - y_out # This is the elevation
x_out = np.degrees(np.arctan2(el_in, az_in)) # This is azimuth
else:
# Spherical coordinate system uses zenith angle (degrees from the
# z-axis) and not the elevation angle (degrees from the x-y plane)
zen_in = np.radians(90.0 - el_in)
# Spherical to Cartesian
x_out = r_in * np.sin(zen_in) * np.cos(np.radians(az_in))
y_out = r_in * np.sin(zen_in) * np.sin(np.radians(az_in))
z_out = r_in * np.cos(zen_in)
return x_out, y_out, z_out
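# Hedged round-trip sketch (illustration only): cartesian output converts
# back to the original spherical input with inverse=True.
#     x, y, z = spherical_to_cartesian(45.0, 30.0, 100.0)
#     az, el, r = spherical_to_cartesian(x, y, z, inverse=True)
#     # az -> 45.0, el -> 30.0, r -> 100.0 (to floating-point precision)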
def global_to_local_cartesian(x_in, y_in, z_in, lat_cent, lon_cent, rad_cent,
inverse=False):
"""Converts a position from global to local cartesian or vice-versa
Parameters
----------
x_in : float
global or local cartesian x in km (inverse=False/True)
y_in : float
global or local cartesian y in km (inverse=False/True)
z_in : float
global or local cartesian z in km (inverse=False/True)
lat_cent : float
geocentric latitude in degrees of local cartesian system origin
lon_cent : float
geocentric longitude in degrees of local cartesian system origin
rad_cent : float
distance from center of the Earth in km of local cartesian system
origin
inverse : bool
        False to convert from global to local cartesian coordinates, and True
for the inverse (default=False)
Returns
-------
x_out : float
local or global cartesian x in km (inverse=False/True)
y_out : float
local or global cartesian y in km (inverse=False/True)
z_out : float
local or global cartesian z in km (inverse=False/True)
Notes
-------
The global cartesian coordinate system has its origin at the center of the
Earth, while the local system has its origin specified by the input
latitude, longitude, and radius. The global system has x intersecting
the equatorial plane and the prime meridian, z pointing North along the
    rotational axis, and y completing the right-handed coordinate system.
The local system has z pointing up, y pointing North, and x pointing East.
"""
# Get the global cartesian coordinates of local origin
x_cent, y_cent, z_cent = spherical_to_cartesian(lon_cent, lat_cent,
rad_cent)
    # Get the amount of rotation needed to align the local z-axis with the
    # Earth's rotational axis
ax_rot = np.radians(90.0 - lat_cent)
# Get the amount of rotation needed to align the global x-axis with the
# prime meridian
mer_rot = np.radians(lon_cent - 90.0)
if inverse:
# Rotate about the x-axis to align the z-axis with the Earth's
# rotational axis
xrot = x_in
yrot = y_in * np.cos(ax_rot) - z_in * np.sin(ax_rot)
zrot = y_in * np.sin(ax_rot) + z_in * np.cos(ax_rot)
# Rotate about the global z-axis to get the global x-axis aligned
# with the prime meridian and translate the local center to the
# global origin
x_out = xrot * np.cos(mer_rot) - yrot * np.sin(mer_rot) + x_cent
y_out = xrot * np.sin(mer_rot) + yrot * np.cos(mer_rot) + y_cent
z_out = zrot + z_cent
else:
# Translate global origin to the local origin
xtrans = x_in - x_cent
ytrans = y_in - y_cent
ztrans = z_in - z_cent
# Rotate about the global z-axis to get the local x-axis pointing East
xrot = xtrans * np.cos(-mer_rot) - ytrans * np.sin(-mer_rot)
yrot = xtrans * np.sin(-mer_rot) + ytrans * np.cos(-mer_rot)
zrot = ztrans
# Rotate about the x-axis to get the z-axis pointing up
x_out = xrot
y_out = yrot * np.cos(-ax_rot) - zrot * np.sin(-ax_rot)
z_out = yrot * np.sin(-ax_rot) + zrot * np.cos(-ax_rot)
return x_out, y_out, z_out
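# Hedged round-trip sketch (not in the original source): converting from
# global to local and back with the same origin recovers the input.
#     x_l, y_l, z_l = global_to_local_cartesian(100.0, 200.0, 6671.0,
#                                               45.0, 30.0, 6371.0)
#     global_to_local_cartesian(x_l, y_l, z_l, 45.0, 30.0, 6371.0,
#                               inverse=True)   # -> (100.0, 200.0, 6671.0)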
def local_horizontal_to_global_geo(az, el, dist, lat_orig, lon_orig, alt_orig,
geodetic=True):
""" Convert from local horizontal coordinates to geodetic or geocentric
coordinates
Parameters
----------
az : float
Azimuth (angle from North) of point in degrees
el : float
Elevation (angle from ground) of point in degrees
dist : float
Distance from origin to point in km
lat_orig : float
Latitude of origin in degrees
lon_orig : float
Longitude of origin in degrees
alt_orig : float
Altitude of origin in km from the surface of the Earth
geodetic : bool
True if origin coordinates are geodetic, False if they are geocentric.
Will return coordinates in the same system as the origin input.
(default=True)
Returns
-------
lat_pnt : float
Latitude of point in degrees
lon_pnt : float
Longitude of point in degrees
rad_pnt : float
Distance to the point from the centre of the Earth in km
References
----------
Based on J.M. Ruohoniemi's geopack and R.J. Barnes radar.pro
"""
    # If the data are in geodetic coordinates, convert to geocentric
if geodetic:
(glat, glon, rearth, gaz, gel) = \
geodetic_to_geocentric_horizontal(lat_orig, lon_orig, az, el,
inverse=False)
grad = rearth + alt_orig
else:
glat = lat_orig
glon = lon_orig
grad = alt_orig + 6371.0 # Add the mean earth radius in km
gaz = az
gel = el
    # Convert from local horizontal to local cartesian coordinates
x_loc, y_loc, z_loc = spherical_to_cartesian(gaz, gel, dist, inverse=False)
    # Convert from local to global cartesian coordinates
x_glob, y_glob, z_glob = global_to_local_cartesian(x_loc, y_loc, z_loc,
glat, glon, grad,
inverse=True)
# Convert from global cartesian to geocentric coordinates
lon_pnt, lat_pnt, rad_pnt = spherical_to_cartesian(x_glob, y_glob, z_glob,
inverse=True)
# Convert from geocentric to geodetic, if desired
if geodetic:
lat_pnt, lon_pnt, rearth = geodetic_to_geocentric(lat_pnt, lon_pnt,
inverse=True)
rad_pnt = rearth + rad_pnt - 6371.0
return lat_pnt, lon_pnt, rad_pnt
|
bsd-3-clause
|
philsch/ansible
|
lib/ansible/modules/cloud/webfaction/webfaction_db.py
|
63
|
6442
|
#!/usr/bin/python
#
# Create a webfaction database using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
# * Andy Baker
# * Federico Tarantini
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_db
short_description: Add or remove a database on Webfaction
description:
- Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the database
required: true
state:
description:
- Whether the database should exist
required: false
choices: ['present', 'absent']
default: "present"
type:
description:
- The type of database to create.
required: true
choices: ['mysql', 'postgresql']
password:
description:
- The password for the new database user.
required: false
default: None
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
machine:
description:
- The machine name to use (optional for accounts with only one machine)
required: false
'''
EXAMPLES = '''
# This will also create a default DB user with the same
# name as the database, and the specified password.
- name: Create a database
webfaction_db:
name: "{{webfaction_user}}_db1"
password: mytestsql
type: mysql
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
machine: "{{webfaction_machine}}"
# Note that, for symmetry's sake, deleting a database using
# 'state: absent' will also delete the matching user.
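# A hypothetical removal play (sketch only, reusing the variables above):
- name: Remove the database and its default user
  webfaction_db:
    name: "{{webfaction_user}}_db1"
    type: mysql
    state: absent
    login_name: "{{webfaction_user}}"
    login_password: "{{webfaction_passwd}}"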
'''
import socket
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
type = dict(required=True),
password = dict(required=False, default=None, no_log=True),
login_name = dict(required=True),
login_password = dict(required=True, no_log=True),
machine = dict(required=False, default=False),
),
supports_check_mode=True
)
db_name = module.params['name']
db_state = module.params['state']
db_type = module.params['type']
db_passwd = module.params['password']
if module.params['machine']:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password'],
module.params['machine']
)
else:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
db_list = webfaction.list_dbs(session_id)
db_map = dict([(i['name'], i) for i in db_list])
existing_db = db_map.get(db_name)
user_list = webfaction.list_db_users(session_id)
user_map = dict([(i['username'], i) for i in user_list])
existing_user = user_map.get(db_name)
result = {}
# Here's where the real stuff happens
if db_state == 'present':
# Does a database with this name already exist?
if existing_db:
# Yes, but of a different type - fail
if existing_db['db_type'] != db_type:
module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
# If it exists with the right type, we don't change anything.
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, create the db
# and default user.
result.update(
webfaction.create_db(
session_id, db_name, db_type, db_passwd
)
)
elif db_state == 'absent':
# If this isn't a dry run...
if not module.check_mode:
if not (existing_db or existing_user):
module.exit_json(changed = False,)
if existing_db:
# Delete the db if it exists
result.update(
webfaction.delete_db(session_id, db_name, db_type)
)
if existing_user:
# Delete the default db user if it exists
result.update(
webfaction.delete_db_user(session_id, db_name, db_type)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(db_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
NerdHerd91/flailbot
|
flailbot.py
|
1
|
1879
|
# FlailBot - Provides on-demand flailing capabilities.
# Much of this structure is borrowed from the following example on the TwistedMatrix site.
# https://twistedmatrix.com/documents/12.0.0/core/howto/clients.html
# twisted imports
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
# system imports
import sys
class FlailBot(irc.IRCClient):
"""A flailing IRC bot."""
nickname = "FlailBot"
def connectionMade(self):
irc.IRCClient.connectionMade(self)
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
def signedOn(self):
self.join(self.factory.channel)
def privmsg(self, user, channel, msg):
if msg == ".awk":
self.msg(channel, "BUT AWKWARDDDDDDD D:")
if msg == ".fail":
self.msg(channel, "WHAT IF FAILLLL?! D:")
if msg == ".flail":
self.msg(channel, "FLAILLLLLLLLLL!!!")
if msg == "Do you like cupcakes?":
self.msg(channel, "I LOVE CUPCAKES!!!")
if msg == "Really?":
self.msg(channel, "REALLY!!!")
class FlailBotFactory(protocol.ClientFactory):
"""A factory for FlailBots"""
def __init__(self, channel):
self.channel = channel
def buildProtocol(self, addr):
p = FlailBot()
p.factory = self
return p
def clientConnectionLost(self, connector, reason):
"""If we get disconnected, reconnect to server."""
connector.connect()
def clientConnectionFailed(self, connector, reason):
print "connection failed:", reason
reactor.stop()
if __name__ == '__main__':
# create factory protocol and application
f = FlailBotFactory(sys.argv[1])
# connect factory to this host and port
reactor.connectTCP("irc.adelais.net", 6667, f)
# run bot
reactor.run()
|
mit
|
4doemaster/enigma2
|
lib/python/Screens/RdsDisplay.py
|
18
|
8347
|
from enigma import iPlayableService, iRdsDecoder
from Screens.Screen import Screen
from Components.ActionMap import NumberActionMap
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
class RdsInfoDisplaySummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent = parent)
self["message"] = StaticText("")
self.parent.onText.append(self.onText)
def onText(self, message):
self["message"].text = message
if message and len(message):
self.show()
else:
self.hide()
class RdsInfoDisplay(Screen):
ALLOW_SUSPEND = True
def __init__(self, session):
Screen.__init__(self, session)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEnd: self.__serviceStopped,
iPlayableService.evUpdatedRadioText: self.RadioTextChanged,
iPlayableService.evUpdatedRtpText: self.RtpTextChanged,
iPlayableService.evUpdatedRassInteractivePicMask: self.RassInteractivePicMaskChanged,
})
self["RadioText"] = Label()
self["RtpText"] = Label()
self["RassLogo"] = Pixmap()
self.onLayoutFinish.append(self.hideWidgets)
self.rassInteractivePossible=False
self.onRassInteractivePossibilityChanged = [ ]
self.onText = [ ]
def createSummary(self):
return RdsInfoDisplaySummary
def hideWidgets(self):
for x in (self["RadioText"],self["RtpText"],self["RassLogo"]):
x.hide()
for x in self.onText:
x('')
def RadioTextChanged(self):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
rdsText = decoder and decoder.getText(iRdsDecoder.RadioText)
if rdsText and len(rdsText):
self["RadioText"].setText(rdsText)
self["RadioText"].show()
else:
self["RadioText"].hide()
for x in self.onText:
x(rdsText)
def RtpTextChanged(self):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
rtpText = decoder and decoder.getText(iRdsDecoder.RtpText)
if rtpText and len(rtpText):
self["RtpText"].setText(rtpText)
self["RtpText"].show()
else:
self["RtpText"].hide()
for x in self.onText:
x(rtpText)
def RassInteractivePicMaskChanged(self):
if not self.rassInteractivePossible:
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
mask = decoder and decoder.getRassInteractiveMask()
if mask[0] & 1: #rass interactive index page available
self["RassLogo"].show()
self.rassInteractivePossible = True
for x in self.onRassInteractivePossibilityChanged:
x(True)
def __serviceStopped(self):
self.hideWidgets()
if self.rassInteractivePossible:
self.rassInteractivePossible = False
for x in self.onRassInteractivePossibilityChanged:
x(False)
class RassInteractive(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = NumberActionMap( [ "NumberActions", "RassInteractiveActions" ],
{
"exit": self.close,
"0": lambda x : self.numPressed(0),
"1": lambda x : self.numPressed(1),
"2": lambda x : self.numPressed(2),
"3": lambda x : self.numPressed(3),
"4": lambda x : self.numPressed(4),
"5": lambda x : self.numPressed(5),
"6": lambda x : self.numPressed(6),
"7": lambda x : self.numPressed(7),
"8": lambda x : self.numPressed(8),
"9": lambda x : self.numPressed(9),
"nextPage": self.nextPage,
"prevPage": self.prevPage,
"nextSubPage": self.nextSubPage,
"prevSubPage": self.prevSubPage
})
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedRassInteractivePicMask: self.recvRassInteractivePicMaskChanged
})
self["subpages_1"] = Pixmap()
self["subpages_2"] = Pixmap()
self["subpages_3"] = Pixmap()
self["subpages_4"] = Pixmap()
self["subpages_5"] = Pixmap()
self["subpages_6"] = Pixmap()
self["subpages_7"] = Pixmap()
self["subpages_8"] = Pixmap()
self["subpages_9"] = Pixmap()
self["Marker"] = Label(">")
self.subpage = {
1 : self["subpages_1"],
2 : self["subpages_2"],
3 : self["subpages_3"],
4 : self["subpages_4"],
5 : self["subpages_5"],
6 : self["subpages_6"],
7 : self["subpages_7"],
8 : self["subpages_8"],
9 : self["subpages_9"] }
self.subpage_png = {
1 : LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/rass_page1.png")),
2 : LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/rass_page2.png")),
3 : LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/rass_page3.png")),
4 : LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/rass_page4.png")) }
        self.current_page = 0
        self.current_subpage = 0
self.showRassPage(0,0)
self.onLayoutFinish.append(self.updateSubPagePixmaps)
def updateSubPagePixmaps(self):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
if not decoder: # this should never happen
print "NO RDS DECODER in showRassPage"
else:
mask = decoder.getRassInteractiveMask()
page = 1
while page < 10:
subpage_cnt = self.countAvailSubpages(page, mask)
subpage = self.subpage[page]
if subpage_cnt > 0:
if subpage.instance:
png = self.subpage_png[subpage_cnt]
if png:
subpage.instance.setPixmap(png)
subpage.show()
else:
print "rass png missing"
else:
subpage.hide()
page += 1
def recvRassInteractivePicMaskChanged(self):
self.updateSubPagePixmaps()
def showRassPage(self, page, subpage):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
if not decoder: # this should never happen
print "NO RDS DECODER in showRassPage"
else:
decoder.showRassInteractivePic(page, subpage)
page_diff = page - self.current_page
self.current_page = page
if page_diff:
current_pos = self["Marker"].getPosition()
y = current_pos[1]
y += page_diff * 25
self["Marker"].setPosition(current_pos[0],y)
def getMaskForPage(self, page, masks=None):
if not masks:
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
            if not decoder: # this should never happen
                print "NO RDS DECODER in getMaskForPage"
            else:
                masks = decoder.getRassInteractiveMask()
if masks:
mask = masks[(page*4)/8]
if page % 2:
mask >>= 4
else:
mask &= 0xF
return mask
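    # Illustrative note (not from the original source): the interactive mask
    # packs one 4-bit subpage field per page, two pages per byte. With
    # masks = [0x21], getMaskForPage(0) returns 0x21 & 0xF == 0x1 and
    # getMaskForPage(1) returns 0x21 >> 4 == 0x2.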
def countAvailSubpages(self, page, masks):
mask = self.getMaskForPage(page, masks)
cnt = 0
while mask:
if mask & 1:
cnt += 1
mask >>= 1
return cnt
def nextPage(self):
mask = 0
page = self.current_page
while mask == 0:
page += 1
if page > 9:
page = 0
mask = self.getMaskForPage(page)
self.numPressed(page)
def prevPage(self):
mask = 0
page = self.current_page
while mask == 0:
if page > 0:
page -= 1
else:
page = 9
mask = self.getMaskForPage(page)
self.numPressed(page)
def nextSubPage(self):
self.numPressed(self.current_page)
def prevSubPage(self):
num = self.current_page
mask = self.getMaskForPage(num)
cur_bit = 1 << self.current_subpage
tmp = cur_bit
while True:
if tmp == 1:
tmp = 8
else:
tmp >>= 1
if tmp == cur_bit: # no other subpage avail
return
if mask & tmp: # next subpage found
subpage = 0
while tmp > 1: # convert bit to subpage
subpage += 1
tmp >>= 1
self.current_subpage = subpage
self.showRassPage(num, subpage)
return
def numPressed(self, num):
mask = self.getMaskForPage(num)
if self.current_page == num:
self.skip = 0
cur_bit = 1 << self.current_subpage
tmp = cur_bit
else:
self.skip = 1
cur_bit = 16
tmp = 1
while True:
if not self.skip:
if tmp == 8 and cur_bit < 16:
tmp = 1
else:
tmp <<= 1
else:
self.skip = 0
if tmp == cur_bit: # no other subpage avail
return
if mask & tmp: # next subpage found
subpage = 0
while tmp > 1: # convert bit to subpage
subpage += 1
tmp >>= 1
self.current_subpage = subpage
self.showRassPage(num, subpage)
return
|
gpl-2.0
|
idekerlab/graph-services
|
services/nx_pagerank/service/test/cxmate_pb2.py
|
5
|
92271
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cxmate.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cxmate.proto',
package='proto',
syntax='proto3',
serialized_pb=_b('\n\x0c\x63xmate.proto\x12\x05proto\"\xc8\x08\n\x0eNetworkElement\x12\r\n\x05label\x18\x01 \x01(\t\x12%\n\tparameter\x18\x02 \x01(\x0b\x32\x10.proto.ParameterH\x00\x12\x1d\n\x05\x65rror\x18\x03 \x01(\x0b\x32\x0c.proto.ErrorH\x00\x12\x1b\n\x04node\x18\x04 \x01(\x0b\x32\x0b.proto.NodeH\x00\x12\x1b\n\x04\x65\x64ge\x18\x05 \x01(\x0b\x32\x0b.proto.EdgeH\x00\x12-\n\rnodeAttribute\x18\x06 \x01(\x0b\x32\x14.proto.NodeAttributeH\x00\x12-\n\redgeAttribute\x18\x07 \x01(\x0b\x32\x14.proto.EdgeAttributeH\x00\x12\x33\n\x10networkAttribute\x18\x08 \x01(\x0b\x32\x17.proto.NetworkAttributeH\x00\x12\x39\n\x13\x43\x61rtesianCoordinate\x18\t \x01(\x0b\x32\x1a.proto.CartesianCoordinateH\x00\x12!\n\x07\x63yGroup\x18\n \x01(\x0b\x32\x0e.proto.CyGroupH\x00\x12\x1f\n\x06\x63yView\x18\x0b \x01(\x0b\x32\r.proto.CyViewH\x00\x12\x33\n\x10\x63yVisualProperty\x18\x0c \x01(\x0b\x32\x17.proto.CyVisualPropertyH\x00\x12\x35\n\x11\x63yHiddenAttribute\x18\r \x01(\x0b\x32\x18.proto.CyHiddenAttributeH\x00\x12\x35\n\x11\x63yNetworkRelation\x18\x0e \x01(\x0b\x32\x18.proto.CyNetworkRelationH\x00\x12+\n\x0c\x63ySubNetwork\x18\x0f \x01(\x0b\x32\x13.proto.CySubNetworkH\x00\x12-\n\rcyTableColumn\x18\x10 \x01(\x0b\x32\x14.proto.CyTableColumnH\x00\x12\'\n\nndexStatus\x18\x11 \x01(\x0b\x32\x11.proto.NdexStatusH\x00\x12#\n\x08\x63itation\x18\x12 \x01(\x0b\x32\x0f.proto.CitationH\x00\x12-\n\rnodeCitations\x18\x13 \x01(\x0b\x32\x14.proto.NodeCitationsH\x00\x12-\n\redgeCitations\x18\x14 \x01(\x0b\x32\x14.proto.EdgeCitationsH\x00\x12!\n\x07support\x18\x15 \x01(\x0b\x32\x0e.proto.SupportH\x00\x12\x31\n\x0fnodeSupportance\x18\x16 \x01(\x0b\x32\x16.proto.NodeSupportanceH\x00\x12\x31\n\x0f\x65\x64geSupportance\x18\x17 \x01(\x0b\x32\x16.proto.EdgeSupportanceH\x00\x12+\n\x0c\x66unctionTerm\x18\x18 \x01(\x0b\x32\x13.proto.FunctionTermH\x00\x12)\n\x0breifiedEdge\x18\x19 \x01(\x0b\x32\x12.proto.ReifiedEdgeH\x00\x42\t\n\x07\x65lement\"\x90\x01\n\tParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06\x66ormat\x18\x02 \x01(\t\x12\x15\n\x0bstringValue\x18\x03 \x01(\tH\x00\x12\x16\n\x0c\x62ooleanValue\x18\x04 \x01(\x08H\x00\x12\x16\n\x0cintegerValue\x18\x05 \x01(\x03H\x00\x12\x15\n\x0bnumberValue\x18\x06 \x01(\x01H\x00\x42\x07\n\x05value\"D\n\x05\x45rror\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x0c\n\x04link\x18\x04 \x01(\t\"?\n\x04Node\x12\x0f\n\x02id\x18\x01 \x01(\x03R\x03@id\x12\x0f\n\x04name\x18\x02 \x01(\tR\x01n\x12\x15\n\nrepresents\x18\x03 \x01(\tR\x01r\"Y\n\x04\x45\x64ge\x12\x0f\n\x02id\x18\x01 \x01(\x03R\x03@id\x12\x13\n\x08sourceId\x18\x02 \x01(\x03R\x01s\x12\x13\n\x08targetId\x18\x03 \x01(\x03R\x01t\x12\x16\n\x0binteraction\x18\x04 \x01(\tR\x01i\"l\n\rNodeAttribute\x12\x12\n\x06nodeId\x18\x01 \x01(\x03R\x02po\x12\x0f\n\x04name\x18\x02 \x01(\tR\x01n\x12\x10\n\x05value\x18\x03 \x01(\tR\x01v\x12\x0f\n\x04type\x18\x04 \x01(\tR\x01\x64\x12\x13\n\x08subnetId\x18\x05 \x01(\x03R\x01s\"l\n\rEdgeAttribute\x12\x12\n\x06\x65\x64geId\x18\x01 \x01(\x03R\x02po\x12\x0f\n\x04name\x18\x02 \x01(\tR\x01n\x12\x10\n\x05value\x18\x03 \x01(\tR\x01v\x12\x0f\n\x04type\x18\x04 \x01(\tR\x01\x64\x12\x13\n\x08subnetId\x18\x05 \x01(\x03R\x01s\"[\n\x10NetworkAttribute\x12\x0f\n\x04name\x18\x01 \x01(\tR\x01n\x12\x10\n\x05value\x18\x02 \x01(\tR\x01v\x12\x0f\n\x04type\x18\x03 \x01(\tR\x01\x64\x12\x13\n\x08subnetId\x18\x04 \x01(\x03R\x01s\"k\n\x13\x43\x61rtesianCoordinate\x12\x14\n\x06nodeId\x18\x01 \x01(\x03R\x04node\x12\x0c\n\x01x\x18\x02 \x01(\x01R\x01x\x12\x0c\n\x01y\x18\x03 '
'\x01(\x01R\x01y\x12\x0c\n\x01z\x18\x04 \x01(\x01R\x01z\x12\x14\n\x06viewId\x18\x05 \x01(\x03R\x04view\"\xa5\x01\n\x07\x43yGroup\x12\x0f\n\x02id\x18\x01 \x01(\x03R\x03@id\x12\x12\n\x04view\x18\x02 \x01(\x03R\x04view\x12\x12\n\x04name\x18\x03 \x01(\tR\x04name\x12\x14\n\x05nodes\x18\x04 \x03(\x03R\x05nodes\x12$\n\rexternalEdges\x18\x05 \x03(\x03R\rexternal_edge\x12%\n\rinternalEdges\x18\x06 \x03(\x03R\x0einternal_edges\".\n\x06\x43yView\x12\x0f\n\x02id\x18\x01 \x01(\x03R\x03@id\x12\x13\n\x08subnetId\x18\x02 \x01(\x03R\x01s\"\xd5\x03\n\x10\x43yVisualProperty\x12\x1c\n\x05owner\x18\x01 \x01(\tR\rproperties_of\x12\x1b\n\x07ownerId\x18\x02 \x01(\x03R\napplies_to\x12\x12\n\x04view\x18\x04 \x01(\x03R\x04view\x12G\n\nproperties\x18\x05 \x03(\x0b\x32\'.proto.CyVisualProperty.PropertiesEntryR\nproperties\x12M\n\x0c\x64\x65pendencies\x18\x06 \x03(\x0b\x32).proto.CyVisualProperty.DependenciesEntryR\x0c\x64\x65pendencies\x12\x41\n\x08mappings\x18\x07 \x03(\x0b\x32%.proto.CyVisualProperty.MappingsEntryR\x08mappings\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x33\n\x11\x44\x65pendenciesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a/\n\rMappingsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"\\\n\x11\x43yHiddenAttribute\x12\x0f\n\x04name\x18\x01 \x01(\tR\x01n\x12\x10\n\x05value\x18\x02 \x01(\tR\x01v\x12\x0f\n\x04type\x18\x03 \x01(\tR\x01\x64\x12\x13\n\x08subnetId\x18\x04 \x01(\x03R\x01s\"i\n\x11\x43yNetworkRelation\x12\x13\n\x08parentId\x18\x01 \x01(\x03R\x01p\x12\x12\n\x07\x63hildId\x18\x02 \x01(\x03R\x01\x63\x12\x17\n\x0crelationship\x18\x03 \x01(\tR\x01r\x12\x12\n\x04name\x18\x04 \x01(\tR\x04name\"K\n\x0c\x43ySubNetwork\x12\x0f\n\x02id\x18\x01 \x01(\x03R\x03@id\x12\x14\n\x05\x65\x64ges\x18\x02 \x03(\x03R\x05\x65\x64ges\x12\x14\n\x05nodes\x18\x03 \x03(\x03R\x05nodes\"k\n\rCyTableColumn\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n\x04type\x18\x02 \x01(\tR\x04type\x12\x14\n\x05owner\x18\x03 \x01(\tR\x05owner\x12\x1c\n\x08subnetId\x18\x04 \x01(\x03R\napplies_to\"\xf7\x01\n\nNdexStatus\x12\x16\n\x02id\x18\x01 \x01(\tR\nexternalId\x12\x1f\n\x08location\x18\x02 \x01(\tR\rndexServerURI\x12\x14\n\x05owner\x18\x03 \x01(\tR\x05owner\x12\"\n\x0c\x63reationTime\x18\x04 \x01(\tR\x0c\x63reationTime\x12\x1a\n\x08readOnly\x18\x05 \x01(\x08R\x08readOnly\x12\x1e\n\nvisibility\x18\x06 \x01(\tR\nvisibility\x12\x1c\n\tedgeCount\x18\x07 \x01(\x03R\tedgeCount\x12\x1c\n\tnodeCount\x18\x08 \x01(\x03R\tnodeCount\"\xf2\x01\n\x08\x43itation\x12\x0f\n\x02id\x18\x01 \x01(\tR\x03@id\x12\x17\n\x05title\x18\x02 \x01(\tR\x08\x64\x63:title\x12#\n\x0b\x64\x65scription\x18\x03 \x01(\tR\x0e\x64\x63:description\x12#\n\x0b\x63ontributor\x18\x04 \x01(\tR\x0e\x64\x63:contributor\x12!\n\nidentifier\x18\x05 \x01(\tR\rdc:identifier\x12\x15\n\x04type\x18\x06 \x01(\tR\x07\x64\x63:type\x12\x38\n\nattributes\x18\x07 \x03(\x0b\x32\x18.proto.CitationAttributeR\nattributes\"G\n\x11\x43itationAttribute\x12\x0f\n\x04name\x18\x01 \x01(\tR\x01n\x12\x10\n\x05value\x18\x02 \x01(\tR\x01v\x12\x0f\n\x04type\x18\x03 \x01(\tR\x01t\"@\n\rNodeCitations\x12\x18\n\x05nodes\x18\x01 \x03(\x03R\tcitations\x12\x15\n\tcitations\x18\x02 \x03(\x03R\x02po\"@\n\rEdgeCitations\x12\x18\n\x05\x65\x64ges\x18\x01 \x03(\x03R\tcitations\x12\x15\n\tcitations\x18\x02 \x03(\x03R\x02po\"\x85\x01\n\x07Support\x12\x0f\n\x02id\x18\x01 \x01(\x03R\x03@id\x12\x1c\n\ncitationId\x18\x02 \x01(\x03R\x08\x63itation\x12\x12\n\x04text\x18\x03 '
'\x01(\tR\x04text\x12\x37\n\nattributes\x18\x04 \x03(\x0b\x32\x17.proto.SupportAttributeR\nattributes\"F\n\x10SupportAttribute\x12\x0f\n\x04name\x18\x01 \x01(\tR\x01n\x12\x10\n\x05value\x18\x02 \x01(\tR\x01v\x12\x0f\n\x04type\x18\x03 \x01(\tR\x01t\"@\n\x0fNodeSupportance\x12\x14\n\x05nodes\x18\x01 \x03(\x03R\x05nodes\x12\x17\n\x0bsupportance\x18\x02 \x03(\x03R\x02po\"@\n\x0f\x45\x64geSupportance\x12\x14\n\x05\x65\x64ges\x18\x01 \x03(\x03R\x05\x65\x64ges\x12\x17\n\x0bsupportance\x18\x02 \x03(\x03R\x02po\"P\n\x0c\x46unctionTerm\x12\x12\n\x06nodeId\x18\x01 \x01(\x03R\x02po\x12\x13\n\x08\x66unction\x18\x02 \x01(\tR\x01\x66\x12\x17\n\targuments\x18\x03 \x03(\tR\x04\x61rgs\"9\n\x0bReifiedEdge\x12\x14\n\x06\x65\x64geId\x18\x01 \x01(\x03R\x04\x65\x64ge\x12\x14\n\x06nodeId\x18\x02 \x01(\x03R\x04node2U\n\rcxMateService\x12\x44\n\x0eStreamNetworks\x12\x15.proto.NetworkElement\x1a\x15.proto.NetworkElement\"\x00(\x01\x30\x01\x62\x06proto3')
)
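# The Descriptor objects below are the usual protoc-generated mirrors of the
# cxmate.proto schema: one Descriptor per message, each listing its fields
# with their proto field numbers, wire types, and json_name aliases. The
# serialized_start/serialized_end values on every Descriptor are byte offsets
# into the serialized_pb blob above.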
_NETWORKELEMENT = _descriptor.Descriptor(
name='NetworkElement',
full_name='proto.NetworkElement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='proto.NetworkElement.label', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameter', full_name='proto.NetworkElement.parameter', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='proto.NetworkElement.error', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='node', full_name='proto.NetworkElement.node', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='edge', full_name='proto.NetworkElement.edge', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nodeAttribute', full_name='proto.NetworkElement.nodeAttribute', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='edgeAttribute', full_name='proto.NetworkElement.edgeAttribute', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='networkAttribute', full_name='proto.NetworkElement.networkAttribute', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='CartesianCoordinate', full_name='proto.NetworkElement.CartesianCoordinate', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cyGroup', full_name='proto.NetworkElement.cyGroup', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cyView', full_name='proto.NetworkElement.cyView', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cyVisualProperty', full_name='proto.NetworkElement.cyVisualProperty', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cyHiddenAttribute', full_name='proto.NetworkElement.cyHiddenAttribute', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cyNetworkRelation', full_name='proto.NetworkElement.cyNetworkRelation', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cySubNetwork', full_name='proto.NetworkElement.cySubNetwork', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cyTableColumn', full_name='proto.NetworkElement.cyTableColumn', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ndexStatus', full_name='proto.NetworkElement.ndexStatus', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='citation', full_name='proto.NetworkElement.citation', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nodeCitations', full_name='proto.NetworkElement.nodeCitations', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='edgeCitations', full_name='proto.NetworkElement.edgeCitations', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='support', full_name='proto.NetworkElement.support', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nodeSupportance', full_name='proto.NetworkElement.nodeSupportance', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='edgeSupportance', full_name='proto.NetworkElement.edgeSupportance', index=22,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='functionTerm', full_name='proto.NetworkElement.functionTerm', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reifiedEdge', full_name='proto.NetworkElement.reifiedEdge', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='element', full_name='proto.NetworkElement.element',
index=0, containing_type=None, fields=[]),
],
serialized_start=24,
serialized_end=1120,
)
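# NetworkElement is the envelope for the cxMateService.StreamNetworks RPC
# declared at the end of serialized_pb: each frame on the bidirectional
# stream carries exactly one CX aspect, selected through the 'element' oneof
# that is wired up at the bottom of this file. A minimal usage sketch,
# assuming this generated module is imported as cxmate_pb2 and the message
# classes are built further down in the standard _pb2 fashion:
#
#   ele = cxmate_pb2.NetworkElement()
#   ele.node.id = 1          # assigning to a oneof member selects it
#   ele.node.name = 'TP53'
#   assert ele.WhichOneof('element') == 'node'
#   payload = ele.SerializeToString()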
_PARAMETER = _descriptor.Descriptor(
name='Parameter',
full_name='proto.Parameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='proto.Parameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format', full_name='proto.Parameter.format', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stringValue', full_name='proto.Parameter.stringValue', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='booleanValue', full_name='proto.Parameter.booleanValue', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='integerValue', full_name='proto.Parameter.integerValue', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='numberValue', full_name='proto.Parameter.numberValue', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='proto.Parameter.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=1123,
serialized_end=1267,
)
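# Parameter carries exactly one typed payload in its 'value' oneof
# (stringValue, booleanValue, integerValue, or numberValue); consumers can
# dispatch on WhichOneof('value') to recover the concrete type.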
_ERROR = _descriptor.Descriptor(
name='Error',
full_name='proto.Error',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='proto.Error.status', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='proto.Error.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='proto.Error.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='link', full_name='proto.Error.link', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1269,
serialized_end=1337,
)
_NODE = _descriptor.Descriptor(
name='Node',
full_name='proto.Node',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.Node.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='@id'),
_descriptor.FieldDescriptor(
name='name', full_name='proto.Node.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='n'),
_descriptor.FieldDescriptor(
name='represents', full_name='proto.Node.represents', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='r'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1339,
serialized_end=1402,
)
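# The json_name aliases ('@id', 'n', 'r', and the other short keys used
# throughout the messages below) make the proto3 JSON mapping line up with
# the compact attribute names of the CX interchange format, so a Node
# serializes to JSON as {"@id": ..., "n": ..., "r": ...}.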
_EDGE = _descriptor.Descriptor(
name='Edge',
full_name='proto.Edge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.Edge.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='@id'),
_descriptor.FieldDescriptor(
name='sourceId', full_name='proto.Edge.sourceId', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='s'),
_descriptor.FieldDescriptor(
name='targetId', full_name='proto.Edge.targetId', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='t'),
_descriptor.FieldDescriptor(
name='interaction', full_name='proto.Edge.interaction', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='i'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1404,
serialized_end=1493,
)
_NODEATTRIBUTE = _descriptor.Descriptor(
name='NodeAttribute',
full_name='proto.NodeAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodeId', full_name='proto.NodeAttribute.nodeId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='po'),
_descriptor.FieldDescriptor(
name='name', full_name='proto.NodeAttribute.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='n'),
_descriptor.FieldDescriptor(
name='value', full_name='proto.NodeAttribute.value', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='v'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.NodeAttribute.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='d'),
_descriptor.FieldDescriptor(
name='subnetId', full_name='proto.NodeAttribute.subnetId', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='s'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1495,
serialized_end=1603,
)
_EDGEATTRIBUTE = _descriptor.Descriptor(
name='EdgeAttribute',
full_name='proto.EdgeAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='edgeId', full_name='proto.EdgeAttribute.edgeId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='po'),
_descriptor.FieldDescriptor(
name='name', full_name='proto.EdgeAttribute.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='n'),
_descriptor.FieldDescriptor(
name='value', full_name='proto.EdgeAttribute.value', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='v'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.EdgeAttribute.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='d'),
_descriptor.FieldDescriptor(
name='subnetId', full_name='proto.EdgeAttribute.subnetId', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='s'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1605,
serialized_end=1713,
)
_NETWORKATTRIBUTE = _descriptor.Descriptor(
name='NetworkAttribute',
full_name='proto.NetworkAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='proto.NetworkAttribute.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='n'),
_descriptor.FieldDescriptor(
name='value', full_name='proto.NetworkAttribute.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='v'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.NetworkAttribute.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='d'),
_descriptor.FieldDescriptor(
name='subnetId', full_name='proto.NetworkAttribute.subnetId', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='s'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1715,
serialized_end=1806,
)
_CARTESIANCOORDINATE = _descriptor.Descriptor(
name='CartesianCoordinate',
full_name='proto.CartesianCoordinate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodeId', full_name='proto.CartesianCoordinate.nodeId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='node'),
_descriptor.FieldDescriptor(
name='x', full_name='proto.CartesianCoordinate.x', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='x'),
_descriptor.FieldDescriptor(
name='y', full_name='proto.CartesianCoordinate.y', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='y'),
_descriptor.FieldDescriptor(
name='z', full_name='proto.CartesianCoordinate.z', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='z'),
_descriptor.FieldDescriptor(
name='viewId', full_name='proto.CartesianCoordinate.viewId', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='view'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1808,
serialized_end=1915,
)
_CYGROUP = _descriptor.Descriptor(
name='CyGroup',
full_name='proto.CyGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.CyGroup.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='@id'),
_descriptor.FieldDescriptor(
name='view', full_name='proto.CyGroup.view', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='view'),
_descriptor.FieldDescriptor(
name='name', full_name='proto.CyGroup.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='name'),
_descriptor.FieldDescriptor(
name='nodes', full_name='proto.CyGroup.nodes', index=3,
number=4, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='nodes'),
_descriptor.FieldDescriptor(
name='externalEdges', full_name='proto.CyGroup.externalEdges', index=4,
number=5, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='external_edge'),
_descriptor.FieldDescriptor(
name='internalEdges', full_name='proto.CyGroup.internalEdges', index=5,
number=6, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='internal_edges'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1918,
serialized_end=2083,
)
_CYVIEW = _descriptor.Descriptor(
name='CyView',
full_name='proto.CyView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.CyView.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='@id'),
_descriptor.FieldDescriptor(
name='subnetId', full_name='proto.CyView.subnetId', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='s'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2085,
serialized_end=2131,
)
_CYVISUALPROPERTY_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='proto.CyVisualProperty.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='proto.CyVisualProperty.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='proto.CyVisualProperty.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2452,
serialized_end=2501,
)
_CYVISUALPROPERTY_DEPENDENCIESENTRY = _descriptor.Descriptor(
name='DependenciesEntry',
full_name='proto.CyVisualProperty.DependenciesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='proto.CyVisualProperty.DependenciesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='proto.CyVisualProperty.DependenciesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2503,
serialized_end=2554,
)
_CYVISUALPROPERTY_MAPPINGSENTRY = _descriptor.Descriptor(
name='MappingsEntry',
full_name='proto.CyVisualProperty.MappingsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='proto.CyVisualProperty.MappingsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='proto.CyVisualProperty.MappingsEntry.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2556,
serialized_end=2603,
)
_CYVISUALPROPERTY = _descriptor.Descriptor(
name='CyVisualProperty',
full_name='proto.CyVisualProperty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner', full_name='proto.CyVisualProperty.owner', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='properties_of'),
_descriptor.FieldDescriptor(
name='ownerId', full_name='proto.CyVisualProperty.ownerId', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='applies_to'),
_descriptor.FieldDescriptor(
name='view', full_name='proto.CyVisualProperty.view', index=2,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='view'),
_descriptor.FieldDescriptor(
name='properties', full_name='proto.CyVisualProperty.properties', index=3,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='properties'),
_descriptor.FieldDescriptor(
name='dependencies', full_name='proto.CyVisualProperty.dependencies', index=4,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='dependencies'),
_descriptor.FieldDescriptor(
name='mappings', full_name='proto.CyVisualProperty.mappings', index=5,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='mappings'),
],
extensions=[
],
nested_types=[_CYVISUALPROPERTY_PROPERTIESENTRY, _CYVISUALPROPERTY_DEPENDENCIESENTRY, _CYVISUALPROPERTY_MAPPINGSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2134,
serialized_end=2603,
)
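# CyVisualProperty's properties, dependencies, and mappings are proto3 map
# fields. protoc lowers each map to a nested *Entry message whose options
# blob _b('8\001') encodes map_entry=true; on the generated class they
# surface as dict-like fields, e.g. vp.properties['NODE_FILL_COLOR'] (the
# key name here is only an illustration, not part of the schema).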
_CYHIDDENATTRIBUTE = _descriptor.Descriptor(
name='CyHiddenAttribute',
full_name='proto.CyHiddenAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='proto.CyHiddenAttribute.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='n'),
_descriptor.FieldDescriptor(
name='value', full_name='proto.CyHiddenAttribute.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='v'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.CyHiddenAttribute.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='d'),
_descriptor.FieldDescriptor(
name='subnetId', full_name='proto.CyHiddenAttribute.subnetId', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='s'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2605,
serialized_end=2697,
)
_CYNETWORKRELATION = _descriptor.Descriptor(
name='CyNetworkRelation',
full_name='proto.CyNetworkRelation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parentId', full_name='proto.CyNetworkRelation.parentId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='p'),
_descriptor.FieldDescriptor(
name='childId', full_name='proto.CyNetworkRelation.childId', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='c'),
_descriptor.FieldDescriptor(
name='relationship', full_name='proto.CyNetworkRelation.relationship', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='r'),
_descriptor.FieldDescriptor(
name='name', full_name='proto.CyNetworkRelation.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='name'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2699,
serialized_end=2804,
)
_CYSUBNETWORK = _descriptor.Descriptor(
name='CySubNetwork',
full_name='proto.CySubNetwork',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.CySubNetwork.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='@id'),
_descriptor.FieldDescriptor(
name='edges', full_name='proto.CySubNetwork.edges', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='edges'),
_descriptor.FieldDescriptor(
name='nodes', full_name='proto.CySubNetwork.nodes', index=2,
number=3, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='nodes'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2806,
serialized_end=2881,
)
_CYTABLECOLUMN = _descriptor.Descriptor(
name='CyTableColumn',
full_name='proto.CyTableColumn',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='proto.CyTableColumn.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='name'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.CyTableColumn.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='type'),
_descriptor.FieldDescriptor(
name='owner', full_name='proto.CyTableColumn.owner', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='owner'),
_descriptor.FieldDescriptor(
name='subnetId', full_name='proto.CyTableColumn.subnetId', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='applies_to'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2883,
serialized_end=2990,
)
_NDEXSTATUS = _descriptor.Descriptor(
name='NdexStatus',
full_name='proto.NdexStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.NdexStatus.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='externalId'),
_descriptor.FieldDescriptor(
name='location', full_name='proto.NdexStatus.location', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='ndexServerURI'),
_descriptor.FieldDescriptor(
name='owner', full_name='proto.NdexStatus.owner', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='owner'),
_descriptor.FieldDescriptor(
name='creationTime', full_name='proto.NdexStatus.creationTime', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='creationTime'),
_descriptor.FieldDescriptor(
name='readOnly', full_name='proto.NdexStatus.readOnly', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='readOnly'),
_descriptor.FieldDescriptor(
name='visibility', full_name='proto.NdexStatus.visibility', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='visibility'),
_descriptor.FieldDescriptor(
name='edgeCount', full_name='proto.NdexStatus.edgeCount', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='edgeCount'),
_descriptor.FieldDescriptor(
name='nodeCount', full_name='proto.NdexStatus.nodeCount', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='nodeCount'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2993,
serialized_end=3240,
)
_CITATION = _descriptor.Descriptor(
name='Citation',
full_name='proto.Citation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.Citation.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='@id'),
_descriptor.FieldDescriptor(
name='title', full_name='proto.Citation.title', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='dc:title'),
_descriptor.FieldDescriptor(
name='description', full_name='proto.Citation.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='dc:description'),
_descriptor.FieldDescriptor(
name='contributor', full_name='proto.Citation.contributor', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='dc:contributor'),
_descriptor.FieldDescriptor(
name='identifier', full_name='proto.Citation.identifier', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='dc:identifier'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.Citation.type', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='dc:type'),
_descriptor.FieldDescriptor(
name='attributes', full_name='proto.Citation.attributes', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='attributes'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3243,
serialized_end=3485,
)
_CITATIONATTRIBUTE = _descriptor.Descriptor(
name='CitationAttribute',
full_name='proto.CitationAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='proto.CitationAttribute.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='n'),
_descriptor.FieldDescriptor(
name='value', full_name='proto.CitationAttribute.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='v'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.CitationAttribute.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='t'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3487,
serialized_end=3558,
)
_NODECITATIONS = _descriptor.Descriptor(
name='NodeCitations',
full_name='proto.NodeCitations',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodes', full_name='proto.NodeCitations.nodes', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='citations'),
_descriptor.FieldDescriptor(
name='citations', full_name='proto.NodeCitations.citations', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='po'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3560,
serialized_end=3624,
)
_EDGECITATIONS = _descriptor.Descriptor(
name='EdgeCitations',
full_name='proto.EdgeCitations',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='edges', full_name='proto.EdgeCitations.edges', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='citations'),
_descriptor.FieldDescriptor(
name='citations', full_name='proto.EdgeCitations.citations', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='po'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3626,
serialized_end=3690,
)
_SUPPORT = _descriptor.Descriptor(
name='Support',
full_name='proto.Support',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='proto.Support.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='@id'),
_descriptor.FieldDescriptor(
name='citationId', full_name='proto.Support.citationId', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='citation'),
_descriptor.FieldDescriptor(
name='text', full_name='proto.Support.text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='text'),
_descriptor.FieldDescriptor(
name='attributes', full_name='proto.Support.attributes', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='attributes'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3693,
serialized_end=3826,
)
_SUPPORTATTRIBUTE = _descriptor.Descriptor(
name='SupportAttribute',
full_name='proto.SupportAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='proto.SupportAttribute.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='n'),
_descriptor.FieldDescriptor(
name='value', full_name='proto.SupportAttribute.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='v'),
_descriptor.FieldDescriptor(
name='type', full_name='proto.SupportAttribute.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='t'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3828,
serialized_end=3898,
)
_NODESUPPORTANCE = _descriptor.Descriptor(
name='NodeSupportance',
full_name='proto.NodeSupportance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodes', full_name='proto.NodeSupportance.nodes', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='nodes'),
_descriptor.FieldDescriptor(
name='supportance', full_name='proto.NodeSupportance.supportance', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='po'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3900,
serialized_end=3964,
)
_EDGESUPPORTANCE = _descriptor.Descriptor(
name='EdgeSupportance',
full_name='proto.EdgeSupportance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='edges', full_name='proto.EdgeSupportance.edges', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='edges'),
_descriptor.FieldDescriptor(
name='supportance', full_name='proto.EdgeSupportance.supportance', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='po'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3966,
serialized_end=4030,
)
_FUNCTIONTERM = _descriptor.Descriptor(
name='FunctionTerm',
full_name='proto.FunctionTerm',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodeId', full_name='proto.FunctionTerm.nodeId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='po'),
_descriptor.FieldDescriptor(
name='function', full_name='proto.FunctionTerm.function', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='f'),
_descriptor.FieldDescriptor(
name='arguments', full_name='proto.FunctionTerm.arguments', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='args'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4032,
serialized_end=4112,
)
_REIFIEDEDGE = _descriptor.Descriptor(
name='ReifiedEdge',
full_name='proto.ReifiedEdge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='edgeId', full_name='proto.ReifiedEdge.edgeId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='edge'),
_descriptor.FieldDescriptor(
name='nodeId', full_name='proto.ReifiedEdge.nodeId', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, json_name='node'),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4114,
serialized_end=4171,
)
_NETWORKELEMENT.fields_by_name['parameter'].message_type = _PARAMETER
_NETWORKELEMENT.fields_by_name['error'].message_type = _ERROR
_NETWORKELEMENT.fields_by_name['node'].message_type = _NODE
_NETWORKELEMENT.fields_by_name['edge'].message_type = _EDGE
_NETWORKELEMENT.fields_by_name['nodeAttribute'].message_type = _NODEATTRIBUTE
_NETWORKELEMENT.fields_by_name['edgeAttribute'].message_type = _EDGEATTRIBUTE
_NETWORKELEMENT.fields_by_name['networkAttribute'].message_type = _NETWORKATTRIBUTE
_NETWORKELEMENT.fields_by_name['CartesianCoordinate'].message_type = _CARTESIANCOORDINATE
_NETWORKELEMENT.fields_by_name['cyGroup'].message_type = _CYGROUP
_NETWORKELEMENT.fields_by_name['cyView'].message_type = _CYVIEW
_NETWORKELEMENT.fields_by_name['cyVisualProperty'].message_type = _CYVISUALPROPERTY
_NETWORKELEMENT.fields_by_name['cyHiddenAttribute'].message_type = _CYHIDDENATTRIBUTE
_NETWORKELEMENT.fields_by_name['cyNetworkRelation'].message_type = _CYNETWORKRELATION
_NETWORKELEMENT.fields_by_name['cySubNetwork'].message_type = _CYSUBNETWORK
_NETWORKELEMENT.fields_by_name['cyTableColumn'].message_type = _CYTABLECOLUMN
_NETWORKELEMENT.fields_by_name['ndexStatus'].message_type = _NDEXSTATUS
_NETWORKELEMENT.fields_by_name['citation'].message_type = _CITATION
_NETWORKELEMENT.fields_by_name['nodeCitations'].message_type = _NODECITATIONS
_NETWORKELEMENT.fields_by_name['edgeCitations'].message_type = _EDGECITATIONS
_NETWORKELEMENT.fields_by_name['support'].message_type = _SUPPORT
_NETWORKELEMENT.fields_by_name['nodeSupportance'].message_type = _NODESUPPORTANCE
_NETWORKELEMENT.fields_by_name['edgeSupportance'].message_type = _EDGESUPPORTANCE
_NETWORKELEMENT.fields_by_name['functionTerm'].message_type = _FUNCTIONTERM
_NETWORKELEMENT.fields_by_name['reifiedEdge'].message_type = _REIFIEDEDGE
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['parameter'])
_NETWORKELEMENT.fields_by_name['parameter'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['error'])
_NETWORKELEMENT.fields_by_name['error'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['node'])
_NETWORKELEMENT.fields_by_name['node'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['edge'])
_NETWORKELEMENT.fields_by_name['edge'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['nodeAttribute'])
_NETWORKELEMENT.fields_by_name['nodeAttribute'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['edgeAttribute'])
_NETWORKELEMENT.fields_by_name['edgeAttribute'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['networkAttribute'])
_NETWORKELEMENT.fields_by_name['networkAttribute'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['CartesianCoordinate'])
_NETWORKELEMENT.fields_by_name['CartesianCoordinate'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['cyGroup'])
_NETWORKELEMENT.fields_by_name['cyGroup'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['cyView'])
_NETWORKELEMENT.fields_by_name['cyView'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['cyVisualProperty'])
_NETWORKELEMENT.fields_by_name['cyVisualProperty'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['cyHiddenAttribute'])
_NETWORKELEMENT.fields_by_name['cyHiddenAttribute'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['cyNetworkRelation'])
_NETWORKELEMENT.fields_by_name['cyNetworkRelation'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['cySubNetwork'])
_NETWORKELEMENT.fields_by_name['cySubNetwork'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['cyTableColumn'])
_NETWORKELEMENT.fields_by_name['cyTableColumn'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['ndexStatus'])
_NETWORKELEMENT.fields_by_name['ndexStatus'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['citation'])
_NETWORKELEMENT.fields_by_name['citation'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['nodeCitations'])
_NETWORKELEMENT.fields_by_name['nodeCitations'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['edgeCitations'])
_NETWORKELEMENT.fields_by_name['edgeCitations'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['support'])
_NETWORKELEMENT.fields_by_name['support'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['nodeSupportance'])
_NETWORKELEMENT.fields_by_name['nodeSupportance'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['edgeSupportance'])
_NETWORKELEMENT.fields_by_name['edgeSupportance'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['functionTerm'])
_NETWORKELEMENT.fields_by_name['functionTerm'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_NETWORKELEMENT.oneofs_by_name['element'].fields.append(
_NETWORKELEMENT.fields_by_name['reifiedEdge'])
_NETWORKELEMENT.fields_by_name['reifiedEdge'].containing_oneof = _NETWORKELEMENT.oneofs_by_name['element']
_PARAMETER.oneofs_by_name['value'].fields.append(
_PARAMETER.fields_by_name['stringValue'])
_PARAMETER.fields_by_name['stringValue'].containing_oneof = _PARAMETER.oneofs_by_name['value']
_PARAMETER.oneofs_by_name['value'].fields.append(
_PARAMETER.fields_by_name['booleanValue'])
_PARAMETER.fields_by_name['booleanValue'].containing_oneof = _PARAMETER.oneofs_by_name['value']
_PARAMETER.oneofs_by_name['value'].fields.append(
_PARAMETER.fields_by_name['integerValue'])
_PARAMETER.fields_by_name['integerValue'].containing_oneof = _PARAMETER.oneofs_by_name['value']
_PARAMETER.oneofs_by_name['value'].fields.append(
_PARAMETER.fields_by_name['numberValue'])
_PARAMETER.fields_by_name['numberValue'].containing_oneof = _PARAMETER.oneofs_by_name['value']
_CYVISUALPROPERTY_PROPERTIESENTRY.containing_type = _CYVISUALPROPERTY
_CYVISUALPROPERTY_DEPENDENCIESENTRY.containing_type = _CYVISUALPROPERTY
_CYVISUALPROPERTY_MAPPINGSENTRY.containing_type = _CYVISUALPROPERTY
_CYVISUALPROPERTY.fields_by_name['properties'].message_type = _CYVISUALPROPERTY_PROPERTIESENTRY
_CYVISUALPROPERTY.fields_by_name['dependencies'].message_type = _CYVISUALPROPERTY_DEPENDENCIESENTRY
_CYVISUALPROPERTY.fields_by_name['mappings'].message_type = _CYVISUALPROPERTY_MAPPINGSENTRY
_CITATION.fields_by_name['attributes'].message_type = _CITATIONATTRIBUTE
_SUPPORT.fields_by_name['attributes'].message_type = _SUPPORTATTRIBUTE
DESCRIPTOR.message_types_by_name['NetworkElement'] = _NETWORKELEMENT
DESCRIPTOR.message_types_by_name['Parameter'] = _PARAMETER
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
DESCRIPTOR.message_types_by_name['Node'] = _NODE
DESCRIPTOR.message_types_by_name['Edge'] = _EDGE
DESCRIPTOR.message_types_by_name['NodeAttribute'] = _NODEATTRIBUTE
DESCRIPTOR.message_types_by_name['EdgeAttribute'] = _EDGEATTRIBUTE
DESCRIPTOR.message_types_by_name['NetworkAttribute'] = _NETWORKATTRIBUTE
DESCRIPTOR.message_types_by_name['CartesianCoordinate'] = _CARTESIANCOORDINATE
DESCRIPTOR.message_types_by_name['CyGroup'] = _CYGROUP
DESCRIPTOR.message_types_by_name['CyView'] = _CYVIEW
DESCRIPTOR.message_types_by_name['CyVisualProperty'] = _CYVISUALPROPERTY
DESCRIPTOR.message_types_by_name['CyHiddenAttribute'] = _CYHIDDENATTRIBUTE
DESCRIPTOR.message_types_by_name['CyNetworkRelation'] = _CYNETWORKRELATION
DESCRIPTOR.message_types_by_name['CySubNetwork'] = _CYSUBNETWORK
DESCRIPTOR.message_types_by_name['CyTableColumn'] = _CYTABLECOLUMN
DESCRIPTOR.message_types_by_name['NdexStatus'] = _NDEXSTATUS
DESCRIPTOR.message_types_by_name['Citation'] = _CITATION
DESCRIPTOR.message_types_by_name['CitationAttribute'] = _CITATIONATTRIBUTE
DESCRIPTOR.message_types_by_name['NodeCitations'] = _NODECITATIONS
DESCRIPTOR.message_types_by_name['EdgeCitations'] = _EDGECITATIONS
DESCRIPTOR.message_types_by_name['Support'] = _SUPPORT
DESCRIPTOR.message_types_by_name['SupportAttribute'] = _SUPPORTATTRIBUTE
DESCRIPTOR.message_types_by_name['NodeSupportance'] = _NODESUPPORTANCE
DESCRIPTOR.message_types_by_name['EdgeSupportance'] = _EDGESUPPORTANCE
DESCRIPTOR.message_types_by_name['FunctionTerm'] = _FUNCTIONTERM
DESCRIPTOR.message_types_by_name['ReifiedEdge'] = _REIFIEDEDGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NetworkElement = _reflection.GeneratedProtocolMessageType('NetworkElement', (_message.Message,), dict(
DESCRIPTOR = _NETWORKELEMENT,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.NetworkElement)
))
_sym_db.RegisterMessage(NetworkElement)
Parameter = _reflection.GeneratedProtocolMessageType('Parameter', (_message.Message,), dict(
DESCRIPTOR = _PARAMETER,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.Parameter)
))
_sym_db.RegisterMessage(Parameter)
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
DESCRIPTOR = _ERROR,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.Error)
))
_sym_db.RegisterMessage(Error)
Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict(
DESCRIPTOR = _NODE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.Node)
))
_sym_db.RegisterMessage(Node)
Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), dict(
DESCRIPTOR = _EDGE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.Edge)
))
_sym_db.RegisterMessage(Edge)
NodeAttribute = _reflection.GeneratedProtocolMessageType('NodeAttribute', (_message.Message,), dict(
DESCRIPTOR = _NODEATTRIBUTE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.NodeAttribute)
))
_sym_db.RegisterMessage(NodeAttribute)
EdgeAttribute = _reflection.GeneratedProtocolMessageType('EdgeAttribute', (_message.Message,), dict(
DESCRIPTOR = _EDGEATTRIBUTE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.EdgeAttribute)
))
_sym_db.RegisterMessage(EdgeAttribute)
NetworkAttribute = _reflection.GeneratedProtocolMessageType('NetworkAttribute', (_message.Message,), dict(
DESCRIPTOR = _NETWORKATTRIBUTE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.NetworkAttribute)
))
_sym_db.RegisterMessage(NetworkAttribute)
CartesianCoordinate = _reflection.GeneratedProtocolMessageType('CartesianCoordinate', (_message.Message,), dict(
DESCRIPTOR = _CARTESIANCOORDINATE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CartesianCoordinate)
))
_sym_db.RegisterMessage(CartesianCoordinate)
CyGroup = _reflection.GeneratedProtocolMessageType('CyGroup', (_message.Message,), dict(
DESCRIPTOR = _CYGROUP,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyGroup)
))
_sym_db.RegisterMessage(CyGroup)
CyView = _reflection.GeneratedProtocolMessageType('CyView', (_message.Message,), dict(
DESCRIPTOR = _CYVIEW,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyView)
))
_sym_db.RegisterMessage(CyView)
CyVisualProperty = _reflection.GeneratedProtocolMessageType('CyVisualProperty', (_message.Message,), dict(
PropertiesEntry = _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), dict(
DESCRIPTOR = _CYVISUALPROPERTY_PROPERTIESENTRY,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyVisualProperty.PropertiesEntry)
))
,
DependenciesEntry = _reflection.GeneratedProtocolMessageType('DependenciesEntry', (_message.Message,), dict(
DESCRIPTOR = _CYVISUALPROPERTY_DEPENDENCIESENTRY,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyVisualProperty.DependenciesEntry)
))
,
MappingsEntry = _reflection.GeneratedProtocolMessageType('MappingsEntry', (_message.Message,), dict(
DESCRIPTOR = _CYVISUALPROPERTY_MAPPINGSENTRY,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyVisualProperty.MappingsEntry)
))
,
DESCRIPTOR = _CYVISUALPROPERTY,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyVisualProperty)
))
_sym_db.RegisterMessage(CyVisualProperty)
_sym_db.RegisterMessage(CyVisualProperty.PropertiesEntry)
_sym_db.RegisterMessage(CyVisualProperty.DependenciesEntry)
_sym_db.RegisterMessage(CyVisualProperty.MappingsEntry)
CyHiddenAttribute = _reflection.GeneratedProtocolMessageType('CyHiddenAttribute', (_message.Message,), dict(
DESCRIPTOR = _CYHIDDENATTRIBUTE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyHiddenAttribute)
))
_sym_db.RegisterMessage(CyHiddenAttribute)
CyNetworkRelation = _reflection.GeneratedProtocolMessageType('CyNetworkRelation', (_message.Message,), dict(
DESCRIPTOR = _CYNETWORKRELATION,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyNetworkRelation)
))
_sym_db.RegisterMessage(CyNetworkRelation)
CySubNetwork = _reflection.GeneratedProtocolMessageType('CySubNetwork', (_message.Message,), dict(
DESCRIPTOR = _CYSUBNETWORK,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CySubNetwork)
))
_sym_db.RegisterMessage(CySubNetwork)
CyTableColumn = _reflection.GeneratedProtocolMessageType('CyTableColumn', (_message.Message,), dict(
DESCRIPTOR = _CYTABLECOLUMN,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CyTableColumn)
))
_sym_db.RegisterMessage(CyTableColumn)
NdexStatus = _reflection.GeneratedProtocolMessageType('NdexStatus', (_message.Message,), dict(
DESCRIPTOR = _NDEXSTATUS,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.NdexStatus)
))
_sym_db.RegisterMessage(NdexStatus)
Citation = _reflection.GeneratedProtocolMessageType('Citation', (_message.Message,), dict(
DESCRIPTOR = _CITATION,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.Citation)
))
_sym_db.RegisterMessage(Citation)
CitationAttribute = _reflection.GeneratedProtocolMessageType('CitationAttribute', (_message.Message,), dict(
DESCRIPTOR = _CITATIONATTRIBUTE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.CitationAttribute)
))
_sym_db.RegisterMessage(CitationAttribute)
NodeCitations = _reflection.GeneratedProtocolMessageType('NodeCitations', (_message.Message,), dict(
DESCRIPTOR = _NODECITATIONS,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.NodeCitations)
))
_sym_db.RegisterMessage(NodeCitations)
EdgeCitations = _reflection.GeneratedProtocolMessageType('EdgeCitations', (_message.Message,), dict(
DESCRIPTOR = _EDGECITATIONS,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.EdgeCitations)
))
_sym_db.RegisterMessage(EdgeCitations)
Support = _reflection.GeneratedProtocolMessageType('Support', (_message.Message,), dict(
DESCRIPTOR = _SUPPORT,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.Support)
))
_sym_db.RegisterMessage(Support)
SupportAttribute = _reflection.GeneratedProtocolMessageType('SupportAttribute', (_message.Message,), dict(
DESCRIPTOR = _SUPPORTATTRIBUTE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.SupportAttribute)
))
_sym_db.RegisterMessage(SupportAttribute)
NodeSupportance = _reflection.GeneratedProtocolMessageType('NodeSupportance', (_message.Message,), dict(
DESCRIPTOR = _NODESUPPORTANCE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.NodeSupportance)
))
_sym_db.RegisterMessage(NodeSupportance)
EdgeSupportance = _reflection.GeneratedProtocolMessageType('EdgeSupportance', (_message.Message,), dict(
DESCRIPTOR = _EDGESUPPORTANCE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.EdgeSupportance)
))
_sym_db.RegisterMessage(EdgeSupportance)
FunctionTerm = _reflection.GeneratedProtocolMessageType('FunctionTerm', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONTERM,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.FunctionTerm)
))
_sym_db.RegisterMessage(FunctionTerm)
ReifiedEdge = _reflection.GeneratedProtocolMessageType('ReifiedEdge', (_message.Message,), dict(
DESCRIPTOR = _REIFIEDEDGE,
__module__ = 'cxmate_pb2'
# @@protoc_insertion_point(class_scope:proto.ReifiedEdge)
))
_sym_db.RegisterMessage(ReifiedEdge)
_CYVISUALPROPERTY_PROPERTIESENTRY.has_options = True
_CYVISUALPROPERTY_PROPERTIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CYVISUALPROPERTY_DEPENDENCIESENTRY.has_options = True
_CYVISUALPROPERTY_DEPENDENCIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CYVISUALPROPERTY_MAPPINGSENTRY.has_options = True
_CYVISUALPROPERTY_MAPPINGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class cxMateServiceStub(object):
"""********** SERVICE DEFINITION **********
cxMateService defines a cxMate compatible service implementation
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.StreamNetworks = channel.stream_stream(
'/proto.cxMateService/StreamNetworks',
request_serializer=NetworkElement.SerializeToString,
response_deserializer=NetworkElement.FromString,
)
class cxMateServiceServicer(object):
"""********** SERVICE DEFINITION **********
cxMateService defines a cxMate compatible service implementation
"""
def StreamNetworks(self, request_iterator, context):
"""StreamNetworks transfers one or more networks to and from the service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_cxMateServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'StreamNetworks': grpc.stream_stream_rpc_method_handler(
servicer.StreamNetworks,
request_deserializer=NetworkElement.FromString,
response_serializer=NetworkElement.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'proto.cxMateService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetacxMateServiceServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""********** SERVICE DEFINITION **********
cxMateService defines a cxMate compatible service implementation
"""
def StreamNetworks(self, request_iterator, context):
"""StreamNetworks transfers one or more networks to and from the service.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetacxMateServiceStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""********** SERVICE DEFINITION **********
cxMateService defines a cxMate compatible service implementation
"""
def StreamNetworks(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
"""StreamNetworks transfers one or more networks to and from the service.
"""
raise NotImplementedError()
def beta_create_cxMateService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('proto.cxMateService', 'StreamNetworks'): NetworkElement.FromString,
}
response_serializers = {
('proto.cxMateService', 'StreamNetworks'): NetworkElement.SerializeToString,
}
method_implementations = {
('proto.cxMateService', 'StreamNetworks'): face_utilities.stream_stream_inline(servicer.StreamNetworks),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_cxMateService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('proto.cxMateService', 'StreamNetworks'): NetworkElement.SerializeToString,
}
response_deserializers = {
('proto.cxMateService', 'StreamNetworks'): NetworkElement.FromString,
}
cardinalities = {
'StreamNetworks': cardinality.Cardinality.STREAM_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'proto.cxMateService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
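# A minimal client sketch: it assumes a cxMate service is reachable at the
# hypothetical address 'localhost:8080' and shows the bidirectional
# streaming call shape of cxMateService.StreamNetworks.
def _example_stream_networks(address='localhost:8080'):
    channel = grpc.insecure_channel(address)
    stub = cxMateServiceStub(channel)
    # An empty element; a real client would populate one field of the
    # 'element' oneof (node, edge, networkAttribute, ...).
    request = NetworkElement()
    for response in stub.StreamNetworks(iter([request])):
        print(response)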
|
mit
|
ssokolow/ventilomatic
|
control_node/main.py
|
1
|
5623
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Ventilomatic Control Node Program
--snip--
Requires:
- PySerial
Copyright (C) 2014 Stephan Sokolow
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import json, serial, select, socket, time
import logging, pprint
log = logging.getLogger(__name__)
from rules import RULES
__appname__ = "Ventilomatic Control Node"
__author__ = "Stephan Sokolow (deitarion/SSokolow)"
__version__ = "0.2"
__license__ = "GNU GPL 2 or later"
SERIAL_INPUTS = ['/dev/ttyUSB0']
UDP_ADDR = ('0.0.0.0', 51199)
WARMUP_TIME = 3
inputs = []
for path in SERIAL_INPUTS:
fobj = serial.Serial(path, 9600)
inputs.append(fobj)
if UDP_ADDR:
# TODO: Decide how best to handle IPv6
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(UDP_ADDR)
inputs.append(sock)
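# A minimal sketch of what a sensor node would send: one JSON object per
# line, delivered over UDP to UDP_ADDR. Everything besides 'api_version'
# and 'node_id' (e.g. the 'temp' reading) is an illustrative assumption.
def _example_send_reading(addr=('127.0.0.1', UDP_ADDR[1])):
    packet = json.dumps({'api_version': 0, 'node_id': 'kitchen', 'temp': 21.5})
    out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    out.sendto(packet + '\n', addr)
    out.close()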
class Monitor(object):
"""Application for keeping track of sensor state."""
def __init__(self, inputs):
"""
:inputs: A list of `select()`able objects.
"""
self._inputs = inputs
self._buffers = {}
self._model = {}
self.last_rule_eval = None
def defragment_input(self, handle):
"""Given a readable socket, move data to the accumulation buffers.
:handle: A readable object as returned by select()
"""
fno = handle.fileno()
if hasattr(handle, 'readable') and hasattr(handle, 'inWaiting'):
key = fno
self._buffers.setdefault(key, '')
self._buffers[fno] += handle.read(handle.inWaiting())
elif hasattr(handle, 'recvfrom'):
data, addr = handle.recvfrom(1024)
key = (fno, addr)
self._buffers.setdefault(key, '')
self._buffers[key] += data
else:
log.error("Unknown type of data source encountered!")
def parse_buffers(self):
"""Extract any complete messages from the accumulation buffers and
parse them."""
messages = []
for key in self._buffers:
while '\n' in self._buffers[key]:
raw, self._buffers[key] = self._buffers[key].replace('\r',
'').split('\n', 1)
if not raw:
continue
try:
data = json.loads(raw)
except ValueError:
log.debug("Packet was not valid JSON: %r", raw)
continue
api_version = data.get('api_version', None)
                if api_version != 0:
                    log.error('Packet had unsupported API version "%s": %s',
                              api_version, data)
                    continue
log.debug(data)
messages.append(data)
return messages
def update_model(self, message):
"""Update our model of the world"""
node_id = message.get('node_id', None)
if not node_id:
log.error("Message has no node ID: %s" % message)
return
#XXX: On which layer should further format validation happen?
self._model[node_id] = message
def loop_iteration(self):
"""Perform one iteration of the main loop"""
# Give data time to come in before letting rules complain
if self.last_rule_eval is None:
self.last_rule_eval = time.time() + WARMUP_TIME
readable, _, errored = select.select(self._inputs, [], self._inputs)
for sck in readable:
self.defragment_input(sck)
for message in self.parse_buffers():
self.update_model(message)
        # Run rules at most once per second
        if (time.time() - self.last_rule_eval) > 1:
            for rule in RULES:
                rule(self._model)
            self.last_rule_eval = time.time()
def run(self):
while self._inputs:
self.loop_iteration()
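# A minimal sketch of the framing Monitor.parse_buffers() expects:
# newline-delimited JSON carrying 'api_version' 0 and a 'node_id'. The
# buffer key and the 'temp' field are illustrative; real keys come from
# defragment_input().
def _example_parse_buffers():
    mon = Monitor(inputs=[])
    mon._buffers['demo'] = '{"api_version": 0, "node_id": "kitchen", "temp": 21.5}\n'
    for message in mon.parse_buffers():
        mon.update_model(message)
    return mon._model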
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(version="%%prog v%s" % __version__,
usage="%prog [options] <argument> ...",
description=__doc__.replace('\r\n', '\n').split('\n--snip--\n')[0])
parser.add_option('-v', '--verbose', action="count", dest="verbose",
default=2, help="Increase the verbosity. Use twice for extra effect")
parser.add_option('-q', '--quiet', action="count", dest="quiet",
default=0, help="Decrease the verbosity. Use twice for extra effect")
# Reminder: %default can be used in help strings.
# Allow pre-formatted descriptions
parser.formatter.format_description = lambda description: description
opts, args = parser.parse_args()
# Set up clean logging to stderr
log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING,
logging.INFO, logging.DEBUG]
opts.verbose = min(opts.verbose - opts.quiet, len(log_levels) - 1)
opts.verbose = max(opts.verbose, 0)
logging.basicConfig(level=log_levels[opts.verbose],
format='%(levelname)s: %(message)s')
app = Monitor(inputs)
app.run()
|
gpl-2.0
|
kampanita/pelisalacarta
|
python/version-mediaserver/platformcode/controllers/jsonserver.py
|
6
|
7114
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta
# Controller for JSON
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import sys, os
from core import config
from core import logger
from controller import Controller
from controller import Platformtools
import random
import re
from core.item import Item
import threading
import json
import time
class jsonserver(Controller):
pattern = re.compile("^/json")
data = {}
    def __init__(self, handler=None):
        super(jsonserver, self).__init__(handler)
        self.platformtools = platformtools(self)
    def extract_item(self, path):
        if path == "/json" or path == "/json/":
item = Item(channel="channelselector", action="mainlist")
else:
item=Item().fromurl(path.replace("/json/",""))
return item
def run(self, path):
item = self.extract_item(path)
from platformcode import launcher
launcher.run(item)
def set_data(self, data):
self.data = data
def get_data(self, id):
if "id" in self.data and self.data["id"] == id:
data = self.data["result"]
else:
data = None
return data
    def send_data(self, data, headers=None, response=200):
        # A mutable default dict would be shared across calls; create a
        # fresh one per call instead.
        if headers is None:
            headers = {}
        headers.setdefault("content-type", "application/json")
        headers.setdefault("connection", "close")
self.handler.send_response(response)
for header in headers:
self.handler.send_header(header, headers[header])
self.handler.end_headers()
self.handler.wfile.write(data)
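# A minimal sketch of the path round-trip handled by jsonserver.extract_item():
# items are serialized with Item.tourl() after the '/json/' prefix. The
# channel and action names below are illustrative.
def _example_item_roundtrip():
    item = Item(channel="examplechannel", action="mainlist")
    path = "/json/" + item.tourl()
    # fromurl() reverses tourl(), recovering an equivalent Item
    return Item().fromurl(path.replace("/json/", ""))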
class platformtools(Platformtools):
def __init__(self, controller):
self.controller = controller
self.handler = controller.handler
def render_items(self, itemlist, parentitem):
JSONResponse = {}
JSONResponse["title"] = parentitem.title
JSONResponse["date"] = time.strftime("%x")
JSONResponse["time"] = time.strftime("%X")
JSONResponse["count"]=len(itemlist)
JSONResponse["list"]= []
for item in itemlist:
JSONItem = {}
JSONItem["title"] = item.title
JSONItem["url"] = "http://" + self.controller.host+"/json/" + item.tourl()
if item.thumbnail: JSONItem["thumbnail"] = item.thumbnail
if item.plot: JSONItem["plot"] = item.plot
JSONResponse["list"].append(JSONItem)
self.controller.send_data(json.dumps(JSONResponse, indent=4, sort_keys=True))
def dialog_select(self, heading, list):
ID = "%032x" %(random.getrandbits(128))
response = '<?xml version="1.0" encoding="UTF-8" ?>\n'
response +='<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">\n'
response +='<channel>\n'
response +='<link>/rss</link>\n'
response +='<title>'+heading+'</title>\n'
for option in list:
response += '<item>\n'
response += '<title>'+option +'</title>\n'
response += '<image>%s</image>\n'
response += '<link>http://'+self.controller.host+'/data/'+threading.current_thread().name +'/'+ID+'/'+str(list.index(option))+'</link>\n'
response += '</item>\n\n'
response += '</channel>\n'
response += '</rss>\n'
self.controller.send_data(response)
self.handler.server.shutdown_request(self.handler.request)
        while not self.controller.get_data(ID):
            time.sleep(0.1)  # poll for the client's answer without busy-waiting
return int(self.controller.get_data(ID))
def dialog_ok(self, heading, line1, line2="", line3=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
ID = "%032x" %(random.getrandbits(128))
response = '<?xml version="1.0" encoding="UTF-8" ?>\n'
response +='<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">\n'
response +='<channel>\n'
response +='<link>/rss</link>\n'
response +='<title>'+heading+'</title>\n'
response += '<item>\n'
response += '<title>'+text +'</title>\n'
response += '<image>%s</image>\n'
response += '<link></link>\n'
response += '</item>\n\n'
response += '<item>\n'
response += '<title>Si</title>\n'
response += '<image>%s</image>\n'
response += '<link>http://'+self.controller.host+'/data/'+threading.current_thread().name +'/'+ID+'/1</link>\n'
response += '</item>\n\n'
response += '</channel>\n'
response += '</rss>\n'
self.controller.send_data(response)
self.handler.server.shutdown_request(self.handler.request)
        while not self.controller.get_data(ID):
            time.sleep(0.1)  # poll for the client's answer without busy-waiting
def dialog_yesno(self, heading, line1, line2="", line3=""):
text = line1
if line2: text += "\n" + line2
if line3: text += "\n" + line3
ID = "%032x" %(random.getrandbits(128))
response = '<?xml version="1.0" encoding="UTF-8" ?>\n'
response +='<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">\n'
response +='<channel>\n'
response +='<link>/rss</link>\n'
response +='<title>'+heading+'</title>\n'
response += '<item>\n'
response += '<title>'+text +'</title>\n'
response += '<image>%s</image>\n'
response += '<link></link>\n'
response += '</item>\n\n'
response += '<item>\n'
response += '<title>Si</title>\n'
response += '<image>%s</image>\n'
response += '<link>http://'+self.controller.host+'/data/'+threading.current_thread().name +'/'+ID+'/1</link>\n'
response += '</item>\n\n'
response += '<item>\n'
response += '<title>No</title>\n'
response += '<image>%s</image>\n'
response += '<link>http://'+self.controller.host+'/data/'+threading.current_thread().name +'/'+ID+'/0</link>\n'
response += '</item>\n\n'
response += '</channel>\n'
response += '</rss>\n'
self.controller.send_data(response)
self.handler.server.shutdown_request(self.handler.request)
        while not self.controller.get_data(ID):
            time.sleep(0.1)  # poll for the client's answer without busy-waiting
return bool(int(self.controller.get_data(ID)))
def dialog_notification(self, heading, message, icon=0, time=5000, sound=True):
        # Not available for now; falls back to dialog_ok
self.dialog_ok(heading,message)
def play_video(self, item):
response = '<?xml version="1.0" encoding="UTF-8" ?>\n'
response +='<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/">\n'
response +='<channel>\n'
response +='<link>/rss</link>\n'
response +='<title>'+item.title+'</title>\n'
response += '<item>\n'
response += '<title>'+item.title +'</title>\n'
response += '<image>%s</image>\n'
response += '<link>'+item.video_url+'</link>\n'
response += '</item>\n\n'
response += '</channel>\n'
response += '</rss>\n'
self.controller.send_data(response)
|
gpl-3.0
|
eonpatapon/nova
|
nova/objects/image_meta.py
|
13
|
16310
|
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
from nova.virt import hardware
@base.NovaObjectRegistry.register
class ImageMeta(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: updated ImageMetaProps
VERSION = '1.1'
# These are driven by what the image client API returns
# to Nova from Glance. This is defined in the glance
# code glance/api/v2/images.py get_base_properties()
# method. A few things are currently left out:
# self, file, schema - Nova does not appear to ever use
    # these fields; locations - modelling the arbitrary
# data in the 'metadata' subfield is non-trivial as
# there's no clear spec
#
fields = {
'id': fields.UUIDField(),
'name': fields.StringField(),
'status': fields.StringField(),
'visibility': fields.StringField(),
'protected': fields.FlexibleBooleanField(),
'checksum': fields.StringField(),
'owner': fields.StringField(),
'size': fields.IntegerField(),
'virtual_size': fields.IntegerField(),
'container_format': fields.StringField(),
'disk_format': fields.StringField(),
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'tags': fields.ListOfStringsField(),
'direct_url': fields.StringField(),
'min_ram': fields.IntegerField(),
'min_disk': fields.IntegerField(),
'properties': fields.ObjectField('ImageMetaProps'),
}
obj_relationships = {
'properties': [('1.0', '1.0'),
('1.1', '1.1')],
}
@classmethod
def from_dict(cls, image_meta):
"""Create instance from image metadata dict
:param image_meta: image metadata dictionary
Creates a new object instance, initializing from the
properties associated with the image metadata instance
:returns: an ImageMeta instance
"""
if image_meta is None:
image_meta = {}
# We must turn 'properties' key dict into an object
# so copy image_meta to avoid changing original
image_meta = copy.deepcopy(image_meta)
image_meta["properties"] = \
objects.ImageMetaProps.from_dict(
image_meta.get("properties", {}))
return cls(**image_meta)
@classmethod
def from_instance(cls, instance):
"""Create instance from instance system metadata
:param instance: Instance object
Creates a new object instance, initializing from the
system metadata "image_" properties associated with
instance
:returns: an ImageMeta instance
"""
sysmeta = utils.instance_sys_meta(instance)
image_meta = utils.get_image_from_system_metadata(sysmeta)
return cls.from_dict(image_meta)
@base.NovaObjectRegistry.register
class ImageMetaProps(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: added os_require_quiesce field
VERSION = ImageMeta.VERSION
# Maximum number of NUMA nodes permitted for the guest topology
NUMA_NODES_MAX = 128
# 'hw_' - settings affecting the guest virtual machine hardware
# 'img_' - settings affecting the use of images by the compute node
# 'os_' - settings affecting the guest operating system setup
fields = {
# name of guest hardware architecture eg i686, x86_64, ppc64
'hw_architecture': fields.ArchitectureField(),
# used to decide to expand root disk partition and fs to full size of
# root disk
'hw_auto_disk_config': fields.StringField(),
# whether to display BIOS boot device menu
'hw_boot_menu': fields.FlexibleBooleanField(),
# name of the CDROM bus to use eg virtio, scsi, ide
'hw_cdrom_bus': fields.DiskBusField(),
# preferred number of CPU cores per socket
'hw_cpu_cores': fields.IntegerField(),
# preferred number of CPU sockets
'hw_cpu_sockets': fields.IntegerField(),
# maximum number of CPU cores per socket
'hw_cpu_max_cores': fields.IntegerField(),
# maximum number of CPU sockets
'hw_cpu_max_sockets': fields.IntegerField(),
# maximum number of CPU threads per core
'hw_cpu_max_threads': fields.IntegerField(),
# CPU thread allocation policy
'hw_cpu_policy': fields.CPUAllocationPolicyField(),
# preferred number of CPU threads per core
'hw_cpu_threads': fields.IntegerField(),
# guest ABI version for guest xentools either 1 or 2 (or 3 - depends on
# Citrix PV tools version installed in image)
'hw_device_id': fields.IntegerField(),
# name of the hard disk bus to use eg virtio, scsi, ide
'hw_disk_bus': fields.DiskBusField(),
# allocation mode eg 'preallocated'
'hw_disk_type': fields.StringField(),
# name of the floppy disk bus to use eg fd, scsi, ide
'hw_floppy_bus': fields.DiskBusField(),
# boolean - used to trigger code to inject networking when booting a CD
# image with a network boot image
'hw_ipxe_boot': fields.FlexibleBooleanField(),
# There are sooooooooooo many possible machine types in
# QEMU - several new ones with each new release - that it
# is not practical to enumerate them all. So we use a free
# form string
'hw_machine_type': fields.StringField(),
# One of the magic strings 'small', 'any', 'large'
# or an explicit page size in KB (eg 4, 2048, ...)
'hw_mem_page_size': fields.StringField(),
# Number of guest NUMA nodes
'hw_numa_nodes': fields.IntegerField(),
# Each list entry corresponds to a guest NUMA node and the
# set members indicate CPUs for that node
'hw_numa_cpus': fields.ListOfSetsOfIntegersField(),
# Each list entry corresponds to a guest NUMA node and the
# list value indicates the memory size of that node.
'hw_numa_mem': fields.ListOfIntegersField(),
# boolean 'yes' or 'no' to enable QEMU guest agent
'hw_qemu_guest_agent': fields.FlexibleBooleanField(),
# name of the RNG device type eg virtio
'hw_rng_model': fields.RNGModelField(),
# number of serial ports to create
'hw_serial_port_count': fields.IntegerField(),
# name of the SCSI bus controller eg 'virtio-scsi', 'lsilogic', etc
'hw_scsi_model': fields.SCSIModelField(),
# name of the video adapter model to use, eg cirrus, vga, xen, qxl
'hw_video_model': fields.VideoModelField(),
# MB of video RAM to provide eg 64
'hw_video_ram': fields.IntegerField(),
# name of a NIC device model eg virtio, e1000, rtl8139
'hw_vif_model': fields.VIFModelField(),
# "xen" vs "hvm"
'hw_vm_mode': fields.VMModeField(),
# action to take when watchdog device fires eg reset, poweroff, pause,
# none
'hw_watchdog_action': fields.WatchdogActionField(),
# if true download using bittorrent
'img_bittorrent': fields.FlexibleBooleanField(),
# Which data format the 'img_block_device_mapping' field is
# using to represent the block device mapping
'img_bdm_v2': fields.FlexibleBooleanField(),
        # Block device mapping - the data may be in one of two completely
# different formats. The 'img_bdm_v2' field determines whether
# it is in legacy format, or the new current format. Ideally
# we would have a formal data type for this field instead of a
# dict, but with 2 different formats to represent this is hard.
# See nova/block_device.py from_legacy_mapping() for the complex
# conversion code. So for now leave it as a dict and continue
# to use existing code that is able to convert dict into the
# desired internal BDM formats
'img_block_device_mapping':
fields.ListOfDictOfNullableStringsField(),
# boolean - if True, and image cache set to "some" decides if image
# should be cached on host when server is booted on that host
'img_cache_in_nova': fields.FlexibleBooleanField(),
# Compression level for images. (1-9)
'img_compression_level': fields.IntegerField(),
# boolean flag to set space-saving or performance behavior on the
# Datastore
'img_linked_clone': fields.FlexibleBooleanField(),
# Image mappings - related to Block device mapping data - mapping
# of virtual image names to device names. This could be represented
        # as a formal data type, but is left as a dict for the same reason as
# img_block_device_mapping field. It would arguably make sense for
# the two to be combined into a single field and data type in the
# future.
'img_mappings': fields.ListOfDictOfNullableStringsField(),
# image project id (set on upload)
'img_owner_id': fields.StringField(),
# root device name, used in snapshotting eg /dev/<blah>
'img_root_device_name': fields.StringField(),
# boolean - if false don't talk to nova agent
'img_use_agent': fields.FlexibleBooleanField(),
# integer value 1
'img_version': fields.IntegerField(),
# string of boot time command line arguments for the guest kernel
'os_command_line': fields.StringField(),
# the name of the specific guest operating system distro. This
# is not done as an Enum since the list of operating systems is
# growing incredibly fast, and valid values can be arbitrarily
# user defined. Nova has no real need for strict validation so
# leave it freeform
'os_distro': fields.StringField(),
# boolean - if true, then guest must support disk quiesce
# or snapshot operation will be denied
'os_require_quiesce': fields.FlexibleBooleanField(),
# boolean - if using agent don't inject files, assume someone else is
# doing that (cloud-init)
'os_skip_agent_inject_files_at_boot': fields.FlexibleBooleanField(),
# boolean - if using agent don't try inject ssh key, assume someone
# else is doing that (cloud-init)
'os_skip_agent_inject_ssh': fields.FlexibleBooleanField(),
# The guest operating system family such as 'linux', 'windows' - this
# is a fairly generic type. For a detailed type consider os_distro
# instead
'os_type': fields.OSTypeField(),
}
# The keys are the legacy property names and
# the values are the current preferred names
_legacy_property_map = {
'architecture': 'hw_architecture',
'owner_id': 'img_owner_id',
'vmware_disktype': 'hw_disk_type',
'vmware_image_version': 'img_version',
'vmware_ostype': 'os_distro',
'auto_disk_config': 'hw_auto_disk_config',
'ipxe_boot': 'hw_ipxe_boot',
'xenapi_device_id': 'hw_device_id',
'xenapi_image_compression_level': 'img_compression_level',
'vmware_linked_clone': 'img_linked_clone',
'xenapi_use_agent': 'img_use_agent',
'xenapi_skip_agent_inject_ssh': 'os_skip_agent_inject_ssh',
'xenapi_skip_agent_inject_files_at_boot':
'os_skip_agent_inject_files_at_boot',
'cache_in_nova': 'img_cache_in_nova',
'vm_mode': 'hw_vm_mode',
'bittorrent': 'img_bittorrent',
'mappings': 'img_mappings',
'block_device_mapping': 'img_block_device_mapping',
'bdm_v2': 'img_bdm_v2',
'root_device_name': 'img_root_device_name',
}
# TODO(berrange): Need to run this from a data migration
# at some point so we can eventually kill off the compat
def _set_attr_from_legacy_names(self, image_props):
for legacy_key in self._legacy_property_map:
new_key = self._legacy_property_map[legacy_key]
if legacy_key not in image_props:
continue
setattr(self, new_key, image_props[legacy_key])
vmware_adaptertype = image_props.get("vmware_adaptertype")
if vmware_adaptertype == "ide":
setattr(self, "hw_disk_bus", "ide")
elif vmware_adaptertype is not None:
setattr(self, "hw_disk_bus", "scsi")
setattr(self, "hw_scsi_model", vmware_adaptertype)
def _set_numa_mem(self, image_props):
hw_numa_mem = []
hw_numa_mem_set = False
for cellid in range(ImageMetaProps.NUMA_NODES_MAX):
memprop = "hw_numa_mem.%d" % cellid
if memprop not in image_props:
break
hw_numa_mem.append(int(image_props[memprop]))
hw_numa_mem_set = True
del image_props[memprop]
if hw_numa_mem_set:
self.hw_numa_mem = hw_numa_mem
def _set_numa_cpus(self, image_props):
hw_numa_cpus = []
hw_numa_cpus_set = False
for cellid in range(ImageMetaProps.NUMA_NODES_MAX):
cpuprop = "hw_numa_cpus.%d" % cellid
if cpuprop not in image_props:
break
hw_numa_cpus.append(
hardware.parse_cpu_spec(image_props[cpuprop]))
hw_numa_cpus_set = True
del image_props[cpuprop]
if hw_numa_cpus_set:
self.hw_numa_cpus = hw_numa_cpus
def _set_attr_from_current_names(self, image_props):
for key in self.fields:
# The two NUMA fields need special handling to
# un-stringify them correctly
if key == "hw_numa_mem":
self._set_numa_mem(image_props)
elif key == "hw_numa_cpus":
self._set_numa_cpus(image_props)
else:
if key not in image_props:
continue
setattr(self, key, image_props[key])
@classmethod
def from_dict(cls, image_props):
"""Create instance from image properties dict
        :param image_props: dictionary of image metadata properties
Creates a new object instance, initializing from a
dictionary of image metadata properties
:returns: an ImageMetaProps instance
"""
obj = cls()
# We look to see if the dict has entries for any
# of the legacy property names first. Then we use
# the current property names. That way if both the
# current and legacy names are set, the value
# associated with the current name takes priority
obj._set_attr_from_legacy_names(image_props)
obj._set_attr_from_current_names(image_props)
return obj
def get(self, name, defvalue=None):
"""Get the value of an attribute
:param name: the attribute to request
:param defvalue: the default value if not set
        This returns the value of an attribute if it is currently
        set, otherwise it returns the supplied default value
        (None unless overridden).
        This differs from accessing props.attrname, because that
        will raise an exception if the attribute has no value set.
        So instead of
          if image_meta.properties.obj_attr_is_set("some_attr"):
              val = image_meta.properties.some_attr
          else:
              val = None
Callers can rely on unconditional access
val = image_meta.properties.get("some_attr")
:returns: the attribute value or None
"""
if not self.obj_attr_is_set(name):
return defvalue
return getattr(self, name)
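# A minimal sketch of how legacy Glance property names are folded into the
# current ones via _legacy_property_map: the legacy 'vmware_ostype' key
# below surfaces as 'os_distro'. All property values are illustrative.
def _example_image_meta_from_dict():
    meta = ImageMeta.from_dict({
        'name': 'example-image',
        'properties': {
            'vmware_ostype': 'otherGuest',   # legacy key
            'hw_disk_bus': 'virtio',         # current key
        },
    })
    return meta.properties.get('os_distro')  # -> 'otherGuest'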
|
apache-2.0
|
jkburges/phantomjs
|
src/qt/qtwebkit/Tools/BuildSlaveSupport/build.webkit.org-config/wkbuild_unittest.py
|
116
|
4793
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import wkbuild
class ShouldBuildTest(unittest.TestCase):
_should_build_tests = [
(["ChangeLog", "Source/WebCore/ChangeLog", "Source/WebKit2/ChangeLog-2011-02-11"], []),
(["GNUmakefile.am", "Source/WebCore/GNUmakefile.am"], ["gtk"]),
(["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),
(["Websites/bugs.webkit.org/foo"], []),
(["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit2/win/WebKit2.vcproj", "Source/WebKit/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),
(["LayoutTests/platform/mac/foo", "Source/WebCore/bar"], ["*"]),
(["LayoutTests/foo"], ["*"]),
(["LayoutTests/canvas/philip/tests/size.attributes.parse.exp-expected.txt", "LayoutTests/canvas/philip/tests/size.attributes.parse.exp.html"], ["*"]),
(["LayoutTests/platform/mac-leopard/foo"], ["mac-leopard"]),
(["LayoutTests/platform/mac-lion/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
(["LayoutTests/platform/mac-snowleopard/foo"], ["mac-leopard", "mac-snowleopard"]),
(["LayoutTests/platform/mac-wk2/Skipped"], ["mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
(["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
(["LayoutTests/platform/win-xp/foo"], ["win"]),
(["LayoutTests/platform/win-wk2/foo"], ["win"]),
(["LayoutTests/platform/win/foo"], ["win"]),
(["Source/WebCore.exp.in", "Source/WebKit/mac/WebKit.exp"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/win/foo"], ["win"]),
(["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/platform/wx/wxcode/win/foo"], []),
(["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
(["Source/WebCore/rendering/RenderThemeWinCE.h"], []),
(["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []),
]
def test_should_build(self):
for files, platforms in self._should_build_tests:
# FIXME: We should test more platforms here once
# wkbuild._should_file_trigger_build is implemented for them.
for platform in ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]:
should_build = platform in platforms or "*" in platforms
self.assertEqual(wkbuild.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files)))
# FIXME: We should run this file as part of test-webkitpy.
# Unfortunately test-webkitpy currently requires that unittests
# be located in a directory with a valid module name.
# 'build.webkit.org-config' is not a valid module name (due to '.' and '-')
# so for now this is a stand-alone test harness.
if __name__ == '__main__':
unittest.main()
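# A minimal sketch of the convention behind _should_build_tests: each tuple
# maps a list of changed files to the platforms that should rebuild, with
# "*" meaning every platform. The path below is taken from the table above.
def _example_should_build():
    return wkbuild.should_build("win", ["LayoutTests/platform/win/foo"])  # -> True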
|
bsd-3-clause
|
Debian/openjfx
|
modules/web/src/main/native/Source/JavaScriptCore/inspector/scripts/codegen/objc_generator.py
|
2
|
25284
|
#!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
from generator import Generator, ucfirst
from models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
log = logging.getLogger('global')
def join_type_and_name(type_str, name_str):
if type_str.endswith('*'):
return type_str + name_str
return type_str + ' ' + name_str
def strip_block_comment_markers(str):
return str.replace('/*', '').replace('*/', '')
def remove_duplicate_from_str(str, possible_duplicate):
return str.replace(possible_duplicate + possible_duplicate, possible_duplicate)
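# A small self-check sketch of the helpers above; the example strings are
# illustrative of how the generator joins ObjC declarations and collapses
# doubled domain prefixes.
def _example_string_helpers():
    # Pointer types take the name directly; value types get a space.
    assert join_type_and_name('NSString *', 'name') == 'NSString *name'
    assert join_type_and_name('int', 'count') == 'int count'
    # A doubled domain prefix collapses to a single occurrence.
    assert remove_duplicate_from_str('PagePageFrame', 'Page') == 'PageFrame'
    assert strip_block_comment_markers('/* doc */') == ' doc '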
_OBJC_IDENTIFIER_RENAME_MAP = {
'this': 'thisObject', # Debugger.CallFrame.this
'description': 'stringRepresentation', # Runtime.RemoteObject.description
'id': 'identifier', # Page.Frame.id, Runtime.ExecutionContextDescription.id, Debugger.BreakpointAction.id
}
_OBJC_IDENTIFIER_REVERSE_RENAME_MAP = dict((v, k) for k, v in _OBJC_IDENTIFIER_RENAME_MAP.iteritems())
class ObjCTypeCategory:
Simple = 0
String = 1
Object = 2
Array = 3
@staticmethod
def category_for_type(_type):
if (isinstance(_type, PrimitiveType)):
            if _type.raw_name() == 'string':
return ObjCTypeCategory.String
if _type.raw_name() in ['object', 'any']:
return ObjCTypeCategory.Object
            if _type.raw_name() == 'array':
return ObjCTypeCategory.Array
return ObjCTypeCategory.Simple
if (isinstance(_type, ObjectType)):
return ObjCTypeCategory.Object
if (isinstance(_type, ArrayType)):
return ObjCTypeCategory.Array
if (isinstance(_type, AliasedType)):
return ObjCTypeCategory.category_for_type(_type.aliased_type)
if (isinstance(_type, EnumType)):
return ObjCTypeCategory.category_for_type(_type.primitive_type)
return None
# Almost all Objective-C class names require the use of a prefix that depends on the
# target framework's 'objc_prefix' setting. So, most helpers are instance methods.
class ObjCGenerator(Generator):
# Do not use a dynamic prefix for RWIProtocolJSONObject since it's used as a common
# base class and needs a consistent Objective-C prefix to be in a reusable framework.
OBJC_HELPER_PREFIX = 'RWI'
OBJC_SHARED_PREFIX = 'Protocol'
OBJC_STATIC_PREFIX = '%s%s' % (OBJC_HELPER_PREFIX, OBJC_SHARED_PREFIX)
def __init__(self, *args, **kwargs):
Generator.__init__(self, *args, **kwargs)
# The 'protocol name' is used to prefix filenames for a protocol group (a set of domains generated together).
def protocol_name(self):
protocol_group = self.model().framework.setting('objc_protocol_group', '')
return '%s%s' % (protocol_group, ObjCGenerator.OBJC_SHARED_PREFIX)
# The 'ObjC prefix' is used to prefix Objective-C class names and enums with a
# framework-specific prefix. It is separate from filename prefixes.
def objc_prefix(self):
framework_prefix = self.model().framework.setting('objc_prefix', None)
if not framework_prefix:
return ''
else:
return '%s%s' % (framework_prefix, ObjCGenerator.OBJC_SHARED_PREFIX)
# Adjust identifier names that collide with ObjC keywords.
@staticmethod
def identifier_to_objc_identifier(name):
return _OBJC_IDENTIFIER_RENAME_MAP.get(name, name)
@staticmethod
def objc_identifier_to_identifier(name):
return _OBJC_IDENTIFIER_REVERSE_RENAME_MAP.get(name, name)
# Generate ObjC types, command handlers, and event dispatchers for a subset of domains.
DOMAINS_TO_GENERATE = ['CSS', 'DOM', 'DOMStorage', 'Network', 'Page', 'Automation', 'GenericTypes']
def should_generate_types_for_domain(self, domain):
if not len(self.type_declarations_for_domain(domain)):
return False
if self.model().framework is Frameworks.Test:
return True
whitelist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
whitelist.update(set(['Console', 'Debugger', 'Runtime']))
return domain.domain_name in whitelist
def should_generate_commands_for_domain(self, domain):
if not len(self.commands_for_domain(domain)):
return False
if self.model().framework is Frameworks.Test:
return True
whitelist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
return domain.domain_name in whitelist
def should_generate_events_for_domain(self, domain):
if not len(self.events_for_domain(domain)):
return False
if self.model().framework is Frameworks.Test:
return True
whitelist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
whitelist.add('Console')
return domain.domain_name in whitelist
# ObjC enum and type names.
def objc_name_for_type(self, type):
name = type.qualified_name().replace('.', '')
name = remove_duplicate_from_str(name, type.type_domain().domain_name)
return '%s%s' % (self.objc_prefix(), name)
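    # Anonymous enums are named after their enclosing declaration, member, or parameter;
    # e.g. an anonymous 'type' member enum on Debugger.BreakpointAction would become
    # <prefix>DebuggerBreakpointActionType.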
def objc_enum_name_for_anonymous_enum_declaration(self, declaration):
domain_name = declaration.type.type_domain().domain_name
name = '%s%s' % (domain_name, declaration.type.raw_name())
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
def objc_enum_name_for_anonymous_enum_member(self, declaration, member):
domain_name = member.type.type_domain().domain_name
name = '%s%s%s' % (domain_name, declaration.type.raw_name(), ucfirst(member.member_name))
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
def objc_enum_name_for_anonymous_enum_parameter(self, domain, event_or_command_name, parameter):
domain_name = domain.domain_name
name = '%s%s%s' % (domain_name, ucfirst(event_or_command_name), ucfirst(parameter.parameter_name))
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
def objc_enum_name_for_non_anonymous_enum(self, _type):
domain_name = _type.type_domain().domain_name
name = _type.qualified_name().replace('.', '')
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
# Miscellaneous name handling.
@staticmethod
def variable_name_prefix_for_domain(domain):
domain_name = domain.domain_name
if domain_name.startswith('DOM'):
return 'dom' + domain_name[3:]
if domain_name.startswith('CSS'):
return 'css' + domain_name[3:]
return domain_name[:1].lower() + domain_name[1:]
# Type basics.
@staticmethod
def objc_accessor_type_for_raw_name(raw_name):
if raw_name in ['string', 'array']:
return 'copy'
if raw_name in ['integer', 'number', 'boolean']:
return 'assign'
if raw_name in ['any', 'object']:
return 'retain'
return None
@staticmethod
def objc_type_for_raw_name(raw_name):
        if raw_name == 'string':
            return 'NSString *'
        if raw_name == 'array':
            return 'NSArray *'
        if raw_name == 'integer':
            return 'int'
        if raw_name == 'number':
            return 'double'
        if raw_name == 'boolean':
return 'BOOL'
if raw_name in ['any', 'object']:
return '%sJSONObject *' % ObjCGenerator.OBJC_STATIC_PREFIX
return None
@staticmethod
def objc_class_for_raw_name(raw_name):
        if raw_name == 'string':
            return 'NSString'
        if raw_name == 'array':
            return 'NSArray'
if raw_name in ['integer', 'number', 'boolean']:
return 'NSNumber'
if raw_name in ['any', 'object']:
return '%sJSONObject' % ObjCGenerator.OBJC_STATIC_PREFIX
return None
# FIXME: Can these protocol_type functions be removed in favor of C++ generators functions?
@staticmethod
def protocol_type_for_raw_name(raw_name):
        if raw_name == 'string':
            return 'String'
        if raw_name == 'integer':
            return 'int'
        if raw_name == 'number':
            return 'double'
        if raw_name == 'boolean':
return 'bool'
if raw_name in ['any', 'object']:
return 'InspectorObject'
return None
@staticmethod
def protocol_type_for_type(_type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return ObjCGenerator.protocol_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
return ObjCGenerator.protocol_type_for_type(_type.primitive_type)
if (isinstance(_type, ObjectType)):
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if (isinstance(_type, ArrayType)):
sub_type = ObjCGenerator.protocol_type_for_type(_type.element_type)
return 'Inspector::Protocol::Array<%s>' % sub_type
return None
@staticmethod
def is_type_objc_pointer_type(_type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return _type.raw_name() in ['string', 'array', 'any', 'object']
if (isinstance(_type, EnumType)):
return False
if (isinstance(_type, ObjectType)):
return True
if (isinstance(_type, ArrayType)):
return True
return None
def objc_class_for_type(self, _type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return ObjCGenerator.objc_class_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
return ObjCGenerator.objc_class_for_raw_name(_type.primitive_type.raw_name())
if (isinstance(_type, ObjectType)):
return self.objc_name_for_type(_type)
if (isinstance(_type, ArrayType)):
sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
return 'NSArray/*<%s>*/' % sub_type
return None
def objc_class_for_array_type(self, _type):
if isinstance(_type, AliasedType):
_type = _type.aliased_type
if isinstance(_type, ArrayType):
return self.objc_class_for_type(_type.element_type)
return None
def objc_accessor_type_for_member(self, member):
return self.objc_accessor_type_for_member_internal(member.type)
def objc_accessor_type_for_member_internal(self, _type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return self.objc_accessor_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
return 'assign'
if (isinstance(_type, ObjectType)):
return 'retain'
if (isinstance(_type, ArrayType)):
return 'copy'
return None
def objc_type_for_member(self, declaration, member):
return self.objc_type_for_member_internal(member.type, declaration, member)
def objc_type_for_member_internal(self, _type, declaration, member):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return self.objc_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
if (_type.is_anonymous):
return self.objc_enum_name_for_anonymous_enum_member(declaration, member)
return self.objc_enum_name_for_non_anonymous_enum(_type)
if (isinstance(_type, ObjectType)):
return self.objc_name_for_type(_type) + ' *'
if (isinstance(_type, ArrayType)):
sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
return 'NSArray/*<%s>*/ *' % sub_type
return None
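    # Optional parameters are passed indirectly, so their ObjC type gains one level of
    # pointer indirection below: 'int' becomes 'int *' and 'NSString *' becomes 'NSString **'.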
def objc_type_for_param(self, domain, event_or_command_name, parameter, respect_optional=True):
objc_type = self.objc_type_for_param_internal(parameter.type, domain, event_or_command_name, parameter)
if respect_optional and parameter.is_optional:
if objc_type.endswith('*'):
return objc_type + '*'
return objc_type + ' *'
return objc_type
def objc_type_for_param_internal(self, _type, domain, event_or_command_name, parameter):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return self.objc_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
if _type.is_anonymous:
return self.objc_enum_name_for_anonymous_enum_parameter(domain, event_or_command_name, parameter)
return self.objc_enum_name_for_non_anonymous_enum(_type)
if (isinstance(_type, ObjectType)):
return self.objc_name_for_type(_type) + ' *'
if (isinstance(_type, ArrayType)):
sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
return 'NSArray/*<%s>*/ *' % sub_type
return None
# ObjC <-> Protocol conversion for commands and events.
# - convert a command call parameter received from Protocol to ObjC for handler
# - convert a command return parameter in callback block from ObjC to Protocol to send
# - convert an event parameter from ObjC API to Protocol to send
def objc_protocol_export_expression_for_variable(self, var_type, var_name):
category = ObjCTypeCategory.category_for_type(var_type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
if isinstance(var_type, EnumType):
return 'toProtocolString(%s)' % var_name
return var_name
        if category == ObjCTypeCategory.Object:
            return '[%s toInspectorObject]' % var_name
        if category == ObjCTypeCategory.Array:
            protocol_type = ObjCGenerator.protocol_type_for_type(var_type.element_type)
            objc_class = self.objc_class_for_type(var_type.element_type)
            if protocol_type == 'Inspector::Protocol::Array<String>':
                return 'inspectorStringArrayArray(%s)' % var_name
            if protocol_type == 'String' and objc_class == 'NSString':
                return 'inspectorStringArray(%s)' % var_name
            if protocol_type == 'int' and objc_class == 'NSNumber':
                return 'inspectorIntegerArray(%s)' % var_name
            if protocol_type == 'double' and objc_class == 'NSNumber':
                return 'inspectorDoubleArray(%s)' % var_name
return 'inspectorObjectArray(%s)' % var_name
def objc_protocol_import_expression_for_member(self, name, declaration, member):
if isinstance(member.type, EnumType):
if member.type.is_anonymous:
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), name)
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(member.type), name)
return self.objc_protocol_import_expression_for_variable(member.type, name)
def objc_protocol_import_expression_for_parameter(self, name, domain, event_or_command_name, parameter):
if isinstance(parameter.type, EnumType):
if parameter.type.is_anonymous:
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_parameter(domain, event_or_command_name, parameter), name)
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(parameter.type), name)
return self.objc_protocol_import_expression_for_variable(parameter.type, name)
def objc_protocol_import_expression_for_variable(self, var_type, var_name):
category = ObjCTypeCategory.category_for_type(var_type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
return var_name
        if category == ObjCTypeCategory.Object:
            objc_class = self.objc_class_for_type(var_type)
            return '[[[%s alloc] initWithInspectorObject:%s] autorelease]' % (objc_class, var_name)
        if category == ObjCTypeCategory.Array:
            objc_class = self.objc_class_for_type(var_type.element_type)
            if objc_class == 'NSString':
                return 'objcStringArray(%s)' % var_name
            if objc_class == 'NSNumber':  # FIXME: Integer or Double?
return 'objcIntegerArray(%s)' % var_name
return 'objcArray<%s>(%s)' % (objc_class, var_name)
# ObjC <-> JSON object conversion for types getters/setters.
# - convert a member setter from ObjC API to JSON object setter
# - convert a member getter from JSON object to ObjC API
def objc_to_protocol_expression_for_member(self, declaration, member, sub_expression):
category = ObjCTypeCategory.category_for_type(member.type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
if isinstance(member.type, EnumType):
return 'toProtocolString(%s)' % sub_expression
return sub_expression
        if category == ObjCTypeCategory.Object:
            return sub_expression
        if category == ObjCTypeCategory.Array:
            objc_class = self.objc_class_for_type(member.type.element_type)
            if objc_class == 'NSString':
                return 'inspectorStringArray(%s)' % sub_expression
            if objc_class == 'NSNumber':
                protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
                if protocol_type == 'double':
return 'inspectorDoubleArray(%s)' % sub_expression
return 'inspectorIntegerArray(%s)' % sub_expression
return 'inspectorObjectArray(%s)' % sub_expression
def protocol_to_objc_expression_for_member(self, declaration, member, sub_expression):
category = ObjCTypeCategory.category_for_type(member.type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
if isinstance(member.type, EnumType):
if member.type.is_anonymous:
return 'fromProtocolString<%s>(%s).value()' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), sub_expression)
return 'fromProtocolString<%s>(%s).value()' % (self.objc_enum_name_for_non_anonymous_enum(member.type), sub_expression)
return sub_expression
        if category == ObjCTypeCategory.Object:
            objc_class = self.objc_class_for_type(member.type)
            return '[[%s alloc] initWithInspectorObject:[%s toInspectorObject].get()]' % (objc_class, sub_expression)
        if category == ObjCTypeCategory.Array:
            protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
            objc_class = self.objc_class_for_type(member.type.element_type)
            if objc_class == 'NSString':
                return 'objcStringArray(%s)' % sub_expression
            if objc_class == 'NSNumber':
                if protocol_type == 'double':
return 'objcDoubleArray(%s)' % sub_expression
return 'objcIntegerArray(%s)' % sub_expression
return 'objcArray<%s>(%s)' % (objc_class, sub_expression)
def payload_to_objc_expression_for_member(self, declaration, member):
_type = member.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type
if isinstance(_type, PrimitiveType):
sub_expression = 'payload[@"%s"]' % member.member_name
raw_name = _type.raw_name()
            if raw_name == 'boolean':
                return '[%s boolValue]' % sub_expression
            if raw_name == 'integer':
                return '[%s integerValue]' % sub_expression
            if raw_name == 'number':
                return '[%s doubleValue]' % sub_expression
if raw_name in ['any', 'object', 'array', 'string']:
return sub_expression # The setter will check the incoming value.
return None
        if isinstance(_type, EnumType):
            sub_expression = 'payload[@"%s"]' % member.member_name
            if _type.is_anonymous:
                return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), sub_expression)
            return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(_type), sub_expression)
        if isinstance(_type, ObjectType):
            objc_class = self.objc_class_for_type(_type)
            return '[[%s alloc] initWithPayload:payload[@"%s"]]' % (objc_class, member.member_name)
        if isinstance(_type, ArrayType):
            objc_class = self.objc_class_for_type(_type.element_type)
            return 'objcArrayFromPayload<%s>(payload[@"%s"])' % (objc_class, member.member_name)
# JSON object setter/getter selectors for types.
@staticmethod
def objc_setter_method_for_member(declaration, member):
return ObjCGenerator.objc_setter_method_for_member_internal(member.type, declaration, member)
@staticmethod
def objc_setter_method_for_member_internal(_type, declaration, member):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
raw_name = _type.raw_name()
            if raw_name == 'boolean':
                return 'setBool'
            if raw_name == 'integer':
                return 'setInteger'
            if raw_name == 'number':
                return 'setDouble'
            if raw_name == 'string':
                return 'setString'
            if raw_name in ['any', 'object']:
                return 'setObject'
            if raw_name == 'array':
return 'setInspectorArray'
return None
if (isinstance(_type, EnumType)):
return 'setString'
if (isinstance(_type, ObjectType)):
return 'setObject'
if (isinstance(_type, ArrayType)):
return 'setInspectorArray'
return None
@staticmethod
def objc_getter_method_for_member(declaration, member):
return ObjCGenerator.objc_getter_method_for_member_internal(member.type, declaration, member)
@staticmethod
def objc_getter_method_for_member_internal(_type, declaration, member):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
raw_name = _type.raw_name()
            if raw_name == 'boolean':
                return 'boolForKey'
            if raw_name == 'integer':
                return 'integerForKey'
            if raw_name == 'number':
                return 'doubleForKey'
            if raw_name == 'string':
                return 'stringForKey'
            if raw_name in ['any', 'object']:
                return 'objectForKey'
            if raw_name == 'array':
return 'inspectorArrayForKey'
return None
if (isinstance(_type, EnumType)):
return 'stringForKey'
if (isinstance(_type, ObjectType)):
return 'objectForKey'
if (isinstance(_type, ArrayType)):
return 'inspectorArrayForKey'
return None
|
gpl-2.0
|
KenCoder/quarter
|
lambda/requests/compat.py
|
1039
|
1469
|
# -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
|
epl-1.0
|
RichardLitt/wyrd-django-dev
|
django/middleware/locale.py
|
104
|
2423
|
"This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (is_valid_path, get_resolver,
LocaleRegexURLResolver)
from django.http import HttpResponseRedirect
from django.utils.cache import patch_vary_headers
from django.utils import translation
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
def process_request(self, request):
check_path = self.is_language_prefix_patterns_used()
language = translation.get_language_from_request(
request, check_path=check_path)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
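    # On a 404, retry the request path with the active language prefixed (appending a
    # slash when APPEND_SLASH applies) and redirect if that prefixed path resolves.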
def process_response(self, request, response):
language = translation.get_language()
if (response.status_code == 404 and
not translation.get_language_from_path(request.path_info)
and self.is_language_prefix_patterns_used()):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
if settings.APPEND_SLASH and not language_path.endswith('/'):
language_path = language_path + '/'
if is_valid_path(language_path, urlconf):
language_url = "%s://%s/%s%s" % (
request.is_secure() and 'https' or 'http',
request.get_host(), language, request.get_full_path())
return HttpResponseRedirect(language_url)
translation.deactivate()
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
|
bsd-3-clause
|
mp-coder/translate-dev-tools
|
esprima/parser.py
|
1
|
120549
|
# -*- coding: utf-8 -*-
# Copyright JS Foundation and other contributors, https://js.foundation/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
from .objects import Object
from .compat import basestring, unicode
from .utils import format
from .error_handler import ErrorHandler
from .messages import Messages
from .scanner import RawToken, Scanner, SourceLocation, Position, RegExp
from .token import Token, TokenName
from .syntax import Syntax
from . import nodes as Node
class Value(object):
def __init__(self, value):
self.value = value
class Params(object):
def __init__(self, simple=None, message=None, stricted=None, firstRestricted=None, inFor=None, paramSet=None, params=None, get=None):
self.simple = simple
self.message = message
self.stricted = stricted
self.firstRestricted = firstRestricted
self.inFor = inFor
self.paramSet = paramSet
self.params = params
self.get = get
class Config(Object):
def __init__(self, range=False, loc=False, source=None, tokens=False, comment=False, tolerant=False, **options):
self.range = range
self.loc = loc
self.source = source
self.tokens = tokens
self.comment = comment
self.tolerant = tolerant
for k, v in options.items():
setattr(self, k, v)
class Context(object):
def __init__(self, isModule=False, await=False, allowIn=True, allowStrictDirective=True, allowYield=True, firstCoverInitializedNameError=None, isAssignmentTarget=False, isBindingElement=False, inFunctionBody=False, inIteration=False, inSwitch=False, labelSet=None, strict=False):
self.isModule = isModule
self.await = await
self.allowIn = allowIn
self.allowStrictDirective = allowStrictDirective
self.allowYield = allowYield
self.firstCoverInitializedNameError = firstCoverInitializedNameError
self.isAssignmentTarget = isAssignmentTarget
self.isBindingElement = isBindingElement
self.inFunctionBody = inFunctionBody
self.inIteration = inIteration
self.inSwitch = inSwitch
self.labelSet = {} if labelSet is None else labelSet
self.strict = strict
class Marker(object):
def __init__(self, index=None, line=None, column=None):
self.index = index
self.line = line
self.column = column
class TokenEntry(Object):
def __init__(self, type=None, value=None, regex=None, range=None, loc=None):
self.type = type
self.value = value
self.regex = regex
self.range = range
self.loc = loc
class Parser(object):
    def __init__(self, code, options=None, delegate=None):
        self.config = Config(**(options or {}))
self.delegate = delegate
self.errorHandler = ErrorHandler()
self.errorHandler.tolerant = self.config.tolerant
self.scanner = Scanner(code, self.errorHandler)
self.scanner.trackComment = self.config.comment
self.operatorPrecedence = {
'||': 1,
'&&': 2,
'|': 3,
'^': 4,
'&': 5,
'==': 6,
'!=': 6,
'===': 6,
'!==': 6,
'<': 7,
'>': 7,
'<=': 7,
'>=': 7,
'instanceof': 7,
'in': 7,
'<<': 8,
'>>': 8,
'>>>': 8,
'+': 9,
'-': 9,
'*': 11,
'/': 11,
'%': 11,
}
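        # Binary operator precedence table used by parseBinaryExpression; '**' is not
        # listed because exponentiation is handled by parseExponentiationExpression.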
self.lookahead = RawToken(
type=Token.EOF,
value='',
lineNumber=self.scanner.lineNumber,
lineStart=0,
start=0,
end=0
)
self.hasLineTerminator = False
self.context = Context(
isModule=False,
await=False,
allowIn=True,
allowStrictDirective=True,
allowYield=True,
firstCoverInitializedNameError=None,
isAssignmentTarget=False,
isBindingElement=False,
inFunctionBody=False,
inIteration=False,
inSwitch=False,
labelSet={},
strict=False
)
self.tokens = []
self.startMarker = Marker(
index=0,
line=self.scanner.lineNumber,
column=0
)
self.lastMarker = Marker(
index=0,
line=self.scanner.lineNumber,
column=0
)
self.nextToken()
self.lastMarker = Marker(
index=self.scanner.index,
line=self.scanner.lineNumber,
column=self.scanner.index - self.scanner.lineStart
)
def throwError(self, messageFormat, *args):
msg = format(messageFormat, *args)
index = self.lastMarker.index
line = self.lastMarker.line
column = self.lastMarker.column + 1
raise self.errorHandler.createError(index, line, column, msg)
def tolerateError(self, messageFormat, *args):
msg = format(messageFormat, *args)
index = self.lastMarker.index
line = self.scanner.lineNumber
column = self.lastMarker.column + 1
self.errorHandler.tolerateError(index, line, column, msg)
# Throw an exception because of the token.
def unexpectedTokenError(self, token=None, message=None):
msg = message or Messages.UnexpectedToken
if token:
if not message:
typ = token.type
if typ is Token.EOF:
msg = Messages.UnexpectedEOS
elif typ is Token.Identifier:
msg = Messages.UnexpectedIdentifier
elif typ is Token.NumericLiteral:
msg = Messages.UnexpectedNumber
elif typ is Token.StringLiteral:
msg = Messages.UnexpectedString
elif typ is Token.Template:
msg = Messages.UnexpectedTemplate
elif typ is Token.Keyword:
if self.scanner.isFutureReservedWord(token.value):
msg = Messages.UnexpectedReserved
elif self.context.strict and self.scanner.isStrictModeReservedWord(token.value):
msg = Messages.StrictReservedWord
else:
msg = Messages.UnexpectedToken
value = token.value
else:
value = 'ILLEGAL'
msg = msg.replace('%0', unicode(value), 1)
if token and isinstance(token.lineNumber, int):
index = token.start
line = token.lineNumber
lastMarkerLineStart = self.lastMarker.index - self.lastMarker.column
column = token.start - lastMarkerLineStart + 1
return self.errorHandler.createError(index, line, column, msg)
else:
index = self.lastMarker.index
line = self.lastMarker.line
column = self.lastMarker.column + 1
return self.errorHandler.createError(index, line, column, msg)
def throwUnexpectedToken(self, token=None, message=None):
raise self.unexpectedTokenError(token, message)
def tolerateUnexpectedToken(self, token=None, message=None):
self.errorHandler.tolerate(self.unexpectedTokenError(token, message))
def collectComments(self):
if not self.config.comment:
self.scanner.scanComments()
else:
comments = self.scanner.scanComments()
if comments:
for e in comments:
if e.multiLine:
node = Node.BlockComment(self.scanner.source[e.slice[0]:e.slice[1]])
else:
node = Node.LineComment(self.scanner.source[e.slice[0]:e.slice[1]])
if self.config.range:
node.range = e.range
if self.config.loc:
node.loc = e.loc
if self.delegate:
metadata = SourceLocation(
start=Position(
line=e.loc.start.line,
column=e.loc.start.column,
offset=e.range[0],
),
end=Position(
line=e.loc.end.line,
column=e.loc.end.column,
offset=e.range[1],
)
)
new_node = self.delegate(node, metadata)
if new_node is not None:
node = new_node
# From internal representation to an external structure
def getTokenRaw(self, token):
return self.scanner.source[token.start:token.end]
def convertToken(self, token):
t = TokenEntry(
type=TokenName[token.type],
value=self.getTokenRaw(token),
)
if self.config.range:
t.range = [token.start, token.end]
if self.config.loc:
t.loc = SourceLocation(
start=Position(
line=self.startMarker.line,
column=self.startMarker.column,
),
end=Position(
line=self.scanner.lineNumber,
column=self.scanner.index - self.scanner.lineStart,
),
)
if token.type is Token.RegularExpression:
t.regex = RegExp(
pattern=token.pattern,
flags=token.flags,
)
return t
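    # Advance the scanner and return the previous lookahead token, updating lastMarker
    # to the end of the returned token and startMarker to the start of the new one.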
def nextToken(self):
token = self.lookahead
self.lastMarker.index = self.scanner.index
self.lastMarker.line = self.scanner.lineNumber
self.lastMarker.column = self.scanner.index - self.scanner.lineStart
self.collectComments()
if self.scanner.index != self.startMarker.index:
self.startMarker.index = self.scanner.index
self.startMarker.line = self.scanner.lineNumber
self.startMarker.column = self.scanner.index - self.scanner.lineStart
next = self.scanner.lex()
self.hasLineTerminator = token.lineNumber != next.lineNumber
if next and self.context.strict and next.type is Token.Identifier:
if self.scanner.isStrictModeReservedWord(next.value):
next.type = Token.Keyword
self.lookahead = next
if self.config.tokens and next.type is not Token.EOF:
self.tokens.append(self.convertToken(next))
return token
def nextRegexToken(self):
self.collectComments()
token = self.scanner.scanRegExp()
if self.config.tokens:
# Pop the previous token, '/' or '/='
            # This is added from the lookahead token.
self.tokens.pop()
self.tokens.append(self.convertToken(token))
# Prime the next lookahead.
self.lookahead = token
self.nextToken()
return token
def createNode(self):
return Marker(
index=self.startMarker.index,
line=self.startMarker.line,
column=self.startMarker.column,
)
def startNode(self, token, lastLineStart=0):
column = token.start - token.lineStart
line = token.lineNumber
if column < 0:
column += lastLineStart
line -= 1
return Marker(
index=token.start,
line=line,
column=column,
)
def finalize(self, marker, node):
if self.config.range:
node.range = [marker.index, self.lastMarker.index]
if self.config.loc:
node.loc = SourceLocation(
start=Position(
line=marker.line,
column=marker.column,
),
end=Position(
line=self.lastMarker.line,
column=self.lastMarker.column,
),
)
if self.config.source:
node.loc.source = self.config.source
if self.delegate:
metadata = SourceLocation(
start=Position(
line=marker.line,
column=marker.column,
offset=marker.index,
),
end=Position(
line=self.lastMarker.line,
column=self.lastMarker.column,
offset=self.lastMarker.index,
)
)
new_node = self.delegate(node, metadata)
if new_node is not None:
node = new_node
return node
# Expect the next token to match the specified punctuator.
# If not, an exception will be thrown.
def expect(self, value):
token = self.nextToken()
if token.type is not Token.Punctuator or token.value != value:
self.throwUnexpectedToken(token)
# Quietly expect a comma when in tolerant mode, otherwise delegates to expect().
def expectCommaSeparator(self):
if self.config.tolerant:
token = self.lookahead
if token.type is Token.Punctuator and token.value == ',':
self.nextToken()
elif token.type is Token.Punctuator and token.value == ';':
self.nextToken()
self.tolerateUnexpectedToken(token)
else:
self.tolerateUnexpectedToken(token, Messages.UnexpectedToken)
else:
self.expect(',')
# Expect the next token to match the specified keyword.
# If not, an exception will be thrown.
def expectKeyword(self, keyword):
token = self.nextToken()
if token.type is not Token.Keyword or token.value != keyword:
self.throwUnexpectedToken(token)
# Return true if the next token matches the specified punctuator.
def match(self, *value):
return self.lookahead.type is Token.Punctuator and self.lookahead.value in value
# Return true if the next token matches the specified keyword
def matchKeyword(self, *keyword):
return self.lookahead.type is Token.Keyword and self.lookahead.value in keyword
# Return true if the next token matches the specified contextual keyword
# (where an identifier is sometimes a keyword depending on the context)
def matchContextualKeyword(self, *keyword):
return self.lookahead.type is Token.Identifier and self.lookahead.value in keyword
# Return true if the next token is an assignment operator
def matchAssign(self):
if self.lookahead.type is not Token.Punctuator:
return False
op = self.lookahead.value
return op in ('=', '*=', '**=', '/=', '%=', '+=', '-=', '<<=', '>>=', '>>>=', '&=', '^=', '|=')
# Cover grammar support.
#
    # When an assignment expression position starts with a left parenthesis, the determination of the type
    # of the syntax is deferred arbitrarily long until the end of the parentheses pair (plus a lookahead)
    # or the first comma. This situation also defers the determination of all the expressions nested in the pair.
    #
    # There are three productions that can be parsed in a parentheses pair that need to be determined
    # after the outermost pair is closed. They are:
#
# 1. AssignmentExpression
# 2. BindingElements
# 3. AssignmentTargets
#
# In order to avoid exponential backtracking, we use two flags to denote if the production can be
# binding element or assignment target.
#
# The three productions have the relationship:
#
# BindingElements ⊆ AssignmentTargets ⊆ AssignmentExpression
#
    # with a single exception: CoverInitializedName, when used directly in an Expression, generates
    # an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the
    # first usage of CoverInitializedName and report it when we reach the end of the parentheses pair.
    #
    # The isolateCoverGrammar function runs the given parse function with a new cover grammar context, and it does not
    # affect the current flags. This means the production the parser parses is only used as an expression. Therefore
    # the CoverInitializedName check is conducted.
    #
    # The inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates
    # the flags outside of the parser. This means the production the parser parses is used as a part of a potential
    # pattern. The CoverInitializedName check is deferred.
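    # For example, in '([a, b] = c) => d', '[a, b] = c' is first parsed as an
    # AssignmentExpression under the cover grammar; only after ')' and the '=>'
    # lookahead is it reinterpreted as the arrow function's parameter pattern.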
def isolateCoverGrammar(self, parseFunction):
previousIsBindingElement = self.context.isBindingElement
previousIsAssignmentTarget = self.context.isAssignmentTarget
previousFirstCoverInitializedNameError = self.context.firstCoverInitializedNameError
self.context.isBindingElement = True
self.context.isAssignmentTarget = True
self.context.firstCoverInitializedNameError = None
result = parseFunction()
if self.context.firstCoverInitializedNameError is not None:
self.throwUnexpectedToken(self.context.firstCoverInitializedNameError)
self.context.isBindingElement = previousIsBindingElement
self.context.isAssignmentTarget = previousIsAssignmentTarget
self.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError
return result
def inheritCoverGrammar(self, parseFunction):
previousIsBindingElement = self.context.isBindingElement
previousIsAssignmentTarget = self.context.isAssignmentTarget
previousFirstCoverInitializedNameError = self.context.firstCoverInitializedNameError
self.context.isBindingElement = True
self.context.isAssignmentTarget = True
self.context.firstCoverInitializedNameError = None
result = parseFunction()
self.context.isBindingElement = self.context.isBindingElement and previousIsBindingElement
self.context.isAssignmentTarget = self.context.isAssignmentTarget and previousIsAssignmentTarget
self.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError or self.context.firstCoverInitializedNameError
return result
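    # Automatic semicolon insertion: consume an explicit ';', or accept a preceding
    # line terminator, '}', or end of input in its place; anything else is an error.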
def consumeSemicolon(self):
if self.match(';'):
self.nextToken()
elif not self.hasLineTerminator:
if self.lookahead.type is not Token.EOF and not self.match('}'):
self.throwUnexpectedToken(self.lookahead)
self.lastMarker.index = self.startMarker.index
self.lastMarker.line = self.startMarker.line
self.lastMarker.column = self.startMarker.column
# https://tc39.github.io/ecma262/#sec-primary-expression
def parsePrimaryExpression(self):
node = self.createNode()
typ = self.lookahead.type
if typ is Token.Identifier:
if (self.context.isModule or self.context.await) and self.lookahead.value == 'await':
self.tolerateUnexpectedToken(self.lookahead)
expr = self.parseFunctionExpression() if self.matchAsyncFunction() else self.finalize(node, Node.Identifier(self.nextToken().value))
elif typ in (
Token.NumericLiteral,
Token.StringLiteral,
):
if self.context.strict and self.lookahead.octal:
self.tolerateUnexpectedToken(self.lookahead, Messages.StrictOctalLiteral)
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
token = self.nextToken()
raw = self.getTokenRaw(token)
expr = self.finalize(node, Node.Literal(token.value, raw))
elif typ is Token.BooleanLiteral:
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
token = self.nextToken()
raw = self.getTokenRaw(token)
expr = self.finalize(node, Node.Literal(token.value == 'true', raw))
elif typ is Token.NullLiteral:
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
token = self.nextToken()
raw = self.getTokenRaw(token)
expr = self.finalize(node, Node.Literal(None, raw))
elif typ is Token.Template:
expr = self.parseTemplateLiteral()
elif typ is Token.Punctuator:
value = self.lookahead.value
if value == '(':
self.context.isBindingElement = False
expr = self.inheritCoverGrammar(self.parseGroupExpression)
elif value == '[':
expr = self.inheritCoverGrammar(self.parseArrayInitializer)
elif value == '{':
expr = self.inheritCoverGrammar(self.parseObjectInitializer)
elif value in ('/', '/='):
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
self.scanner.index = self.startMarker.index
token = self.nextRegexToken()
raw = self.getTokenRaw(token)
expr = self.finalize(node, Node.RegexLiteral(token.regex, raw, token.pattern, token.flags))
else:
expr = self.throwUnexpectedToken(self.nextToken())
elif typ is Token.Keyword:
if not self.context.strict and self.context.allowYield and self.matchKeyword('yield'):
expr = self.parseIdentifierName()
elif not self.context.strict and self.matchKeyword('let'):
expr = self.finalize(node, Node.Identifier(self.nextToken().value))
else:
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
if self.matchKeyword('function'):
expr = self.parseFunctionExpression()
elif self.matchKeyword('this'):
self.nextToken()
expr = self.finalize(node, Node.ThisExpression())
elif self.matchKeyword('class'):
expr = self.parseClassExpression()
elif self.matchImportCall():
expr = self.parseImportCall()
else:
expr = self.throwUnexpectedToken(self.nextToken())
else:
expr = self.throwUnexpectedToken(self.nextToken())
return expr
# https://tc39.github.io/ecma262/#sec-array-initializer
def parseSpreadElement(self):
node = self.createNode()
self.expect('...')
arg = self.inheritCoverGrammar(self.parseAssignmentExpression)
return self.finalize(node, Node.SpreadElement(arg))
def parseArrayInitializer(self):
node = self.createNode()
elements = []
self.expect('[')
while not self.match(']'):
if self.match(','):
self.nextToken()
elements.append(None)
elif self.match('...'):
element = self.parseSpreadElement()
if not self.match(']'):
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
self.expect(',')
elements.append(element)
else:
elements.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
if not self.match(']'):
self.expect(',')
self.expect(']')
return self.finalize(node, Node.ArrayExpression(elements))
# https://tc39.github.io/ecma262/#sec-object-initializer
def parsePropertyMethod(self, params):
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
previousStrict = self.context.strict
previousAllowStrictDirective = self.context.allowStrictDirective
self.context.allowStrictDirective = params.simple
body = self.isolateCoverGrammar(self.parseFunctionSourceElements)
if self.context.strict and params.firstRestricted:
self.tolerateUnexpectedToken(params.firstRestricted, params.message)
if self.context.strict and params.stricted:
self.tolerateUnexpectedToken(params.stricted, params.message)
self.context.strict = previousStrict
self.context.allowStrictDirective = previousAllowStrictDirective
return body
def parsePropertyMethodFunction(self):
isGenerator = False
node = self.createNode()
previousAllowYield = self.context.allowYield
self.context.allowYield = True
params = self.parseFormalParameters()
method = self.parsePropertyMethod(params)
self.context.allowYield = previousAllowYield
return self.finalize(node, Node.FunctionExpression(None, params.params, method, isGenerator))
def parsePropertyMethodAsyncFunction(self):
node = self.createNode()
previousAllowYield = self.context.allowYield
previousAwait = self.context.await
self.context.allowYield = False
self.context.await = True
params = self.parseFormalParameters()
method = self.parsePropertyMethod(params)
self.context.allowYield = previousAllowYield
self.context.await = previousAwait
return self.finalize(node, Node.AsyncFunctionExpression(None, params.params, method))
def parseObjectPropertyKey(self):
node = self.createNode()
token = self.nextToken()
typ = token.type
if typ in (
Token.StringLiteral,
Token.NumericLiteral,
):
if self.context.strict and token.octal:
self.tolerateUnexpectedToken(token, Messages.StrictOctalLiteral)
raw = self.getTokenRaw(token)
key = self.finalize(node, Node.Literal(token.value, raw))
elif typ in (
Token.Identifier,
Token.BooleanLiteral,
Token.NullLiteral,
Token.Keyword,
):
key = self.finalize(node, Node.Identifier(token.value))
elif typ is Token.Punctuator:
if token.value == '[':
key = self.isolateCoverGrammar(self.parseAssignmentExpression)
self.expect(']')
else:
key = self.throwUnexpectedToken(token)
else:
key = self.throwUnexpectedToken(token)
return key
def isPropertyKey(self, key, value):
return (
(key.type is Syntax.Identifier and key.name == value) or
(key.type is Syntax.Literal and key.value == value)
)
def parseObjectProperty(self, hasProto):
node = self.createNode()
token = self.lookahead
key = None
value = None
computed = False
method = False
shorthand = False
isAsync = False
if token.type is Token.Identifier:
id = token.value
self.nextToken()
computed = self.match('[')
isAsync = not self.hasLineTerminator and (id == 'async') and not (self.match(':', '(', '*'))
key = self.parseObjectPropertyKey() if isAsync else self.finalize(node, Node.Identifier(id))
elif self.match('*'):
self.nextToken()
else:
computed = self.match('[')
key = self.parseObjectPropertyKey()
lookaheadPropertyKey = self.qualifiedPropertyName(self.lookahead)
if token.type is Token.Identifier and not isAsync and token.value == 'get' and lookaheadPropertyKey:
kind = 'get'
computed = self.match('[')
key = self.parseObjectPropertyKey()
self.context.allowYield = False
value = self.parseGetterMethod()
elif token.type is Token.Identifier and not isAsync and token.value == 'set' and lookaheadPropertyKey:
kind = 'set'
computed = self.match('[')
key = self.parseObjectPropertyKey()
value = self.parseSetterMethod()
elif token.type is Token.Punctuator and token.value == '*' and lookaheadPropertyKey:
kind = 'init'
computed = self.match('[')
key = self.parseObjectPropertyKey()
value = self.parseGeneratorMethod()
method = True
else:
if not key:
self.throwUnexpectedToken(self.lookahead)
kind = 'init'
if self.match(':') and not isAsync:
if not computed and self.isPropertyKey(key, '__proto__'):
if hasProto.value:
self.tolerateError(Messages.DuplicateProtoProperty)
hasProto.value = True
self.nextToken()
value = self.inheritCoverGrammar(self.parseAssignmentExpression)
elif self.match('('):
value = self.parsePropertyMethodAsyncFunction() if isAsync else self.parsePropertyMethodFunction()
method = True
elif token.type is Token.Identifier:
id = self.finalize(node, Node.Identifier(token.value))
if self.match('='):
self.context.firstCoverInitializedNameError = self.lookahead
self.nextToken()
shorthand = True
init = self.isolateCoverGrammar(self.parseAssignmentExpression)
value = self.finalize(node, Node.AssignmentPattern(id, init))
else:
shorthand = True
value = id
else:
self.throwUnexpectedToken(self.nextToken())
return self.finalize(node, Node.Property(kind, key, computed, value, method, shorthand))
def parseObjectInitializer(self):
node = self.createNode()
self.expect('{')
properties = []
hasProto = Value(False)
while not self.match('}'):
properties.append(self.parseSpreadElement() if self.match('...') else self.parseObjectProperty(hasProto))
if not self.match('}'):
self.expectCommaSeparator()
self.expect('}')
return self.finalize(node, Node.ObjectExpression(properties))
# https://tc39.github.io/ecma262/#sec-template-literals
def parseTemplateHead(self):
assert self.lookahead.head, 'Template literal must start with a template head'
node = self.createNode()
token = self.nextToken()
raw = token.value
cooked = token.cooked
return self.finalize(node, Node.TemplateElement(raw, cooked, token.tail))
def parseTemplateElement(self):
if self.lookahead.type is not Token.Template:
self.throwUnexpectedToken()
node = self.createNode()
token = self.nextToken()
raw = token.value
cooked = token.cooked
return self.finalize(node, Node.TemplateElement(raw, cooked, token.tail))
def parseTemplateLiteral(self):
node = self.createNode()
expressions = []
quasis = []
quasi = self.parseTemplateHead()
quasis.append(quasi)
while not quasi.tail:
expressions.append(self.parseExpression())
quasi = self.parseTemplateElement()
quasis.append(quasi)
return self.finalize(node, Node.TemplateLiteral(quasis, expressions))
# https://tc39.github.io/ecma262/#sec-grouping-operator
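    # Retroactively convert an expression parsed under the cover grammar into the
    # corresponding pattern, e.g. ArrayExpression -> ArrayPattern.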
def reinterpretExpressionAsPattern(self, expr):
typ = expr.type
if typ in (
Syntax.Identifier,
Syntax.MemberExpression,
Syntax.RestElement,
Syntax.AssignmentPattern,
):
pass
elif typ is Syntax.SpreadElement:
expr.type = Syntax.RestElement
self.reinterpretExpressionAsPattern(expr.argument)
elif typ is Syntax.ArrayExpression:
expr.type = Syntax.ArrayPattern
for elem in expr.elements:
if elem is not None:
self.reinterpretExpressionAsPattern(elem)
elif typ is Syntax.ObjectExpression:
expr.type = Syntax.ObjectPattern
for prop in expr.properties:
self.reinterpretExpressionAsPattern(prop if prop.type is Syntax.SpreadElement else prop.value)
elif typ is Syntax.AssignmentExpression:
expr.type = Syntax.AssignmentPattern
del expr.operator
self.reinterpretExpressionAsPattern(expr.left)
else:
# Allow other node type for tolerant parsing.
pass
def parseGroupExpression(self):
self.expect('(')
if self.match(')'):
self.nextToken()
if not self.match('=>'):
self.expect('=>')
expr = Node.ArrowParameterPlaceHolder([])
else:
startToken = self.lookahead
params = []
if self.match('...'):
expr = self.parseRestElement(params)
self.expect(')')
if not self.match('=>'):
self.expect('=>')
expr = Node.ArrowParameterPlaceHolder([expr])
else:
arrow = False
self.context.isBindingElement = True
expr = self.inheritCoverGrammar(self.parseAssignmentExpression)
if self.match(','):
expressions = []
self.context.isAssignmentTarget = False
expressions.append(expr)
while self.lookahead.type is not Token.EOF:
if not self.match(','):
break
self.nextToken()
if self.match(')'):
self.nextToken()
for expression in expressions:
self.reinterpretExpressionAsPattern(expression)
arrow = True
expr = Node.ArrowParameterPlaceHolder(expressions)
elif self.match('...'):
if not self.context.isBindingElement:
self.throwUnexpectedToken(self.lookahead)
expressions.append(self.parseRestElement(params))
self.expect(')')
if not self.match('=>'):
self.expect('=>')
self.context.isBindingElement = False
for expression in expressions:
self.reinterpretExpressionAsPattern(expression)
arrow = True
expr = Node.ArrowParameterPlaceHolder(expressions)
else:
expressions.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
if arrow:
break
if not arrow:
expr = self.finalize(self.startNode(startToken), Node.SequenceExpression(expressions))
if not arrow:
self.expect(')')
if self.match('=>'):
if expr.type is Syntax.Identifier and expr.name == 'yield':
arrow = True
expr = Node.ArrowParameterPlaceHolder([expr])
if not arrow:
if not self.context.isBindingElement:
self.throwUnexpectedToken(self.lookahead)
if expr.type is Syntax.SequenceExpression:
for expression in expr.expressions:
self.reinterpretExpressionAsPattern(expression)
else:
self.reinterpretExpressionAsPattern(expr)
if expr.type is Syntax.SequenceExpression:
parameters = expr.expressions
else:
parameters = [expr]
expr = Node.ArrowParameterPlaceHolder(parameters)
self.context.isBindingElement = False
return expr
# https://tc39.github.io/ecma262/#sec-left-hand-side-expressions
def parseArguments(self):
self.expect('(')
args = []
if not self.match(')'):
while True:
if self.match('...'):
expr = self.parseSpreadElement()
else:
expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
args.append(expr)
if self.match(')'):
break
self.expectCommaSeparator()
if self.match(')'):
break
self.expect(')')
return args
def isIdentifierName(self, token):
return (
token.type is Token.Identifier or
token.type is Token.Keyword or
token.type is Token.BooleanLiteral or
token.type is Token.NullLiteral
)
def parseIdentifierName(self):
node = self.createNode()
token = self.nextToken()
if not self.isIdentifierName(token):
self.throwUnexpectedToken(token)
return self.finalize(node, Node.Identifier(token.value))
def parseNewExpression(self):
node = self.createNode()
id = self.parseIdentifierName()
assert id.name == 'new', 'New expression must start with `new`'
if self.match('.'):
self.nextToken()
if self.lookahead.type is Token.Identifier and self.context.inFunctionBody and self.lookahead.value == 'target':
property = self.parseIdentifierName()
expr = Node.MetaProperty(id, property)
else:
self.throwUnexpectedToken(self.lookahead)
elif self.matchKeyword('import'):
self.throwUnexpectedToken(self.lookahead)
else:
callee = self.isolateCoverGrammar(self.parseLeftHandSideExpression)
args = self.parseArguments() if self.match('(') else []
expr = Node.NewExpression(callee, args)
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
return self.finalize(node, expr)
def parseAsyncArgument(self):
arg = self.parseAssignmentExpression()
self.context.firstCoverInitializedNameError = None
return arg
def parseAsyncArguments(self):
self.expect('(')
args = []
if not self.match(')'):
while True:
if self.match('...'):
expr = self.parseSpreadElement()
else:
expr = self.isolateCoverGrammar(self.parseAsyncArgument)
args.append(expr)
if self.match(')'):
break
self.expectCommaSeparator()
if self.match(')'):
break
self.expect(')')
return args
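    # Distinguish the dynamic 'import(...)' call from an import declaration by peeking
    # one token past 'import' and then restoring the scanner state.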
def matchImportCall(self):
match = self.matchKeyword('import')
if match:
state = self.scanner.saveState()
self.scanner.scanComments()
next = self.scanner.lex()
self.scanner.restoreState(state)
match = (next.type is Token.Punctuator) and (next.value == '(')
return match
def parseImportCall(self):
node = self.createNode()
self.expectKeyword('import')
return self.finalize(node, Node.Import())
def parseLeftHandSideExpressionAllowCall(self):
startToken = self.lookahead
maybeAsync = self.matchContextualKeyword('async')
previousAllowIn = self.context.allowIn
self.context.allowIn = True
if self.matchKeyword('super') and self.context.inFunctionBody:
expr = self.createNode()
self.nextToken()
expr = self.finalize(expr, Node.Super())
if not self.match('(') and not self.match('.') and not self.match('['):
self.throwUnexpectedToken(self.lookahead)
else:
expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
while True:
if self.match('.'):
self.context.isBindingElement = False
self.context.isAssignmentTarget = True
self.expect('.')
property = self.parseIdentifierName()
expr = self.finalize(self.startNode(startToken), Node.StaticMemberExpression(expr, property))
elif self.match('('):
asyncArrow = maybeAsync and (startToken.lineNumber == self.lookahead.lineNumber)
self.context.isBindingElement = False
self.context.isAssignmentTarget = False
if asyncArrow:
args = self.parseAsyncArguments()
else:
args = self.parseArguments()
if expr.type is Syntax.Import and len(args) != 1:
self.tolerateError(Messages.BadImportCallArity)
expr = self.finalize(self.startNode(startToken), Node.CallExpression(expr, args))
if asyncArrow and self.match('=>'):
for arg in args:
self.reinterpretExpressionAsPattern(arg)
expr = Node.AsyncArrowParameterPlaceHolder(args)
elif self.match('['):
self.context.isBindingElement = False
self.context.isAssignmentTarget = True
self.expect('[')
property = self.isolateCoverGrammar(self.parseExpression)
self.expect(']')
expr = self.finalize(self.startNode(startToken), Node.ComputedMemberExpression(expr, property))
elif self.lookahead.type is Token.Template and self.lookahead.head:
quasi = self.parseTemplateLiteral()
expr = self.finalize(self.startNode(startToken), Node.TaggedTemplateExpression(expr, quasi))
else:
break
self.context.allowIn = previousAllowIn
return expr
def parseSuper(self):
node = self.createNode()
self.expectKeyword('super')
if not self.match('[') and not self.match('.'):
self.throwUnexpectedToken(self.lookahead)
return self.finalize(node, Node.Super())
def parseLeftHandSideExpression(self):
        assert self.context.allowIn, 'callee of new expression always allows in keyword.'
node = self.startNode(self.lookahead)
if self.matchKeyword('super') and self.context.inFunctionBody:
expr = self.parseSuper()
else:
expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
while True:
if self.match('['):
self.context.isBindingElement = False
self.context.isAssignmentTarget = True
self.expect('[')
property = self.isolateCoverGrammar(self.parseExpression)
self.expect(']')
expr = self.finalize(node, Node.ComputedMemberExpression(expr, property))
elif self.match('.'):
self.context.isBindingElement = False
self.context.isAssignmentTarget = True
self.expect('.')
property = self.parseIdentifierName()
expr = self.finalize(node, Node.StaticMemberExpression(expr, property))
elif self.lookahead.type is Token.Template and self.lookahead.head:
quasi = self.parseTemplateLiteral()
expr = self.finalize(node, Node.TaggedTemplateExpression(expr, quasi))
else:
break
return expr
# https://tc39.github.io/ecma262/#sec-update-expressions
def parseUpdateExpression(self):
startToken = self.lookahead
if self.match('++', '--'):
node = self.startNode(startToken)
token = self.nextToken()
expr = self.inheritCoverGrammar(self.parseUnaryExpression)
if self.context.strict and expr.type is Syntax.Identifier and self.scanner.isRestrictedWord(expr.name):
self.tolerateError(Messages.StrictLHSPrefix)
if not self.context.isAssignmentTarget:
self.tolerateError(Messages.InvalidLHSInAssignment)
prefix = True
expr = self.finalize(node, Node.UpdateExpression(token.value, expr, prefix))
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
else:
expr = self.inheritCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
if not self.hasLineTerminator and self.lookahead.type is Token.Punctuator:
if self.match('++', '--'):
if self.context.strict and expr.type is Syntax.Identifier and self.scanner.isRestrictedWord(expr.name):
self.tolerateError(Messages.StrictLHSPostfix)
if not self.context.isAssignmentTarget:
self.tolerateError(Messages.InvalidLHSInAssignment)
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
operator = self.nextToken().value
prefix = False
expr = self.finalize(self.startNode(startToken), Node.UpdateExpression(operator, expr, prefix))
return expr
# https://tc39.github.io/ecma262/#sec-unary-operators
def parseAwaitExpression(self):
node = self.createNode()
self.nextToken()
argument = self.parseUnaryExpression()
return self.finalize(node, Node.AwaitExpression(argument))
def parseUnaryExpression(self):
if (
self.match('+', '-', '~', '!') or
self.matchKeyword('delete', 'void', 'typeof')
):
node = self.startNode(self.lookahead)
token = self.nextToken()
expr = self.inheritCoverGrammar(self.parseUnaryExpression)
expr = self.finalize(node, Node.UnaryExpression(token.value, expr))
if self.context.strict and expr.operator == 'delete' and expr.argument.type is Syntax.Identifier:
self.tolerateError(Messages.StrictDelete)
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
elif self.context.await and self.matchContextualKeyword('await'):
expr = self.parseAwaitExpression()
else:
expr = self.parseUpdateExpression()
return expr
def parseExponentiationExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseUnaryExpression)
if expr.type is not Syntax.UnaryExpression and self.match('**'):
self.nextToken()
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
left = expr
right = self.isolateCoverGrammar(self.parseExponentiationExpression)
expr = self.finalize(self.startNode(startToken), Node.BinaryExpression('**', left, right))
return expr
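# Illustration: '**' is parsed right-associatively by recursing into
# parseExponentiationExpression for the right operand, so '2 ** 3 ** 2'
# groups as 2 ** (3 ** 2) == 512. A unary left operand such as '-2 ** 2'
# is not folded here (expr.type is UnaryExpression), so the stray '**'
# later surfaces as a syntax error, matching the ES2016 grammar.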
# https://tc39.github.io/ecma262/#sec-exp-operator
# https://tc39.github.io/ecma262/#sec-multiplicative-operators
# https://tc39.github.io/ecma262/#sec-additive-operators
# https://tc39.github.io/ecma262/#sec-bitwise-shift-operators
# https://tc39.github.io/ecma262/#sec-relational-operators
# https://tc39.github.io/ecma262/#sec-equality-operators
# https://tc39.github.io/ecma262/#sec-binary-bitwise-operators
# https://tc39.github.io/ecma262/#sec-binary-logical-operators
def binaryPrecedence(self, token):
op = token.value
if token.type is Token.Punctuator:
precedence = self.operatorPrecedence.get(op, 0)
elif token.type is Token.Keyword:
precedence = 7 if (op == 'instanceof' or (self.context.allowIn and op == 'in')) else 0
else:
precedence = 0
return precedence
def parseBinaryExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseExponentiationExpression)
token = self.lookahead
prec = self.binaryPrecedence(token)
if prec > 0:
self.nextToken()
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
markers = [startToken, self.lookahead]
left = expr
right = self.isolateCoverGrammar(self.parseExponentiationExpression)
stack = [left, token.value, right]
precedences = [prec]
while True:
prec = self.binaryPrecedence(self.lookahead)
if prec <= 0:
break
# Reduce: make a binary expression from the three topmost entries.
while len(stack) > 2 and prec <= precedences[-1]:
right = stack.pop()
operator = stack.pop()
precedences.pop()
left = stack.pop()
markers.pop()
node = self.startNode(markers[-1])
stack.append(self.finalize(node, Node.BinaryExpression(operator, left, right)))
# Shift.
stack.append(self.nextToken().value)
precedences.append(prec)
markers.append(self.lookahead)
stack.append(self.isolateCoverGrammar(self.parseExponentiationExpression))
# Final reduce to clean-up the stack.
i = len(stack) - 1
expr = stack[i]
lastMarker = markers.pop()
while i > 1:
marker = markers.pop()
lastLineStart = lastMarker.lineStart if lastMarker else 0
node = self.startNode(marker, lastLineStart)
operator = stack[i - 1]
expr = self.finalize(node, Node.BinaryExpression(operator, stack[i - 2], expr))
i -= 2
lastMarker = marker
return expr
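# A rough walk-through of the shift-reduce loop above (assuming the usual
# esprima operatorPrecedence table, where '*' outranks '+'): for '1 + 2 * 3'
# nothing is reduced while shifting, leaving the stack [1, '+', 2, '*', 3],
# and the final reduce folds it into Binary('+', 1, Binary('*', 2, 3)).
# For '1 * 2 + 3' the inner while-loop first reduces [1, '*', 2] into a
# node before '+' is shifted, yielding Binary('+', Binary('*', 1, 2), 3).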
# https://tc39.github.io/ecma262/#sec-conditional-operator
def parseConditionalExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseBinaryExpression)
if self.match('?'):
self.nextToken()
previousAllowIn = self.context.allowIn
self.context.allowIn = True
consequent = self.isolateCoverGrammar(self.parseAssignmentExpression)
self.context.allowIn = previousAllowIn
self.expect(':')
alternate = self.isolateCoverGrammar(self.parseAssignmentExpression)
expr = self.finalize(self.startNode(startToken), Node.ConditionalExpression(expr, consequent, alternate))
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
return expr
# https://tc39.github.io/ecma262/#sec-assignment-operators
def checkPatternParam(self, options, param):
typ = param.type
if typ is Syntax.Identifier:
self.validateParam(options, param, param.name)
elif typ is Syntax.RestElement:
self.checkPatternParam(options, param.argument)
elif typ is Syntax.AssignmentPattern:
self.checkPatternParam(options, param.left)
elif typ is Syntax.ArrayPattern:
for element in param.elements:
if element is not None:
self.checkPatternParam(options, element)
elif typ is Syntax.ObjectPattern:
for prop in param.properties:
self.checkPatternParam(options, prop if prop.type is Syntax.RestElement else prop.value)
options.simple = options.simple and isinstance(param, Node.Identifier)
def reinterpretAsCoverFormalsList(self, expr):
params = [expr]
asyncArrow = False
typ = expr.type
if typ is Syntax.Identifier:
pass
elif typ is Syntax.ArrowParameterPlaceHolder:
params = expr.params
asyncArrow = expr.async
else:
return None
options = Params(
simple=True,
paramSet={},
)
for param in params:
if param.type is Syntax.AssignmentPattern:
if param.right.type is Syntax.YieldExpression:
if param.right.argument:
self.throwUnexpectedToken(self.lookahead)
param.right.type = Syntax.Identifier
param.right.name = 'yield'
del param.right.argument
del param.right.delegate
elif asyncArrow and param.type is Syntax.Identifier and param.name == 'await':
self.throwUnexpectedToken(self.lookahead)
self.checkPatternParam(options, param)
if self.context.strict or not self.context.allowYield:
for param in params:
if param.type is Syntax.YieldExpression:
self.throwUnexpectedToken(self.lookahead)
if options.message is Messages.StrictParamDupe:
token = options.stricted if self.context.strict else options.firstRestricted
self.throwUnexpectedToken(token, options.message)
return Params(
simple=options.simple,
params=params,
stricted=options.stricted,
firstRestricted=options.firstRestricted,
message=options.message
)
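# Illustration: for '(a, b = 1) => a' the parenthesized prefix is first
# parsed as a cover grammar (ArrowParameterPlaceHolder); this method then
# re-validates the collected expressions as formal parameters, e.g.
# checking the AssignmentPattern 'b = 1' via checkPatternParam and
# reporting duplicate names as StrictParamDupe in strict mode.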
def parseAssignmentExpression(self):
if not self.context.allowYield and self.matchKeyword('yield'):
expr = self.parseYieldExpression()
else:
startToken = self.lookahead
token = startToken
expr = self.parseConditionalExpression()
if token.type is Token.Identifier and (token.lineNumber == self.lookahead.lineNumber) and token.value == 'async':
if self.lookahead.type is Token.Identifier or self.matchKeyword('yield'):
arg = self.parsePrimaryExpression()
self.reinterpretExpressionAsPattern(arg)
expr = Node.AsyncArrowParameterPlaceHolder([arg])
if expr.type is Syntax.ArrowParameterPlaceHolder or self.match('=>'):
# https://tc39.github.io/ecma262/#sec-arrow-function-definitions
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
isAsync = expr.async
list = self.reinterpretAsCoverFormalsList(expr)
if list:
if self.hasLineTerminator:
self.tolerateUnexpectedToken(self.lookahead)
self.context.firstCoverInitializedNameError = None
previousStrict = self.context.strict
previousAllowStrictDirective = self.context.allowStrictDirective
self.context.allowStrictDirective = list.simple
previousAllowYield = self.context.allowYield
previousAwait = self.context.await
self.context.allowYield = True
self.context.await = isAsync
node = self.startNode(startToken)
self.expect('=>')
if self.match('{'):
previousAllowIn = self.context.allowIn
self.context.allowIn = True
body = self.parseFunctionSourceElements()
self.context.allowIn = previousAllowIn
else:
body = self.isolateCoverGrammar(self.parseAssignmentExpression)
expression = body.type is not Syntax.BlockStatement
if self.context.strict and list.firstRestricted:
self.throwUnexpectedToken(list.firstRestricted, list.message)
if self.context.strict and list.stricted:
self.tolerateUnexpectedToken(list.stricted, list.message)
if isAsync:
expr = self.finalize(node, Node.AsyncArrowFunctionExpression(list.params, body, expression))
else:
expr = self.finalize(node, Node.ArrowFunctionExpression(list.params, body, expression))
self.context.strict = previousStrict
self.context.allowStrictDirective = previousAllowStrictDirective
self.context.allowYield = previousAllowYield
self.context.await = previousAwait
else:
if self.matchAssign():
if not self.context.isAssignmentTarget:
self.tolerateError(Messages.InvalidLHSInAssignment)
if self.context.strict and expr.type is Syntax.Identifier:
id = expr
if self.scanner.isRestrictedWord(id.name):
self.tolerateUnexpectedToken(token, Messages.StrictLHSAssignment)
if self.scanner.isStrictModeReservedWord(id.name):
self.tolerateUnexpectedToken(token, Messages.StrictReservedWord)
if not self.match('='):
self.context.isAssignmentTarget = False
self.context.isBindingElement = False
else:
self.reinterpretExpressionAsPattern(expr)
token = self.nextToken()
operator = token.value
right = self.isolateCoverGrammar(self.parseAssignmentExpression)
expr = self.finalize(self.startNode(startToken), Node.AssignmentExpression(operator, expr, right))
self.context.firstCoverInitializedNameError = None
return expr
# https://tc39.github.io/ecma262/#sec-comma-operator
def parseExpression(self):
startToken = self.lookahead
expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
if self.match(','):
expressions = []
expressions.append(expr)
while self.lookahead.type is not Token.EOF:
if not self.match(','):
break
self.nextToken()
expressions.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
expr = self.finalize(self.startNode(startToken), Node.SequenceExpression(expressions))
return expr
# https://tc39.github.io/ecma262/#sec-block
def parseStatementListItem(self):
self.context.isAssignmentTarget = True
self.context.isBindingElement = True
if self.lookahead.type is Token.Keyword:
value = self.lookahead.value
if value == 'export':
if not self.context.isModule:
self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalExportDeclaration)
statement = self.parseExportDeclaration()
elif value == 'import':
if self.matchImportCall():
statement = self.parseExpressionStatement()
else:
if not self.context.isModule:
self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalImportDeclaration)
statement = self.parseImportDeclaration()
elif value == 'const':
statement = self.parseLexicalDeclaration(Params(inFor=False))
elif value == 'function':
statement = self.parseFunctionDeclaration()
elif value == 'class':
statement = self.parseClassDeclaration()
elif value == 'let':
statement = self.parseLexicalDeclaration(Params(inFor=False)) if self.isLexicalDeclaration() else self.parseStatement()
else:
statement = self.parseStatement()
else:
statement = self.parseStatement()
return statement
def parseBlock(self):
node = self.createNode()
self.expect('{')
block = []
while True:
if self.match('}'):
break
block.append(self.parseStatementListItem())
self.expect('}')
return self.finalize(node, Node.BlockStatement(block))
# https://tc39.github.io/ecma262/#sec-let-and-const-declarations
def parseLexicalBinding(self, kind, options):
node = self.createNode()
params = []
id = self.parsePattern(params, kind)
if self.context.strict and id.type is Syntax.Identifier:
if self.scanner.isRestrictedWord(id.name):
self.tolerateError(Messages.StrictVarName)
init = None
if kind == 'const':
if not self.matchKeyword('in') and not self.matchContextualKeyword('of'):
if self.match('='):
self.nextToken()
init = self.isolateCoverGrammar(self.parseAssignmentExpression)
else:
self.throwError(Messages.DeclarationMissingInitializer, 'const')
elif (not options.inFor and id.type is not Syntax.Identifier) or self.match('='):
self.expect('=')
init = self.isolateCoverGrammar(self.parseAssignmentExpression)
return self.finalize(node, Node.VariableDeclarator(id, init))
def parseBindingList(self, kind, options):
lst = [self.parseLexicalBinding(kind, options)]
while self.match(','):
self.nextToken()
lst.append(self.parseLexicalBinding(kind, options))
return lst
def isLexicalDeclaration(self):
state = self.scanner.saveState()
self.scanner.scanComments()
next = self.scanner.lex()
self.scanner.restoreState(state)
return (
(next.type is Token.Identifier) or
(next.type is Token.Punctuator and next.value == '[') or
(next.type is Token.Punctuator and next.value == '{') or
(next.type is Token.Keyword and next.value == 'let') or
(next.type is Token.Keyword and next.value == 'yield')
)
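# Illustration: this one-token lookahead distinguishes 'let' used as a
# declaration from 'let' used as an identifier. 'let x = 1;' and
# 'let [a] = b;' are lexical declarations (the next token is an
# identifier or '['), while 'let(x)' or 'let = 1' fall through and are
# parsed as ordinary expression statements in sloppy mode.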
def parseLexicalDeclaration(self, options):
node = self.createNode()
kind = self.nextToken().value
assert kind == 'let' or kind == 'const', 'Lexical declaration must be either let or const'
declarations = self.parseBindingList(kind, options)
self.consumeSemicolon()
return self.finalize(node, Node.VariableDeclaration(declarations, kind))
# https://tc39.github.io/ecma262/#sec-destructuring-binding-patterns
def parseBindingRestElement(self, params, kind=None):
node = self.createNode()
self.expect('...')
arg = self.parsePattern(params, kind)
return self.finalize(node, Node.RestElement(arg))
def parseArrayPattern(self, params, kind=None):
node = self.createNode()
self.expect('[')
elements = []
while not self.match(']'):
if self.match(','):
self.nextToken()
elements.append(None)
else:
if self.match('...'):
elements.append(self.parseBindingRestElement(params, kind))
break
else:
elements.append(self.parsePatternWithDefault(params, kind))
if not self.match(']'):
self.expect(',')
self.expect(']')
return self.finalize(node, Node.ArrayPattern(elements))
def parsePropertyPattern(self, params, kind=None):
node = self.createNode()
computed = False
shorthand = False
method = False
key = None
if self.lookahead.type is Token.Identifier:
keyToken = self.lookahead
key = self.parseVariableIdentifier()
init = self.finalize(node, Node.Identifier(keyToken.value))
if self.match('='):
params.append(keyToken)
shorthand = True
self.nextToken()
expr = self.parseAssignmentExpression()
value = self.finalize(self.startNode(keyToken), Node.AssignmentPattern(init, expr))
elif not self.match(':'):
params.append(keyToken)
shorthand = True
value = init
else:
self.expect(':')
value = self.parsePatternWithDefault(params, kind)
else:
computed = self.match('[')
key = self.parseObjectPropertyKey()
self.expect(':')
value = self.parsePatternWithDefault(params, kind)
return self.finalize(node, Node.Property('init', key, computed, value, method, shorthand))
def parseRestProperty(self, params, kind):
node = self.createNode()
self.expect('...')
arg = self.parsePattern(params)
if self.match('='):
self.throwError(Messages.DefaultRestProperty)
if not self.match('}'):
self.throwError(Messages.PropertyAfterRestProperty)
return self.finalize(node, Node.RestElement(arg))
def parseObjectPattern(self, params, kind=None):
node = self.createNode()
properties = []
self.expect('{')
while not self.match('}'):
properties.append(self.parseRestProperty(params, kind) if self.match('...') else self.parsePropertyPattern(params, kind))
if not self.match('}'):
self.expect(',')
self.expect('}')
return self.finalize(node, Node.ObjectPattern(properties))
def parsePattern(self, params, kind=None):
if self.match('['):
pattern = self.parseArrayPattern(params, kind)
elif self.match('{'):
pattern = self.parseObjectPattern(params, kind)
else:
if self.matchKeyword('let') and (kind in ('const', 'let')):
self.tolerateUnexpectedToken(self.lookahead, Messages.LetInLexicalBinding)
params.append(self.lookahead)
pattern = self.parseVariableIdentifier(kind)
return pattern
def parsePatternWithDefault(self, params, kind=None):
startToken = self.lookahead
pattern = self.parsePattern(params, kind)
if self.match('='):
self.nextToken()
previousAllowYield = self.context.allowYield
self.context.allowYield = True
right = self.isolateCoverGrammar(self.parseAssignmentExpression)
self.context.allowYield = previousAllowYield
pattern = self.finalize(self.startNode(startToken), Node.AssignmentPattern(pattern, right))
return pattern
# https://tc39.github.io/ecma262/#sec-variable-statement
def parseVariableIdentifier(self, kind=None):
node = self.createNode()
token = self.nextToken()
if token.type is Token.Keyword and token.value == 'yield':
if self.context.strict:
self.tolerateUnexpectedToken(token, Messages.StrictReservedWord)
elif not self.context.allowYield:
self.throwUnexpectedToken(token)
elif token.type is not Token.Identifier:
if self.context.strict and token.type is Token.Keyword and self.scanner.isStrictModeReservedWord(token.value):
self.tolerateUnexpectedToken(token, Messages.StrictReservedWord)
else:
if self.context.strict or token.value != 'let' or kind != 'var':
self.throwUnexpectedToken(token)
elif (self.context.isModule or self.context.await) and token.type is Token.Identifier and token.value == 'await':
self.tolerateUnexpectedToken(token)
return self.finalize(node, Node.Identifier(token.value))
def parseVariableDeclaration(self, options):
node = self.createNode()
params = []
id = self.parsePattern(params, 'var')
if self.context.strict and id.type is Syntax.Identifier:
if self.scanner.isRestrictedWord(id.name):
self.tolerateError(Messages.StrictVarName)
init = None
if self.match('='):
self.nextToken()
init = self.isolateCoverGrammar(self.parseAssignmentExpression)
elif id.type is not Syntax.Identifier and not options.inFor:
self.expect('=')
return self.finalize(node, Node.VariableDeclarator(id, init))
def parseVariableDeclarationList(self, options):
opt = Params(inFor=options.inFor)
lst = []
lst.append(self.parseVariableDeclaration(opt))
while self.match(','):
self.nextToken()
lst.append(self.parseVariableDeclaration(opt))
return lst
def parseVariableStatement(self):
node = self.createNode()
self.expectKeyword('var')
declarations = self.parseVariableDeclarationList(Params(inFor=False))
self.consumeSemicolon()
return self.finalize(node, Node.VariableDeclaration(declarations, 'var'))
# https://tc39.github.io/ecma262/#sec-empty-statement
def parseEmptyStatement(self):
node = self.createNode()
self.expect(';')
return self.finalize(node, Node.EmptyStatement())
# https://tc39.github.io/ecma262/#sec-expression-statement
def parseExpressionStatement(self):
node = self.createNode()
expr = self.parseExpression()
self.consumeSemicolon()
return self.finalize(node, Node.ExpressionStatement(expr))
# https://tc39.github.io/ecma262/#sec-if-statement
def parseIfClause(self):
if self.context.strict and self.matchKeyword('function'):
self.tolerateError(Messages.StrictFunction)
return self.parseStatement()
def parseIfStatement(self):
node = self.createNode()
alternate = None
self.expectKeyword('if')
self.expect('(')
test = self.parseExpression()
if not self.match(')') and self.config.tolerant:
self.tolerateUnexpectedToken(self.nextToken())
consequent = self.finalize(self.createNode(), Node.EmptyStatement())
else:
self.expect(')')
consequent = self.parseIfClause()
if self.matchKeyword('else'):
self.nextToken()
alternate = self.parseIfClause()
return self.finalize(node, Node.IfStatement(test, consequent, alternate))
# https://tc39.github.io/ecma262/#sec-do-while-statement
def parseDoWhileStatement(self):
node = self.createNode()
self.expectKeyword('do')
previousInIteration = self.context.inIteration
self.context.inIteration = True
body = self.parseStatement()
self.context.inIteration = previousInIteration
self.expectKeyword('while')
self.expect('(')
test = self.parseExpression()
if not self.match(')') and self.config.tolerant:
self.tolerateUnexpectedToken(self.nextToken())
else:
self.expect(')')
if self.match(';'):
self.nextToken()
return self.finalize(node, Node.DoWhileStatement(body, test))
# https://tc39.github.io/ecma262/#sec-while-statement
def parseWhileStatement(self):
node = self.createNode()
self.expectKeyword('while')
self.expect('(')
test = self.parseExpression()
if not self.match(')') and self.config.tolerant:
self.tolerateUnexpectedToken(self.nextToken())
body = self.finalize(self.createNode(), Node.EmptyStatement())
else:
self.expect(')')
previousInIteration = self.context.inIteration
self.context.inIteration = True
body = self.parseStatement()
self.context.inIteration = previousInIteration
return self.finalize(node, Node.WhileStatement(test, body))
# https://tc39.github.io/ecma262/#sec-for-statement
# https://tc39.github.io/ecma262/#sec-for-in-and-for-of-statements
def parseForStatement(self):
init = None
test = None
update = None
forIn = True
left = None
right = None
node = self.createNode()
self.expectKeyword('for')
self.expect('(')
if self.match(';'):
self.nextToken()
else:
if self.matchKeyword('var'):
init = self.createNode()
self.nextToken()
previousAllowIn = self.context.allowIn
self.context.allowIn = False
declarations = self.parseVariableDeclarationList(Params(inFor=True))
self.context.allowIn = previousAllowIn
if len(declarations) == 1 and self.matchKeyword('in'):
decl = declarations[0]
if decl.init and (decl.id.type is Syntax.ArrayPattern or decl.id.type is Syntax.ObjectPattern or self.context.strict):
self.tolerateError(Messages.ForInOfLoopInitializer, 'for-in')
init = self.finalize(init, Node.VariableDeclaration(declarations, 'var'))
self.nextToken()
left = init
right = self.parseExpression()
init = None
elif len(declarations) == 1 and declarations[0].init is None and self.matchContextualKeyword('of'):
init = self.finalize(init, Node.VariableDeclaration(declarations, 'var'))
self.nextToken()
left = init
right = self.parseAssignmentExpression()
init = None
forIn = False
else:
init = self.finalize(init, Node.VariableDeclaration(declarations, 'var'))
self.expect(';')
elif self.matchKeyword('const', 'let'):
init = self.createNode()
kind = self.nextToken().value
if not self.context.strict and self.lookahead.value == 'in':
init = self.finalize(init, Node.Identifier(kind))
self.nextToken()
left = init
right = self.parseExpression()
init = None
else:
previousAllowIn = self.context.allowIn
self.context.allowIn = False
declarations = self.parseBindingList(kind, Params(inFor=True))
self.context.allowIn = previousAllowIn
if len(declarations) == 1 and declarations[0].init is None and self.matchKeyword('in'):
init = self.finalize(init, Node.VariableDeclaration(declarations, kind))
self.nextToken()
left = init
right = self.parseExpression()
init = None
elif len(declarations) == 1 and declarations[0].init is None and self.matchContextualKeyword('of'):
init = self.finalize(init, Node.VariableDeclaration(declarations, kind))
self.nextToken()
left = init
right = self.parseAssignmentExpression()
init = None
forIn = False
else:
self.consumeSemicolon()
init = self.finalize(init, Node.VariableDeclaration(declarations, kind))
else:
initStartToken = self.lookahead
previousAllowIn = self.context.allowIn
self.context.allowIn = False
init = self.inheritCoverGrammar(self.parseAssignmentExpression)
self.context.allowIn = previousAllowIn
if self.matchKeyword('in'):
if not self.context.isAssignmentTarget or init.type is Syntax.AssignmentExpression:
self.tolerateError(Messages.InvalidLHSInForIn)
self.nextToken()
self.reinterpretExpressionAsPattern(init)
left = init
right = self.parseExpression()
init = None
elif self.matchContextualKeyword('of'):
if not self.context.isAssignmentTarget or init.type is Syntax.AssignmentExpression:
self.tolerateError(Messages.InvalidLHSInForLoop)
self.nextToken()
self.reinterpretExpressionAsPattern(init)
left = init
right = self.parseAssignmentExpression()
init = None
forIn = False
else:
if self.match(','):
initSeq = [init]
while self.match(','):
self.nextToken()
initSeq.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
init = self.finalize(self.startNode(initStartToken), Node.SequenceExpression(initSeq))
self.expect(';')
if left is None:
if not self.match(';'):
test = self.parseExpression()
self.expect(';')
if not self.match(')'):
update = self.parseExpression()
if not self.match(')') and self.config.tolerant:
self.tolerateUnexpectedToken(self.nextToken())
body = self.finalize(self.createNode(), Node.EmptyStatement())
else:
self.expect(')')
previousInIteration = self.context.inIteration
self.context.inIteration = True
body = self.isolateCoverGrammar(self.parseStatement)
self.context.inIteration = previousInIteration
if left is None:
return self.finalize(node, Node.ForStatement(init, test, update, body))
if forIn:
return self.finalize(node, Node.ForInStatement(left, right, body))
return self.finalize(node, Node.ForOfStatement(left, right, body))
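# Summary of the branches above: 'for (init; test; update) body' produces
# a ForStatement, 'for (lhs in obj) body' a ForInStatement, and
# 'for (lhs of iterable) body' a ForOfStatement (forIn flips to False
# whenever the contextual keyword 'of' is consumed).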
# https://tc39.github.io/ecma262/#sec-continue-statement
def parseContinueStatement(self):
node = self.createNode()
self.expectKeyword('continue')
label = None
if self.lookahead.type is Token.Identifier and not self.hasLineTerminator:
id = self.parseVariableIdentifier()
label = id
key = '$' + id.name
if key not in self.context.labelSet:
self.throwError(Messages.UnknownLabel, id.name)
self.consumeSemicolon()
if label is None and not self.context.inIteration:
self.throwError(Messages.IllegalContinue)
return self.finalize(node, Node.ContinueStatement(label))
# https://tc39.github.io/ecma262/#sec-break-statement
def parseBreakStatement(self):
node = self.createNode()
self.expectKeyword('break')
label = None
if self.lookahead.type is Token.Identifier and not self.hasLineTerminator:
id = self.parseVariableIdentifier()
key = '$' + id.name
if key not in self.context.labelSet:
self.throwError(Messages.UnknownLabel, id.name)
label = id
self.consumeSemicolon()
if label is None and not self.context.inIteration and not self.context.inSwitch:
self.throwError(Messages.IllegalBreak)
return self.finalize(node, Node.BreakStatement(label))
# https://tc39.github.io/ecma262/#sec-return-statement
def parseReturnStatement(self):
if not self.context.inFunctionBody:
self.tolerateError(Messages.IllegalReturn)
node = self.createNode()
self.expectKeyword('return')
hasArgument = (
not self.match(';') and not self.match('}') and
not self.hasLineTerminator and self.lookahead.type is not Token.EOF
)
argument = self.parseExpression() if hasArgument else None
self.consumeSemicolon()
return self.finalize(node, Node.ReturnStatement(argument))
# https://tc39.github.io/ecma262/#sec-with-statement
def parseWithStatement(self):
if self.context.strict:
self.tolerateError(Messages.StrictModeWith)
node = self.createNode()
self.expectKeyword('with')
self.expect('(')
object = self.parseExpression()
if not self.match(')') and self.config.tolerant:
self.tolerateUnexpectedToken(self.nextToken())
body = self.finalize(self.createNode(), Node.EmptyStatement())
else:
self.expect(')')
body = self.parseStatement()
return self.finalize(node, Node.WithStatement(object, body))
# https://tc39.github.io/ecma262/#sec-switch-statement
def parseSwitchCase(self):
node = self.createNode()
if self.matchKeyword('default'):
self.nextToken()
test = None
else:
self.expectKeyword('case')
test = self.parseExpression()
self.expect(':')
consequent = []
while True:
if self.match('}') or self.matchKeyword('default', 'case'):
break
consequent.append(self.parseStatementListItem())
return self.finalize(node, Node.SwitchCase(test, consequent))
def parseSwitchStatement(self):
node = self.createNode()
self.expectKeyword('switch')
self.expect('(')
discriminant = self.parseExpression()
self.expect(')')
previousInSwitch = self.context.inSwitch
self.context.inSwitch = True
cases = []
defaultFound = False
self.expect('{')
while True:
if self.match('}'):
break
clause = self.parseSwitchCase()
if clause.test is None:
if defaultFound:
self.throwError(Messages.MultipleDefaultsInSwitch)
defaultFound = True
cases.append(clause)
self.expect('}')
self.context.inSwitch = previousInSwitch
return self.finalize(node, Node.SwitchStatement(discriminant, cases))
# https://tc39.github.io/ecma262/#sec-labelled-statements
def parseLabelledStatement(self):
node = self.createNode()
expr = self.parseExpression()
if expr.type is Syntax.Identifier and self.match(':'):
self.nextToken()
id = expr
key = '$' + id.name
if key in self.context.labelSet:
self.throwError(Messages.Redeclaration, 'Label', id.name)
self.context.labelSet[key] = True
if self.matchKeyword('class'):
self.tolerateUnexpectedToken(self.lookahead)
body = self.parseClassDeclaration()
elif self.matchKeyword('function'):
token = self.lookahead
declaration = self.parseFunctionDeclaration()
if self.context.strict:
self.tolerateUnexpectedToken(token, Messages.StrictFunction)
elif declaration.generator:
self.tolerateUnexpectedToken(token, Messages.GeneratorInLegacyContext)
body = declaration
else:
body = self.parseStatement()
del self.context.labelSet[key]
statement = Node.LabeledStatement(id, body)
else:
self.consumeSemicolon()
statement = Node.ExpressionStatement(expr)
return self.finalize(node, statement)
# https://tc39.github.io/ecma262/#sec-throw-statement
def parseThrowStatement(self):
node = self.createNode()
self.expectKeyword('throw')
if self.hasLineTerminator:
self.throwError(Messages.NewlineAfterThrow)
argument = self.parseExpression()
self.consumeSemicolon()
return self.finalize(node, Node.ThrowStatement(argument))
# https://tc39.github.io/ecma262/#sec-try-statement
def parseCatchClause(self):
node = self.createNode()
self.expectKeyword('catch')
self.expect('(')
if self.match(')'):
self.throwUnexpectedToken(self.lookahead)
params = []
param = self.parsePattern(params)
paramMap = {}
for p in params:
key = '$' + p.value
if key in paramMap:
self.tolerateError(Messages.DuplicateBinding, p.value)
paramMap[key] = True
if self.context.strict and param.type is Syntax.Identifier:
if self.scanner.isRestrictedWord(param.name):
self.tolerateError(Messages.StrictCatchVariable)
self.expect(')')
body = self.parseBlock()
return self.finalize(node, Node.CatchClause(param, body))
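# Example: 'try {} catch ([a, a]) {}' collects both pattern tokens in
# params, so the '$'-prefixed paramMap lookup reports DuplicateBinding
# for the second 'a'.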
def parseFinallyClause(self):
self.expectKeyword('finally')
return self.parseBlock()
def parseTryStatement(self):
node = self.createNode()
self.expectKeyword('try')
block = self.parseBlock()
handler = self.parseCatchClause() if self.matchKeyword('catch') else None
finalizer = self.parseFinallyClause() if self.matchKeyword('finally') else None
if not handler and not finalizer:
self.throwError(Messages.NoCatchOrFinally)
return self.finalize(node, Node.TryStatement(block, handler, finalizer))
# https://tc39.github.io/ecma262/#sec-debugger-statement
def parseDebuggerStatement(self):
node = self.createNode()
self.expectKeyword('debugger')
self.consumeSemicolon()
return self.finalize(node, Node.DebuggerStatement())
# https://tc39.github.io/ecma262/#sec-ecmascript-language-statements-and-declarations
def parseStatement(self):
typ = self.lookahead.type
if typ in (
Token.BooleanLiteral,
Token.NullLiteral,
Token.NumericLiteral,
Token.StringLiteral,
Token.Template,
Token.RegularExpression,
):
statement = self.parseExpressionStatement()
elif typ is Token.Punctuator:
value = self.lookahead.value
if value == '{':
statement = self.parseBlock()
elif value == '(':
statement = self.parseExpressionStatement()
elif value == ';':
statement = self.parseEmptyStatement()
else:
statement = self.parseExpressionStatement()
elif typ is Token.Identifier:
statement = self.parseFunctionDeclaration() if self.matchAsyncFunction() else self.parseLabelledStatement()
elif typ is Token.Keyword:
value = self.lookahead.value
if value == 'break':
statement = self.parseBreakStatement()
elif value == 'continue':
statement = self.parseContinueStatement()
elif value == 'debugger':
statement = self.parseDebuggerStatement()
elif value == 'do':
statement = self.parseDoWhileStatement()
elif value == 'for':
statement = self.parseForStatement()
elif value == 'function':
statement = self.parseFunctionDeclaration()
elif value == 'if':
statement = self.parseIfStatement()
elif value == 'return':
statement = self.parseReturnStatement()
elif value == 'switch':
statement = self.parseSwitchStatement()
elif value == 'throw':
statement = self.parseThrowStatement()
elif value == 'try':
statement = self.parseTryStatement()
elif value == 'var':
statement = self.parseVariableStatement()
elif value == 'while':
statement = self.parseWhileStatement()
elif value == 'with':
statement = self.parseWithStatement()
else:
statement = self.parseExpressionStatement()
else:
statement = self.throwUnexpectedToken(self.lookahead)
return statement
# https://tc39.github.io/ecma262/#sec-function-definitions
def parseFunctionSourceElements(self):
node = self.createNode()
self.expect('{')
body = self.parseDirectivePrologues()
previousLabelSet = self.context.labelSet
previousInIteration = self.context.inIteration
previousInSwitch = self.context.inSwitch
previousInFunctionBody = self.context.inFunctionBody
self.context.labelSet = {}
self.context.inIteration = False
self.context.inSwitch = False
self.context.inFunctionBody = True
while self.lookahead.type is not Token.EOF:
if self.match('}'):
break
body.append(self.parseStatementListItem())
self.expect('}')
self.context.labelSet = previousLabelSet
self.context.inIteration = previousInIteration
self.context.inSwitch = previousInSwitch
self.context.inFunctionBody = previousInFunctionBody
return self.finalize(node, Node.BlockStatement(body))
def validateParam(self, options, param, name):
key = '$' + name
if self.context.strict:
if self.scanner.isRestrictedWord(name):
options.stricted = param
options.message = Messages.StrictParamName
if key in options.paramSet:
options.stricted = param
options.message = Messages.StrictParamDupe
elif not options.firstRestricted:
if self.scanner.isRestrictedWord(name):
options.firstRestricted = param
options.message = Messages.StrictParamName
elif self.scanner.isStrictModeReservedWord(name):
options.firstRestricted = param
options.message = Messages.StrictReservedWord
elif key in options.paramSet:
options.stricted = param
options.message = Messages.StrictParamDupe
options.paramSet[key] = True
def parseRestElement(self, params):
node = self.createNode()
self.expect('...')
arg = self.parsePattern(params)
if self.match('='):
self.throwError(Messages.DefaultRestParameter)
if not self.match(')'):
self.throwError(Messages.ParameterAfterRestParameter)
return self.finalize(node, Node.RestElement(arg))
def parseFormalParameter(self, options):
params = []
param = self.parseRestElement(params) if self.match('...') else self.parsePatternWithDefault(params)
for p in params:
self.validateParam(options, p, p.value)
options.simple = options.simple and isinstance(param, Node.Identifier)
options.params.append(param)
def parseFormalParameters(self, firstRestricted=None):
options = Params(
simple=True,
params=[],
firstRestricted=firstRestricted
)
self.expect('(')
if not self.match(')'):
options.paramSet = {}
while self.lookahead.type is not Token.EOF:
self.parseFormalParameter(options)
if self.match(')'):
break
self.expect(',')
if self.match(')'):
break
self.expect(')')
return Params(
simple=options.simple,
params=options.params,
stricted=options.stricted,
firstRestricted=options.firstRestricted,
message=options.message
)
def matchAsyncFunction(self):
match = self.matchContextualKeyword('async')
if match:
state = self.scanner.saveState()
self.scanner.scanComments()
next = self.scanner.lex()
self.scanner.restoreState(state)
match = (state.lineNumber == next.lineNumber) and (next.type is Token.Keyword) and (next.value == 'function')
return match
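# Example: 'async function f() {}' matches here because the 'async'
# token and the 'function' keyword share a line; with a line break in
# between ('async\nfunction f() {}') this returns False, so 'async' is
# instead parsed as an ordinary identifier expression statement.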
def parseFunctionDeclaration(self, identifierIsOptional=False):
node = self.createNode()
isAsync = self.matchContextualKeyword('async')
if isAsync:
self.nextToken()
self.expectKeyword('function')
isGenerator = False if isAsync else self.match('*')
if isGenerator:
self.nextToken()
id = None
firstRestricted = None
if not identifierIsOptional or not self.match('('):
token = self.lookahead
id = self.parseVariableIdentifier()
if self.context.strict:
if self.scanner.isRestrictedWord(token.value):
self.tolerateUnexpectedToken(token, Messages.StrictFunctionName)
else:
if self.scanner.isRestrictedWord(token.value):
firstRestricted = token
message = Messages.StrictFunctionName
elif self.scanner.isStrictModeReservedWord(token.value):
firstRestricted = token
message = Messages.StrictReservedWord
previousAllowAwait = self.context.await
previousAllowYield = self.context.allowYield
self.context.await = isAsync
self.context.allowYield = not isGenerator
formalParameters = self.parseFormalParameters(firstRestricted)
params = formalParameters.params
stricted = formalParameters.stricted
firstRestricted = formalParameters.firstRestricted
if formalParameters.message:
message = formalParameters.message
previousStrict = self.context.strict
previousAllowStrictDirective = self.context.allowStrictDirective
self.context.allowStrictDirective = formalParameters.simple
body = self.parseFunctionSourceElements()
if self.context.strict and firstRestricted:
self.throwUnexpectedToken(firstRestricted, message)
if self.context.strict and stricted:
self.tolerateUnexpectedToken(stricted, message)
self.context.strict = previousStrict
self.context.allowStrictDirective = previousAllowStrictDirective
self.context.await = previousAllowAwait
self.context.allowYield = previousAllowYield
if isAsync:
return self.finalize(node, Node.AsyncFunctionDeclaration(id, params, body))
return self.finalize(node, Node.FunctionDeclaration(id, params, body, isGenerator))
def parseFunctionExpression(self):
node = self.createNode()
isAsync = self.matchContextualKeyword('async')
if isAsync:
self.nextToken()
self.expectKeyword('function')
isGenerator = False if isAsync else self.match('*')
if isGenerator:
self.nextToken()
id = None
firstRestricted = None
previousAllowAwait = self.context.await
previousAllowYield = self.context.allowYield
self.context.await = isAsync
self.context.allowYield = not isGenerator
if not self.match('('):
token = self.lookahead
id = self.parseIdentifierName() if not self.context.strict and not isGenerator and self.matchKeyword('yield') else self.parseVariableIdentifier()
if self.context.strict:
if self.scanner.isRestrictedWord(token.value):
self.tolerateUnexpectedToken(token, Messages.StrictFunctionName)
else:
if self.scanner.isRestrictedWord(token.value):
firstRestricted = token
message = Messages.StrictFunctionName
elif self.scanner.isStrictModeReservedWord(token.value):
firstRestricted = token
message = Messages.StrictReservedWord
formalParameters = self.parseFormalParameters(firstRestricted)
params = formalParameters.params
stricted = formalParameters.stricted
firstRestricted = formalParameters.firstRestricted
if formalParameters.message:
message = formalParameters.message
previousStrict = self.context.strict
previousAllowStrictDirective = self.context.allowStrictDirective
self.context.allowStrictDirective = formalParameters.simple
body = self.parseFunctionSourceElements()
if self.context.strict and firstRestricted:
self.throwUnexpectedToken(firstRestricted, message)
if self.context.strict and stricted:
self.tolerateUnexpectedToken(stricted, message)
self.context.strict = previousStrict
self.context.allowStrictDirective = previousAllowStrictDirective
self.context.await = previousAllowAwait
self.context.allowYield = previousAllowYield
if isAsync:
return self.finalize(node, Node.AsyncFunctionExpression(id, params, body))
return self.finalize(node, Node.FunctionExpression(id, params, body, isGenerator))
# https://tc39.github.io/ecma262/#sec-directive-prologues-and-the-use-strict-directive
def parseDirective(self):
token = self.lookahead
node = self.createNode()
expr = self.parseExpression()
directive = self.getTokenRaw(token)[1:-1] if expr.type is Syntax.Literal else None
self.consumeSemicolon()
return self.finalize(node, Node.Directive(expr, directive) if directive else Node.ExpressionStatement(expr))
def parseDirectivePrologues(self):
firstRestricted = None
body = []
while True:
token = self.lookahead
if token.type is not Token.StringLiteral:
break
statement = self.parseDirective()
body.append(statement)
directive = statement.directive
if not isinstance(directive, basestring):
break
if directive == 'use strict':
self.context.strict = True
if firstRestricted:
self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral)
if not self.context.allowStrictDirective:
self.tolerateUnexpectedToken(token, Messages.IllegalLanguageModeDirective)
else:
if not firstRestricted and token.octal:
firstRestricted = token
return body
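# Illustration: for a prologue '"\251"; "use strict";' the octal-escaped
# literal records firstRestricted, and when the later "use strict"
# directive is seen, the earlier token is reported retroactively via
# Messages.StrictOctalLiteral.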
# https://tc39.github.io/ecma262/#sec-method-definitions
def qualifiedPropertyName(self, token):
typ = token.type
if typ in (
Token.Identifier,
Token.StringLiteral,
Token.BooleanLiteral,
Token.NullLiteral,
Token.NumericLiteral,
Token.Keyword,
):
return True
elif typ is Token.Punctuator:
return token.value == '['
return False
def parseGetterMethod(self):
node = self.createNode()
isGenerator = False
previousAllowYield = self.context.allowYield
self.context.allowYield = not isGenerator
formalParameters = self.parseFormalParameters()
if len(formalParameters.params) > 0:
self.tolerateError(Messages.BadGetterArity)
method = self.parsePropertyMethod(formalParameters)
self.context.allowYield = previousAllowYield
return self.finalize(node, Node.FunctionExpression(None, formalParameters.params, method, isGenerator))
def parseSetterMethod(self):
node = self.createNode()
isGenerator = False
previousAllowYield = self.context.allowYield
self.context.allowYield = not isGenerator
formalParameters = self.parseFormalParameters()
if len(formalParameters.params) != 1:
self.tolerateError(Messages.BadSetterArity)
elif isinstance(formalParameters.params[0], Node.RestElement):
self.tolerateError(Messages.BadSetterRestParameter)
method = self.parsePropertyMethod(formalParameters)
self.context.allowYield = previousAllowYield
return self.finalize(node, Node.FunctionExpression(None, formalParameters.params, method, isGenerator))
def parseGeneratorMethod(self):
node = self.createNode()
isGenerator = True
previousAllowYield = self.context.allowYield
self.context.allowYield = True
params = self.parseFormalParameters()
self.context.allowYield = False
method = self.parsePropertyMethod(params)
self.context.allowYield = previousAllowYield
return self.finalize(node, Node.FunctionExpression(None, params.params, method, isGenerator))
# https://tc39.github.io/ecma262/#sec-generator-function-definitions
def isStartOfExpression(self):
start = True
value = self.lookahead.value
typ = self.lookahead.type
if typ is Token.Punctuator:
start = value in ('[', '(', '{', '+', '-', '!', '~', '++', '--', '/', '/=')  # regular expression literal
elif typ is Token.Keyword:
start = value in ('class', 'delete', 'function', 'let', 'new', 'super', 'this', 'typeof', 'void', 'yield')
return start
def parseYieldExpression(self):
node = self.createNode()
self.expectKeyword('yield')
argument = None
delegate = False
if not self.hasLineTerminator:
previousAllowYield = self.context.allowYield
self.context.allowYield = False
delegate = self.match('*')
if delegate:
self.nextToken()
argument = self.parseAssignmentExpression()
elif self.isStartOfExpression():
argument = self.parseAssignmentExpression()
self.context.allowYield = previousAllowYield
return self.finalize(node, Node.YieldExpression(argument, delegate))
# https://tc39.github.io/ecma262/#sec-class-definitions
def parseClassElement(self, hasConstructor):
token = self.lookahead
node = self.createNode()
kind = ''
key = None
value = None
computed = False
isStatic = False
isAsync = False
if self.match('*'):
self.nextToken()
else:
computed = self.match('[')
key = self.parseObjectPropertyKey()
id = key
if id.name == 'static' and (self.qualifiedPropertyName(self.lookahead) or self.match('*')):
token = self.lookahead
isStatic = True
computed = self.match('[')
if self.match('*'):
self.nextToken()
else:
key = self.parseObjectPropertyKey()
if token.type is Token.Identifier and not self.hasLineTerminator and token.value == 'async':
punctuator = self.lookahead.value
if punctuator != ':' and punctuator != '(' and punctuator != '*':
isAsync = True
token = self.lookahead
key = self.parseObjectPropertyKey()
if token.type is Token.Identifier:
if token.value == 'get' or token.value == 'set':
self.tolerateUnexpectedToken(token)
elif token.value == 'constructor':
self.tolerateUnexpectedToken(token, Messages.ConstructorIsAsync)
lookaheadPropertyKey = self.qualifiedPropertyName(self.lookahead)
if token.type is Token.Identifier:
if token.value == 'get' and lookaheadPropertyKey:
kind = 'get'
computed = self.match('[')
key = self.parseObjectPropertyKey()
self.context.allowYield = False
value = self.parseGetterMethod()
elif token.value == 'set' and lookaheadPropertyKey:
kind = 'set'
computed = self.match('[')
key = self.parseObjectPropertyKey()
value = self.parseSetterMethod()
elif self.config.classProperties and not self.match('('):
kind = 'init'
id = self.finalize(node, Node.Identifier(token.value))
if self.match('='):
self.nextToken()
value = self.parseAssignmentExpression()
elif token.type is Token.Punctuator and token.value == '*' and lookaheadPropertyKey:
kind = 'method'
computed = self.match('[')
key = self.parseObjectPropertyKey()
value = self.parseGeneratorMethod()
if not kind and key and self.match('('):
kind = 'method'
value = self.parsePropertyMethodAsyncFunction() if isAsync else self.parsePropertyMethodFunction()
if not kind:
self.throwUnexpectedToken(self.lookahead)
if not computed:
if isStatic and self.isPropertyKey(key, 'prototype'):
self.throwUnexpectedToken(token, Messages.StaticPrototype)
if not isStatic and self.isPropertyKey(key, 'constructor'):
if kind != 'method' or (value and value.generator):
self.throwUnexpectedToken(token, Messages.ConstructorSpecialMethod)
if hasConstructor.value:
self.throwUnexpectedToken(token, Messages.DuplicateConstructor)
else:
hasConstructor.value = True
kind = 'constructor'
if kind in ('constructor', 'method', 'get', 'set'):
return self.finalize(node, Node.MethodDefinition(key, computed, value, kind, isStatic))
else:
return self.finalize(node, Node.FieldDefinition(key, computed, value, kind, isStatic))
def parseClassElementList(self):
body = []
hasConstructor = Value(False)
self.expect('{')
while not self.match('}'):
if self.match(';'):
self.nextToken()
else:
body.append(self.parseClassElement(hasConstructor))
self.expect('}')
return body
def parseClassBody(self):
node = self.createNode()
elementList = self.parseClassElementList()
return self.finalize(node, Node.ClassBody(elementList))
def parseClassDeclaration(self, identifierIsOptional=False):
node = self.createNode()
previousStrict = self.context.strict
self.context.strict = True
self.expectKeyword('class')
id = None if identifierIsOptional and self.lookahead.type is not Token.Identifier else self.parseVariableIdentifier()
superClass = None
if self.matchKeyword('extends'):
self.nextToken()
superClass = self.isolateCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
classBody = self.parseClassBody()
self.context.strict = previousStrict
return self.finalize(node, Node.ClassDeclaration(id, superClass, classBody))
def parseClassExpression(self):
node = self.createNode()
previousStrict = self.context.strict
self.context.strict = True
self.expectKeyword('class')
id = self.parseVariableIdentifier() if self.lookahead.type is Token.Identifier else None
superClass = None
if self.matchKeyword('extends'):
self.nextToken()
superClass = self.isolateCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
classBody = self.parseClassBody()
self.context.strict = previousStrict
return self.finalize(node, Node.ClassExpression(id, superClass, classBody))
# https://tc39.github.io/ecma262/#sec-scripts
# https://tc39.github.io/ecma262/#sec-modules
def parseModule(self):
self.context.strict = True
self.context.isModule = True
node = self.createNode()
body = self.parseDirectivePrologues()
while self.lookahead.type is not Token.EOF:
body.append(self.parseStatementListItem())
return self.finalize(node, Node.Module(body))
def parseScript(self):
node = self.createNode()
body = self.parseDirectivePrologues()
while self.lookahead.type is not Token.EOF:
body.append(self.parseStatementListItem())
return self.finalize(node, Node.Script(body))
# https://tc39.github.io/ecma262/#sec-imports
def parseModuleSpecifier(self):
node = self.createNode()
if self.lookahead.type is not Token.StringLiteral:
self.throwError(Messages.InvalidModuleSpecifier)
token = self.nextToken()
raw = self.getTokenRaw(token)
return self.finalize(node, Node.Literal(token.value, raw))
# import {<foo as bar>} ...
def parseImportSpecifier(self):
node = self.createNode()
if self.lookahead.type is Token.Identifier:
imported = self.parseVariableIdentifier()
local = imported
if self.matchContextualKeyword('as'):
self.nextToken()
local = self.parseVariableIdentifier()
else:
imported = self.parseIdentifierName()
local = imported
if self.matchContextualKeyword('as'):
self.nextToken()
local = self.parseVariableIdentifier()
else:
self.throwUnexpectedToken(self.nextToken())
return self.finalize(node, Node.ImportSpecifier(local, imported))
# import {foo, bar as baz} ...
def parseNamedImports(self):
self.expect('{')
specifiers = []
while not self.match('}'):
specifiers.append(self.parseImportSpecifier())
if not self.match('}'):
self.expect(',')
self.expect('}')
return specifiers
# import <foo> ...
def parseImportDefaultSpecifier(self):
node = self.createNode()
local = self.parseIdentifierName()
return self.finalize(node, Node.ImportDefaultSpecifier(local))
# import <* as foo> ...
def parseImportNamespaceSpecifier(self):
node = self.createNode()
self.expect('*')
if not self.matchContextualKeyword('as'):
self.throwError(Messages.NoAsAfterImportNamespace)
self.nextToken()
local = self.parseIdentifierName()
return self.finalize(node, Node.ImportNamespaceSpecifier(local))
def parseImportDeclaration(self):
if self.context.inFunctionBody:
self.throwError(Messages.IllegalImportDeclaration)
node = self.createNode()
self.expectKeyword('import')
specifiers = []
if self.lookahead.type is Token.StringLiteral:
# import 'foo'
src = self.parseModuleSpecifier()
else:
if self.match('{'):
# import {bar
specifiers.extend(self.parseNamedImports())
elif self.match('*'):
# import * as foo
specifiers.append(self.parseImportNamespaceSpecifier())
elif self.isIdentifierName(self.lookahead) and not self.matchKeyword('default'):
# import foo
specifiers.append(self.parseImportDefaultSpecifier())
if self.match(','):
self.nextToken()
if self.match('*'):
# import foo, * as foo
specifiers.append(self.parseImportNamespaceSpecifier())
elif self.match('{'):
# import foo, {bar
specifiers.extend(self.parseNamedImports())
else:
self.throwUnexpectedToken(self.lookahead)
else:
self.throwUnexpectedToken(self.nextToken())
if not self.matchContextualKeyword('from'):
message = Messages.UnexpectedToken if self.lookahead.value else Messages.MissingFromClause
self.throwError(message, self.lookahead.value)
self.nextToken()
src = self.parseModuleSpecifier()
self.consumeSemicolon()
return self.finalize(node, Node.ImportDeclaration(specifiers, src))
# https://tc39.github.io/ecma262/#sec-exports
def parseExportSpecifier(self):
node = self.createNode()
local = self.parseIdentifierName()
exported = local
if self.matchContextualKeyword('as'):
self.nextToken()
exported = self.parseIdentifierName()
return self.finalize(node, Node.ExportSpecifier(local, exported))
def parseExportDefaultSpecifier(self):
node = self.createNode()
local = self.parseIdentifierName()
return self.finalize(node, Node.ExportDefaultSpecifier(local))
def parseExportDeclaration(self):
if self.context.inFunctionBody:
self.throwError(Messages.IllegalExportDeclaration)
node = self.createNode()
self.expectKeyword('export')
if self.matchKeyword('default'):
# export default ...
self.nextToken()
if self.matchKeyword('function'):
# export default function foo () {}
# export default function () {}
declaration = self.parseFunctionDeclaration(True)
exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
elif self.matchKeyword('class'):
# export default class foo {
declaration = self.parseClassDeclaration(True)
exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
elif self.matchContextualKeyword('async'):
# export default async function f () {}
# export default async function () {}
# export default async x => x
declaration = self.parseFunctionDeclaration(True) if self.matchAsyncFunction() else self.parseAssignmentExpression()
exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
else:
if self.matchContextualKeyword('from'):
self.throwError(Messages.UnexpectedToken, self.lookahead.value)
# export default {}
# export default []
# export default (1 + 2)
if self.match('{'):
declaration = self.parseObjectInitializer()
elif self.match('['):
declaration = self.parseArrayInitializer()
else:
declaration = self.parseAssignmentExpression()
self.consumeSemicolon()
exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
elif self.match('*'):
# export * from 'foo'
self.nextToken()
if not self.matchContextualKeyword('from'):
message = Messages.UnexpectedToken if self.lookahead.value else Messages.MissingFromClause
self.throwError(message, self.lookahead.value)
self.nextToken()
src = self.parseModuleSpecifier()
self.consumeSemicolon()
exportDeclaration = self.finalize(node, Node.ExportAllDeclaration(src))
elif self.lookahead.type is Token.Keyword:
# export var f = 1
value = self.lookahead.value
if value in (
'let',
'const',
):
declaration = self.parseLexicalDeclaration(Params(inFor=False))
elif value in (
'var',
'class',
'function',
):
declaration = self.parseStatementListItem()
else:
self.throwUnexpectedToken(self.lookahead)
exportDeclaration = self.finalize(node, Node.ExportNamedDeclaration(declaration, [], None))
elif self.matchAsyncFunction():
declaration = self.parseFunctionDeclaration()
exportDeclaration = self.finalize(node, Node.ExportNamedDeclaration(declaration, [], None))
else:
specifiers = []
source = None
isExportFromIdentifier = False
expectSpecifiers = True
if self.lookahead.type is Token.Identifier:
specifiers.append(self.parseExportDefaultSpecifier())
if self.match(','):
self.nextToken()
else:
expectSpecifiers = False
if expectSpecifiers:
self.expect('{')
while not self.match('}'):
isExportFromIdentifier = isExportFromIdentifier or self.matchKeyword('default')
specifiers.append(self.parseExportSpecifier())
if not self.match('}'):
self.expect(',')
self.expect('}')
if self.matchContextualKeyword('from'):
# export {default} from 'foo'
# export {foo} from 'foo'
self.nextToken()
source = self.parseModuleSpecifier()
self.consumeSemicolon()
elif isExportFromIdentifier:
# export {default}; # missing fromClause
message = Messages.UnexpectedToken if self.lookahead.value else Messages.MissingFromClause
self.throwError(message, self.lookahead.value)
else:
# export {foo}
self.consumeSemicolon()
exportDeclaration = self.finalize(node, Node.ExportNamedDeclaration(None, specifiers, source))
return exportDeclaration
|
mit
|
TeamEOS/external_chromium_org
|
components/test/data/autofill/merge/tools/reserialize_profiles_from_query.py
|
162
|
1177
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from autofill_merge_common import SerializeProfiles, ColumnNameToFieldType
def main():
"""Serializes the output of the query 'SELECT * from autofill_profiles;'.
"""
COLUMNS = ['GUID', 'LABEL', 'FIRST_NAME', 'MIDDLE_NAME', 'LAST_NAME', 'EMAIL',
'COMPANY_NAME', 'ADDRESS_LINE_1', 'ADDRESS_LINE_2', 'CITY',
'STATE', 'ZIPCODE', 'COUNTRY', 'PHONE', 'DATE_MODIFIED']
if len(sys.argv) != 2:
print ("Usage: python reserialize_profiles_from_query.py "
"<path/to/serialized_profiles>")
return
types = [ColumnNameToFieldType(column_name) for column_name in COLUMNS]
profiles = []
with open(sys.argv[1], 'r') as serialized_profiles:
for line in serialized_profiles:
# trim the newline if present
if line[-1] == '\n':
line = line[:-1]
values = line.split("|")
profiles.append(zip(types, values))
print SerializeProfiles(profiles)
return 0
if __name__ == '__main__':
sys.exit(main())
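# A hypothetical input line, one '|'-separated value per entry in COLUMNS:
#   0000-0000|home|Jane||Doe|jane@example.com|Acme|1 Main St||Anytown|CA|90210|US|555-0100|0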
|
bsd-3-clause
|
bratatidas9/Impala-1
|
thirdparty/thrift-0.9.0/tutorial/php/runserver.py
|
117
|
1121
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import BaseHTTPServer
import CGIHTTPServer
# chdir(2) into the tutorial directory.
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
class Handler(CGIHTTPServer.CGIHTTPRequestHandler):
cgi_directories = ['/php']
BaseHTTPServer.HTTPServer(('', 8080), Handler).serve_forever()
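# Usage sketch: run 'python runserver.py' from anywhere in the tutorial
# tree (the chdir above moves into the tutorial directory itself), then
# request http://localhost:8080/php/<client-script>.php to exercise the
# PHP CGI handler. Note this relies on the Python 2 BaseHTTPServer and
# CGIHTTPServer modules.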
|
apache-2.0
|
lechat/CouchPotato
|
library/imdb/parser/http/searchCompanyParser.py
|
67
|
2911
|
"""
parser.http.searchCompanyParser module (imdb package).
This module provides the HTMLSearchCompanyParser class (and the
search_company_parser instance), used to parse the results of a search
for a given company.
E.g., when searching for the name "Columbia Pictures", the parsed page would be:
http://akas.imdb.com/find?s=co;mx=20;q=Columbia+Pictures
Copyright 2008-2009 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_company_name, build_company_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCompanyParser(DOMBasicMovieParser):
"""Simply get the name of a company and the imdbID.
It's used by the DOMHTMLSearchCompanyParser class to return a result
for a direct match (when a search on IMDb results in a single
    company, the web server sends the company page directly).
"""
_titleFunct = lambda self, x: analyze_company_name(x or u'')
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
_BaseParser = DOMBasicCompanyParser
_notDirectHitTitle = '<title>imdb company'
_titleBuilder = lambda self, x: build_company_name(x)
_linkPrefix = '/company/co'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()",
'notes': "./text()[1]"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link')),
analyze_company_name(x.get('name')+(x.get('notes')
or u''), stripNotes=True)
))]
extractors = [Extractor(label='search',
path="//td[3]/a[starts-with(@href, " \
"'/company/co')]/..",
attrs=_attrs)]
_OBJECTS = {
'search_company_parser': ((DOMHTMLSearchCompanyParser,),
{'kind': 'company', '_basic_parser': DOMBasicCompanyParser})
}
|
gpl-3.0
|
tensorflow/graphics
|
tensorflow_graphics/math/interpolation/__init__.py
|
1
|
1116
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interpolation module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_graphics.math.interpolation import bspline
from tensorflow_graphics.math.interpolation import slerp
from tensorflow_graphics.math.interpolation import trilinear
from tensorflow_graphics.math.interpolation import weighted
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.math.
__all__ = _export_api.get_modules()
|
apache-2.0
|
ghchinoy/tensorflow
|
tensorflow/contrib/rpc/python/ops/rpc_op.py
|
41
|
1103
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=wildcard-import,unused-import
"""RPC communication."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import rpc
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import try_rpc
from tensorflow.python.framework import ops
ops.NotDifferentiable("Rpc")
ops.NotDifferentiable("TryRpc")
|
apache-2.0
|
hoehnp/navit_test
|
lib/python2.7/site-packages/pip/req/req_uninstall.py
|
510
|
6897
|
from __future__ import absolute_import
import logging
import os
import tempfile
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.exceptions import UninstallationError
from pip.utils import rmtree, ask, is_local, renames, normalize_path
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([
(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
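    # Illustrative example: compact({'/a/path/', '/a/path/to/a/file.txt'})
    # returns {'/a/path/'}, since the longer entry lives under the shorter
    # one and is removed along with it anyway.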
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
)
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in self.pth.values():
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
'Cannot roll back changes to %s, none were made', self.file
)
return False
logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
|
gpl-2.0
|
hellfyre/StratumsphereStatusBot
|
jsonserver.py
|
1
|
3019
|
# -*- coding: utf-8 -*-
__author__ = 'Matthias Uschok <[email protected]>'
import json
import BaseHTTPServer
import threading
from urlparse import parse_qs, urlparse
import status
callbacks = dict()
class JsonHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
print("path:", self.path)
if self.path == '/status.json':
data = {
'api' : '0.13',
'space' : 'Stratum 0',
                'logo' : 'https://stratum0.org/mediawiki/images/thumb/c/c6/Sanduhr-twitter-avatar-black.svg/240px-Sanduhr-twitter-avatar-black.svg.png',
                'url': 'https://stratum0.org',
'location' : {
'address': 'Hamburger Strasse 273a, 38114 Braunschweig, Germany',
'lon' : 10.5211247,
'lat' : 52.2785658
},
'state' : {
'open' : status.space['open'],
'lastchange' : status.space['last_change'],
'trigger_person' : status.space['by'],
'icon' : {
                        'open' : 'http://status.stratum0.org/open_square.png',
                        'closed' : 'http://status.stratum0.org/closed_square.png'
},
'ext_since' : status.space['since']
},
'contact' : {
'phone' : '+4953128769245',
'twitter' : '@stratum0',
'ml' : '[email protected]',
'issue-mail' : 'cm9oaWViK3NwYWNlYXBpLWlzc3Vlc0Byb2hpZWIubmFtZQ==',
                    'irc' : 'irc://irc.freenode.net/#stratum0'
},
'issue_report_channels' : [
'issue-mail'
]
}
data_string = json.dumps(data)
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(data_string)
self.wfile.write('\n')
elif self.path.startswith('/update?'):
queryurl = urlparse(self.path)
params = parse_qs(queryurl.query)
            if 'open' in params:
by = ''
if 'by' in params:
by = params['by'][0]
status.update(params['open'][0]=='true', by)
callbacks['send_status']()
self.send_response(200)
else:
self.send_response(400)
else:
self.send_response(404)
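# The /update endpoint is driven entirely by its query string, parsed above,
# e.g. (hypothetical request): GET /update?open=true&by=alice
# which marks the space open, records who triggered the change, and fires the
# registered 'send_status' callback.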
class JsonServer(threading.Thread):
def __init__(self, address):
super(JsonServer, self).__init__()
self.address = address
self.stop_requested = False
def run(self):
self.httpd = BaseHTTPServer.HTTPServer(self.address, JsonHandler)
while not self.stop_requested:
self.httpd.handle_request()
def stop(self):
self.stop_requested = True
|
apache-2.0
|
helifu/kudu
|
src/kudu/scripts/max_skew_estimate.py
|
4
|
3087
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The purpose of this script is to estimate the distribution of the maximum
# skew produced by Kudu's "power of two choices" placement algorithm,
# which is used to place replicas on tablet servers (at least in Kudu <= 1.7).
import math
import random
import sys
# Replicates Random::ReservoirSample from kudu/util/random.h.
def reservoir_sample(n, sample_size, avoid):
result = list()
k = 0
for i in xrange(n):
if i in avoid:
continue
k += 1
if len(result) < sample_size:
result.append(i)
continue
j = random.randrange(k)
if j < sample_size:
result[j] = i
return result
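# For example, reservoir_sample(10, 2, {3}) picks 2 of the 9 eligible server
# indices; each eligible index survives with probability sample_size / k, so
# the sample stays uniform over the non-avoided servers.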
# Follows CatalogManager::SelectReplica, which implements the power of two
# choices selection algorithm, except we assume we always have a placement.
def select_replica(num_servers, avoid, counts):
two_choices = reservoir_sample(num_servers, 2, avoid)
assert(len(two_choices) > 0)
assert(len(two_choices) <= 2)
if len(two_choices) == 1:
return two_choices[0]
else:
a, b = two_choices[0], two_choices[1]
if counts[a] < counts[b]:
return a
else:
return b
# Quickly cribbed from https://stackoverflow.com/a/15589202.
# 'data' must be sorted.
def percentile(data, percentile):
size = len(data)
return data[int(math.ceil((size * percentile) / 100)) - 1]
def generate_max_skew(num_servers, num_tablets, rf):
counts = {i : 0 for i in xrange(num_servers)}
for t in xrange(num_tablets):
avoid = set()
for r in range(rf):
replica = select_replica(num_servers, avoid, counts)
avoid.add(replica)
counts[replica] += 1
return max(counts.values()) - min(counts.values())
def main():
args = sys.argv
if len(args) != 5:
print "max_skew_estimate.py <num trials> <num servers> <num_tablets> <repl factor>"
sys.exit(1)
num_trials, num_servers, num_tablets, rf = int(args[1]), int(args[2]), int(args[3]), int(args[4])
skews = [generate_max_skew(num_servers, num_tablets, rf) for _ in xrange(num_trials)]
skews.sort()
for p in [5, 25, 50, 75, 99]:
print "%02d percentile: %d" % (p, percentile(skews, p))
if __name__ == "__main__":
main()
|
apache-2.0
|
JeremyAgost/gemrb
|
gemrb/GUIScripts/bg1/GUICG12.py
|
3
|
6688
|
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, portrait (GUICG12)
import GemRB
from GUIDefines import *
from ie_stats import *
import CharGenCommon
import GUICommon
AppearanceWindow = 0
CustomWindow = 0
PortraitButton = 0
PortraitsTable = 0
LastPortrait = 0
Gender = 0
def SetPicture ():
global PortraitsTable, LastPortrait
PortraitName = PortraitsTable.GetRowName (LastPortrait)+"G"
PortraitButton.SetPicture (PortraitName)
return
def OnLoad():
global AppearanceWindow, PortraitButton, PortraitsTable, LastPortrait
global Gender
if GUICommon.CloseOtherWindow (OnLoad):
if(AppearanceWindow):
AppearanceWindow.Unload()
return
GemRB.LoadWindowPack("GUICG", 640, 480)
AppearanceWindow = GemRB.LoadWindow (11)
#Load the Gender
MyChar = GemRB.GetVar ("Slot")
Gender = GemRB.GetPlayerStat (MyChar, IE_SEX)
#Load the Portraits Table
PortraitsTable = GemRB.LoadTable ("PICTURES")
PortraitsStart = PortraitsTable.FindValue (0, 2)
FemaleCount = PortraitsTable.GetRowCount () - PortraitsStart + 1
if Gender == 2:
LastPortrait = GemRB.Roll (1, FemaleCount, PortraitsStart-1)
else:
LastPortrait = GemRB.Roll (1, PortraitsTable.GetRowCount()-FemaleCount, 0)
PortraitButton = AppearanceWindow.GetControl (1)
PortraitButton.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_NO_IMAGE,OP_SET)
PortraitButton.SetState (IE_GUI_BUTTON_LOCKED)
LeftButton = AppearanceWindow.GetControl (2)
RightButton = AppearanceWindow.GetControl (3)
BackButton = AppearanceWindow.GetControl (5)
BackButton.SetText (15416)
CustomButton = AppearanceWindow.GetControl (6)
CustomButton.SetText (17545)
DoneButton = AppearanceWindow.GetControl (0)
DoneButton.SetText (11973)
DoneButton.SetFlags (IE_GUI_BUTTON_DEFAULT,OP_OR)
RightButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, RightPress)
LeftButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, LeftPress)
BackButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CharGenCommon.BackPress)
CustomButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CustomPress)
DoneButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, NextPress)
flag = False
while True:
if PortraitsTable.GetValue (LastPortrait, 0) == Gender:
SetPicture ()
break
LastPortrait = LastPortrait + 1
if LastPortrait >= PortraitsTable.GetRowCount ():
LastPortrait = 0
if flag:
SetPicture ()
break
flag = True
AppearanceWindow.ShowModal(MODAL_SHADOW_NONE)
return
def RightPress():
global LastPortrait
while True:
LastPortrait = LastPortrait + 1
if LastPortrait >= PortraitsTable.GetRowCount ():
LastPortrait = 0
if PortraitsTable.GetValue (LastPortrait, 0) == Gender:
SetPicture ()
return
def LeftPress():
global LastPortrait
while True:
LastPortrait = LastPortrait - 1
if LastPortrait < 0:
LastPortrait = PortraitsTable.GetRowCount ()-1
if PortraitsTable.GetValue (LastPortrait, 0) == Gender:
SetPicture ()
return
def CustomDone():
global AppearanceWindow
Window = CustomWindow
Portrait = PortraitList1.QueryText ()
GemRB.SetToken ("LargePortrait", Portrait)
Portrait = PortraitList2.QueryText ()
GemRB.SetToken ("SmallPortrait", Portrait)
if Window:
Window.Unload ()
CharGenCommon.next()
return
def CustomAbort():
global CustomWindow
if CustomWindow:
CustomWindow.Unload ()
return
def LargeCustomPortrait():
Window = CustomWindow
Portrait = PortraitList1.QueryText ()
#small hack
if GemRB.GetVar ("Row1") == RowCount1:
return
Label = Window.GetControl (0x10000007)
Label.SetText (Portrait)
Button = Window.GetControl (6)
if Portrait=="":
Portrait = "NOPORTLG"
Button.SetState (IE_GUI_BUTTON_DISABLED)
else:
if PortraitList2.QueryText ()!="":
Button.SetState (IE_GUI_BUTTON_ENABLED)
Button = Window.GetControl (0)
Button.SetPicture (Portrait, "NOPORTLG")
return
def SmallCustomPortrait():
Window = CustomWindow
Portrait = PortraitList2.QueryText ()
#small hack
if GemRB.GetVar ("Row2") == RowCount2:
return
Label = Window.GetControl (0x10000008)
Label.SetText (Portrait)
Button = Window.GetControl (6)
if Portrait=="":
Portrait = "NOPORTSM"
Button.SetState (IE_GUI_BUTTON_DISABLED)
else:
if PortraitList1.QueryText ()!="":
Button.SetState (IE_GUI_BUTTON_ENABLED)
Button = Window.GetControl (1)
Button.SetPicture (Portrait, "NOPORTSM")
return
def CustomPress():
global PortraitList1, PortraitList2
global RowCount1, RowCount2
global CustomWindow
CustomWindow = Window = GemRB.LoadWindow (18)
PortraitList1 = Window.GetControl (2)
RowCount1 = PortraitList1.GetPortraits (0)
PortraitList1.SetEvent (IE_GUI_TEXTAREA_ON_CHANGE, LargeCustomPortrait)
GemRB.SetVar ("Row1", RowCount1)
PortraitList1.SetVarAssoc ("Row1",RowCount1)
PortraitList2 = Window.GetControl (4)
RowCount2 = PortraitList2.GetPortraits (1)
PortraitList2.SetEvent (IE_GUI_TEXTAREA_ON_CHANGE, SmallCustomPortrait)
GemRB.SetVar ("Row2", RowCount2)
PortraitList2.SetVarAssoc ("Row2",RowCount2)
Button = Window.GetControl (6)
Button.SetText (11973)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, CustomDone)
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button = Window.GetControl (7)
Button.SetText (15416)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, CustomAbort)
Button = Window.GetControl (0)
PortraitName = PortraitsTable.GetRowName (LastPortrait)+"L"
Button.SetPicture (PortraitName, "NOPORTLG")
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button = Window.GetControl (1)
PortraitName = PortraitsTable.GetRowName (LastPortrait)+"S"
Button.SetPicture (PortraitName, "NOPORTSM")
Button.SetState (IE_GUI_BUTTON_LOCKED)
Window.ShowModal (MODAL_SHADOW_GRAY)
return
def NextPress():
PortraitTable = GemRB.LoadTable ("pictures")
PortraitName = PortraitTable.GetRowName (LastPortrait )
GemRB.SetToken ("SmallPortrait", PortraitName+"S")
GemRB.SetToken ("LargePortrait", PortraitName+"L")
CharGenCommon.next()
#GemRB.SetVar("Step",1)
#GemRB.SetNextScript("CharGen")
#GemRB.SetNextScript ("CharGen2") #Before race
#return
|
gpl-2.0
|
ashaarunkumar/spark-tk
|
python/sparktk/frame/ops/multiclass_classification_metrics.py
|
14
|
5704
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from classification_metrics_value import ClassificationMetricsValue
def multiclass_classification_metrics(self, label_column, pred_column, beta=1.0, frequency_column=None):
"""
Statistics of accuracy, precision, and others for a multi-class classification model.
Parameters:
:param label_column: (str) The name of the column containing the correct label for each instance.
:param pred_column: (str) The name of the column containing the predicted label for each instance.
:param beta: (Optional[int]) This is the beta value to use for :math:`F_{ \beta}` measure (default F1 measure is computed);
        must be greater than zero. Default is 1.
:param frequency_column: (Optional[str]) The name of an optional column containing the frequency of observations.
:return: (ClassificationMetricsValue) The data returned is composed of multiple components:<br>
<object>.accuracy : double<br>
<object>.confusion_matrix : table<br>
<object>.f_measure : double<br>
<object>.precision : double<br>
<object>.recall : double
Calculate the accuracy, precision, confusion_matrix, recall and :math:`F_{ \beta}` measure for a
classification model.
* The **f_measure** result is the :math:`F_{ \beta}` measure for a
classification model.
The :math:`F_{ \beta}` measure of a binary classification model is the
harmonic mean of precision and recall.
If we let:
* beta :math:`\equiv \beta`,
* :math:`T_{P}` denotes the number of true positives,
* :math:`F_{P}` denotes the number of false positives, and
* :math:`F_{N}` denotes the number of false negatives
then:
.. math::
F_{ \beta} = (1 + \beta ^ 2) * \frac{ \frac{T_{P}}{T_{P} + F_{P}} * \
\frac{T_{P}}{T_{P} + F_{N}}}{ \beta ^ 2 * \frac{T_{P}}{T_{P} + \
F_{P}} + \frac{T_{P}}{T_{P} + F_{N}}}
The :math:`F_{ \beta}` measure for a multi-class classification model is
computed as the weighted average of the :math:`F_{ \beta}` measure for
each label, where the weight is the number of instances of each label.
The determination of binary vs. multi-class is automatically inferred
from the data.
* For multi-class classification models, the **recall** measure is computed as
the weighted average of the recall for each label, where the weight is
the number of instances of each label.
The determination of binary vs. multi-class is automatically inferred
from the data.
* For multi-class classification models, the **precision** measure is computed
as the weighted average of the precision for each label, where the
weight is the number of instances of each label.
The determination of binary vs. multi-class is automatically inferred
from the data.
* The **accuracy** of a classification model is the proportion of
predictions that are correctly identified.
If we let :math:`T_{P}` denote the number of true positives,
:math:`T_{N}` denote the number of true negatives, and :math:`K` denote
the total number of classified instances, then the model accuracy is
given by: :math:`\frac{T_{P} + T_{N}}{K}`.
* The **confusion_matrix** result is a confusion matrix for a
classifier model, formatted for human readability.
Examples
--------
Consider Frame *my_frame*, which contains the data
<hide>
>>> s = [('a', str),('b', int),('labels', int),('predictions', int)]
>>> rows = [["red", 1, 0, 0], ["blue", 3, 1, 0],["green", 1, 0, 0],["green", 0, 1, 1],["red", 0, 5, 4]]
>>> my_frame = tc.frame.create(rows, s)
-etc-
</hide>
>>> my_frame.inspect()
[#] a b labels predictions
==================================
[0] red 1 0 0
[1] blue 3 1 0
[2] green 1 0 0
[3] green 0 1 1
[4] red 0 5 4
>>> cm = my_frame.multiclass_classification_metrics('labels', 'predictions')
<progress>
>>> cm.f_measure
0.5866666666666667
>>> cm.recall
0.6
>>> cm.accuracy
0.6
>>> cm.precision
0.6666666666666666
>>> cm.confusion_matrix
Predicted_0 Predicted_1 Predicted_4
Actual_0 2 0 0
Actual_1 1 1 0
Actual_5 0 0 1
"""
return ClassificationMetricsValue(self._tc, self._scala.multiClassClassificationMetrics(label_column,
pred_column,
float(beta),
self._tc.jutils.convert.to_scala_option(frequency_column)))
|
apache-2.0
|
ajoaoff/django
|
django/contrib/sessions/base_session.py
|
348
|
1623
|
"""
This module allows importing AbstractBaseSession even
when django.contrib.sessions is not in INSTALLED_APPS.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class BaseSessionManager(models.Manager):
def encode(self, session_dict):
"""
Return the given session dictionary serialized and encoded as a string.
"""
session_store_class = self.model.get_session_store_class()
return session_store_class().encode(session_dict)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
@python_2_unicode_compatible
class AbstractBaseSession(models.Model):
session_key = models.CharField(_('session key'), max_length=40, primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'), db_index=True)
objects = BaseSessionManager()
class Meta:
abstract = True
verbose_name = _('session')
verbose_name_plural = _('sessions')
def __str__(self):
return self.session_key
@classmethod
def get_session_store_class(cls):
raise NotImplementedError
def get_decoded(self):
session_store_class = self.get_session_store_class()
return session_store_class().decode(self.session_data)
|
bsd-3-clause
|
xtreamerdev/linux-mips
|
tools/perf/scripts/python/check-perf-trace.py
|
948
|
2501
|
# perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
gnmiller/craig-bot
|
craig-bot/lib/python3.6/site-packages/uritemplate/api.py
|
36
|
1911
|
"""
uritemplate.api
===============
This module contains the very simple API provided by uritemplate.
"""
from uritemplate.template import URITemplate
def expand(uri, var_dict=None, **kwargs):
"""Expand the template with the given parameters.
:param str uri: The templated URI to expand
:param dict var_dict: Optional dictionary with variables and values
:param kwargs: Alternative way to pass arguments
:returns: str
Example::
expand('https://api.github.com{/end}', {'end': 'users'})
expand('https://api.github.com{/end}', end='gists')
    .. note:: Passing values both ways may override values in
``var_dict``. For example::
expand('https://{var}', {'var': 'val1'}, var='val2')
``val2`` will be used instead of ``val1``.
"""
return URITemplate(uri).expand(var_dict, **kwargs)
def partial(uri, var_dict=None, **kwargs):
"""Partially expand the template with the given parameters.
If all of the parameters for the template are not given, return a
partially expanded template.
:param dict var_dict: Optional dictionary with variables and values
:param kwargs: Alternative way to pass arguments
:returns: :class:`URITemplate`
Example::
t = URITemplate('https://api.github.com{/end}')
t.partial() # => URITemplate('https://api.github.com{/end}')
"""
return URITemplate(uri).partial(var_dict, **kwargs)
def variables(uri):
"""Parse the variables of the template.
This returns all of the variable names in the URI Template.
:returns: Set of variable names
:rtype: set
Example::
        variables('https://api.github.com{/end}')
# => {'end'}
variables('https://api.github.com/repos{/username}{/repository}')
# => {'username', 'repository'}
"""
return set(URITemplate(uri).variable_names)
|
mit
|
audiohacked/pyBusPirate
|
src/buspirate/rawwire.py
|
1
|
8032
|
# Created by Sean Nelson on 2018-08-19.
# Copyright 2018 Sean Nelson <[email protected]>
#
# This file is part of pyBusPirate.
#
# pyBusPirate is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# pyBusPirate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBusPirate. If not, see <https://www.gnu.org/licenses/>.
""" RawWire class """
from enum import IntEnum
from buspirate.base import BusPirate
class RawWireSpeed(IntEnum):
""" Enum for RawWire Speeds """
SPEED_5KHZ = 0b000
SPEED_50KHZ = 0b001
SPEED_100KHZ = 0b010
SPEED_400KHZ = 0b011
class RawWireConfiguration(object):
""" RawWire Configuration Enum Base """
# def __getattr__(self, key):
# """
# Return an attribute of the class (fallback)
#
# :returns: returns contents of class
# :rtype: str.
# """
# pass
#
# def __getattribute__(self, key):
# """
# Return an attribute of the class
#
# :returns: returns contents of class
# :rtype: str.
# """
# pass
#
# def __str__(self) -> str:
# """
# Return string of the class
#
# :returns: returns contents of class
# :rtype: str.
# """
# return str(self.__dict__)
#
# def __eq__(self, other: object = None) -> bool:
# """
# Compare SPI Configurations
#
# :returns: returns a boolean
# :rtype: bool.
# """
# return self == other
class PinOutput(IntEnum):
""" Enum for Pin Output """
HIZ = 0b0000
V3P3 = 0b1000
PIN_HIZ = 0b0000
PIN_3P3V = 0b1000
class WireProtocol(IntEnum):
""" Enum for Wire Protocol """
PROTOCOL_2WIRE = 0b0000
PROTOCOL_3WIRE = 0b0100
class BitOrder(IntEnum):
""" Enum for Bit Order """
MSB = 0b0000
LSB = 0b0010
class NotUsed(IntEnum):
""" Enum for Position Z in RawWireConfiguration """
pass
class RawWire(BusPirate):
""" RawWire BitBanging on the BusPirate """
@property
def exit(self):
"""
Exit RawWire Mode
:returns: returns Success or Failure
"""
self.write(0x00)
return self.read(5) == "BBIO1"
@property
def enter(self):
"""
Enter RawWire Mode
:returns: returns Success or Failure
"""
self.write(0x05)
return self.read(4) == "RAW1"
@property
def start_bit(self):
"""
Start Bit
:returns: returns Success or Failure
"""
self.write(0x02)
return self.read(1) == 0x01
@property
def stop_bit(self):
"""
Stop Bit
:returns: returns Success or Failure
"""
self.write(0x03)
return self.read(1) == 0x01
@property
def cs_low(self):
"""
Toggle Chip Select Low
:returns: returns Success or Failure
"""
self.write(0x04)
return self.read(1) == 0x01
@property
def cs_high(self):
"""
Toggle Chip Select High
:returns: returns Success or Failure
"""
self.write(0x05)
return self.read(1) == 0x01
def read_byte(self):
"""
Read Byte from Bus
:returns: returns Success or Failure
"""
self.write(0x06)
return self.read(1)
def read_bit(self):
"""
Read Bit From Bus
:returns: returns Success or Failure
"""
self.write(0x07)
return self.read(1)
def peek(self):
"""
Peek at Bus without toggling CS or something
:returns: returns Success or Failure
"""
self.write(0x08)
return self.read(1)
def clock_tick(self):
"""
Jiggle Clock
:returns: returns Success or Failure
"""
self.write(0x09)
return self.read(1) == 0x01
@property
def clock_low(self):
"""
Toggle Clock Low
:returns: returns Success or Failure
"""
self.write(0x0A)
return self.read(1) == 0x01
@property
def clock_high(self):
"""
Toggle Clock High
:returns: returns Success or Failure
"""
self.write(0x0B)
return self.read(1) == 0x01
@property
def data_low(self):
"""
Toggle Data line Low
:returns: returns Success or Failure
"""
self.write(0x0C)
return self.read(1) == 0x01
@property
def data_high(self):
"""
Toggle Data line High
:returns: returns Success or Failure
"""
self.write(0x0D)
return self.read(1) == 0x01
def bulk_clock_ticks(self, count: int = 16):
"""
Send Bulk Clock ticks
:returns: returns Success or Failure
"""
if count == 0 or count > 16:
raise ValueError
self.write(0x20|count-1)
return self.read(1) == 0x01
def bulk_bits(self, count: int = 8, data_byte: int = 0x00):
"""
Send Bulk bits of a byte
:returns: returns Success or Failure
"""
if count == 0 or count > 8:
raise ValueError
self.write(0x30|count-1)
self.write(data_byte)
return self.read(1) == 0x01
def pullup_voltage_select(self) -> None:
"""
Select Pull-Up Voltage
        Unimplemented!
"""
raise NotImplementedError
@property
def speed(self):
""" Speed Property Getter """
return self._speed
@speed.setter
def speed(self, value):
""" Speed Property Setter """
self._speed = value
self.rawwire_speed(value)
def rawwire_speed(self, rawwire_speed: int = RawWireSpeed.SPEED_400KHZ) -> bool:
"""
Raw Wire Speed Configuration
:param rawwire_speed: The Clock Rate
:type rawwire_speed: int.
:returns: returns Success or Failure
:rtype: bool
"""
self.write(0x60|rawwire_speed)
return self.read(1) == 0x01
@property
def config(self):
""" Configuration Property Getter """
return self._config
@config.setter
def config(self, value):
""" Configuration Property Setter """
self._config = value
p_output = value & 0b1000
protocol = value & 0b0100
bitorder = value & 0b0010
return self.rawwire_config(p_output, protocol, bitorder)
def rawwire_config(self,
pin_output: int = RawWireConfiguration.PinOutput.HIZ,
wire_protocol: int = RawWireConfiguration.WireProtocol.PROTOCOL_2WIRE,
bit_order: int = RawWireConfiguration.BitOrder.MSB) -> bool:
"""
Raw Wire Configuration
:param pin_output: The Pin Configuration for Pin Output
:type pin_output: int.
:param wire_protocol: The Raw Wire Configuration for Protocol
:type wire_protocol: int.
:param bit_order: The Raw Wire Configuration for First Bit Order
:type bit_order: int.
:returns: returns Success or Failure
:rtype: bool.
"""
rawwire_configuration = 0
rawwire_configuration += pin_output
rawwire_configuration += wire_protocol
rawwire_configuration += bit_order
self.write(0x80|rawwire_configuration)
return self.read(1) == 0x01
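# Minimal usage sketch (illustrative only; assumes the BusPirate base class
# accepts a serial device path, which may differ per setup):
#
#   rw = RawWire("/dev/ttyUSB0")
#   if rw.enter:
#       rw.speed = RawWireSpeed.SPEED_400KHZ
#       rw.bulk_bits(8, 0xA5)   # clock out one byte
#       rw.exit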
if __name__ == '__main__':
pass
|
gpl-2.0
|
GuillaumeGomez/servo
|
tests/wpt/css-tests/tools/pytest/_pytest/skipping.py
|
168
|
12742
|
""" support for skip/xfail functions and markers. """
import os
import sys
import traceback
import py
import pytest
from _pytest.mark import MarkInfo, MarkDecorator
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--runxfail',
action="store_true", dest="runxfail", default=False,
help="run tests even if they are marked xfail")
parser.addini("xfail_strict", "default for the strict parameter of xfail "
"markers when not given explicitly (default: "
"False)",
default=False,
type="bool")
def pytest_configure(config):
if config.option.runxfail:
old = pytest.xfail
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = XFailed
setattr(pytest, "xfail", nop)
config.addinivalue_line("markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"http://pytest.org/latest/skipping.html"
)
config.addinivalue_line("markers",
"xfail(condition, reason=None, run=True, raises=None): mark the the test function "
"as an expected failure if eval(condition) has a True value. "
"Optionally specify a reason for better reporting and run=False if "
"you don't even want to execute the test function. If only specific "
"exception(s) are expected, you can list them in raises, and if the test fails "
"in other ways, it will be reported as a true failure. "
"See http://pytest.org/latest/skipping.html"
)
def pytest_namespace():
return dict(xfail=xfail)
class XFailed(pytest.fail.Exception):
""" raised from an explicit call to pytest.xfail() """
def xfail(reason=""):
""" xfail an executing test or setup functions with the given reason."""
__tracebackhide__ = True
raise XFailed(reason)
xfail.Exception = XFailed
class MarkEvaluator:
def __init__(self, item, name):
self.item = item
self.name = name
@property
def holder(self):
return self.item.keywords.get(self.name)
def __bool__(self):
return bool(self.holder)
__nonzero__ = __bool__
def wasvalid(self):
return not hasattr(self, 'exc')
def invalidraise(self, exc):
raises = self.get('raises')
if not raises:
return
return not isinstance(exc, raises)
def istrue(self):
try:
return self._istrue()
except Exception:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^",]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
pytest.fail("Error evaluating %r expression\n"
" %s\n"
"%s"
%(self.name, self.expr, "\n".join(msg)),
pytrace=False)
def _getglobals(self):
d = {'os': os, 'sys': sys, 'config': self.item.config}
func = self.item.obj
try:
d.update(func.__globals__)
except AttributeError:
d.update(func.func_globals)
return d
def _istrue(self):
if hasattr(self, 'result'):
return self.result
if self.holder:
d = self._getglobals()
if self.holder.args:
self.result = False
# "holder" might be a MarkInfo or a MarkDecorator; only
# MarkInfo keeps track of all parameters it received in an
# _arglist attribute
if hasattr(self.holder, '_arglist'):
arglist = self.holder._arglist
else:
arglist = [(self.holder.args, self.holder.kwargs)]
for args, kwargs in arglist:
for expr in args:
self.expr = expr
if isinstance(expr, py.builtin._basestring):
result = cached_eval(self.item.config, expr, d)
else:
if "reason" not in kwargs:
# XXX better be checked at collection time
msg = "you need to specify reason=STRING " \
"when using booleans as conditions."
pytest.fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = kwargs.get('reason', None)
self.expr = expr
return self.result
else:
self.result = True
return getattr(self, 'result', False)
def get(self, attr, default=None):
return self.holder.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, 'reason', None) or self.get('reason', None)
if not expl:
if not hasattr(self, 'expr'):
return ""
else:
return "condition: " + str(self.expr)
return expl
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
# Check if skip or skipif are specified as pytest marks
skipif_info = item.keywords.get('skipif')
if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
eval_skipif = MarkEvaluator(item, 'skipif')
if eval_skipif.istrue():
item._evalskip = eval_skipif
pytest.skip(eval_skipif.getexplanation())
skip_info = item.keywords.get('skip')
if isinstance(skip_info, (MarkInfo, MarkDecorator)):
item._evalskip = True
if 'reason' in skip_info.kwargs:
pytest.skip(skip_info.kwargs['reason'])
elif skip_info.args:
pytest.skip(skip_info.args[0])
else:
pytest.skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, 'xfail')
check_xfail_no_run(item)
@pytest.mark.hookwrapper
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
outcome = yield
passed = outcome.excinfo is None
if passed:
check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
"""check xfail(run=False)"""
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get('run', True):
pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
"""check xfail(strict=True) for the given PASSING test"""
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini('xfail_strict')
is_strict_xfail = evalxfail.get('strict', strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
evalxfail = getattr(item, '_evalxfail', None)
evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
# we need to translate into how pytest encodes xpass
rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
rep.outcome = "failed"
elif item.config.option.runxfail:
        pass # don't interfere
elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
elif call.when == "call":
rep.outcome = "failed" # xpass outcome
rep.wasxfail = evalxfail.getexplanation()
elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
# skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
# the location of where the skip exception was raised within pytest
filename, line, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "xfail"
elif report.failed:
return "xpassed", "X", ("XPASS", {'yellow': True})
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.reportchars:
#for name in "xfailed skipped failed xpassed":
# if not tr.stats.get(name, 0):
# tr.write_line("HINT: use '-r' option to see extra "
# "summary info about tests")
# break
return
lines = []
for char in tr.reportchars:
if char == "x":
show_xfailed(terminalreporter, lines)
elif char == "X":
show_xpassed(terminalreporter, lines)
elif char in "fF":
show_simple(terminalreporter, lines, 'failed', "FAIL %s")
elif char in "sS":
show_skipped(terminalreporter, lines)
elif char == "E":
show_simple(terminalreporter, lines, 'error', "ERROR %s")
elif char == 'p':
show_simple(terminalreporter, lines, 'passed', "PASSED %s")
if lines:
tr._tw.sep("=", "short test summary info")
for line in lines:
tr._tw.line(line)
def show_simple(terminalreporter, lines, stat, format):
failed = terminalreporter.stats.get(stat)
if failed:
for rep in failed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
lines.append(format %(pos,))
def show_xfailed(terminalreporter, lines):
xfailed = terminalreporter.stats.get("xfailed")
if xfailed:
for rep in xfailed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XFAIL %s" % (pos,))
if reason:
lines.append(" " + str(reason))
def show_xpassed(terminalreporter, lines):
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
for rep in xpassed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XPASS %s %s" %(pos, reason))
def cached_eval(config, expr, d):
if not hasattr(config, '_evalcache'):
config._evalcache = {}
try:
return config._evalcache[expr]
except KeyError:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
config._evalcache[expr] = x = eval(exprcode, d)
return x
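# cached_eval memoizes condition strings per config object, so an expression
# such as 'sys.platform == "win32"' is compiled and evaluated once per test
# session rather than once for every item that carries the mark.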
def folded_skips(skipped):
d = {}
for event in skipped:
key = event.longrepr
assert len(key) == 3, (event, key)
d.setdefault(key, []).append(event)
l = []
for key, events in d.items():
l.append((len(events),) + key)
return l
def show_skipped(terminalreporter, lines):
tr = terminalreporter
skipped = tr.stats.get('skipped', [])
if skipped:
#if not tr.hasopt('skipped'):
# tr.write_line(
# "%d skipped tests, specify -rs for more info" %
# len(skipped))
# return
fskips = folded_skips(skipped)
if fskips:
#tr.write_sep("_", "skipped test summary")
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
lines.append("SKIP [%d] %s:%d: %s" %
(num, fspath, lineno, reason))
|
mpl-2.0
|
petrjasek/superdesk-core
|
superdesk/data_updates/00008_20180404-165521_archive.py
|
2
|
1636
|
# -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Author : Jérôme
# Creation: 2018-04-04 16:55
from superdesk.commands.data_updates import BaseDataUpdate
class DataUpdate(BaseDataUpdate):
resource = "archive" # will use multiple resources, keeping this here so validation passes
def forwards(self, mongodb_collection, mongodb_database):
for resource in ["archive", "archive_autosave", "published"]:
collection = mongodb_database[resource]
for item in collection.find({"editor_state": {"$exists": True}}):
state = item["editor_state"]
fields_meta = {"body_html": {"draftjsState": state}}
print(
collection.update(
{"_id": item["_id"]}, {"$set": {"fields_meta": fields_meta}, "$unset": {"editor_state": ""}}
)
)
def backwards(self, mongodb_collection, mongodb_database):
for resource in ["archive", "archive_autosave", "published"]:
collection = mongodb_database[resource]
for item in collection.find({"fields_meta": {"$exists": True}}):
state = item["fields_meta"]["body_html"]["draftjsState"]
print(
collection.update(
{"_id": item["_id"]}, {"$set": {"editor_state": state}, "$unset": {"fields_meta": ""}}
)
)
|
agpl-3.0
|
vuntz/glance
|
glance/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py
|
19
|
2424
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import * # noqa
from sqlalchemy import * # noqa
from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import
def get_images_table(meta):
"""
No changes to the images table from 008...
"""
(get_images_table,) = from_migration_import(
'008_add_image_members_table', ['get_images_table'])
images = get_images_table(meta)
return images
def get_image_properties_table(meta):
"""
No changes to the image properties table from 008...
"""
(get_image_properties_table,) = from_migration_import(
'008_add_image_members_table', ['get_image_properties_table'])
image_properties = get_image_properties_table(meta)
return image_properties
def get_image_members_table(meta):
"""
No changes to the image members table from 008...
"""
(get_image_members_table,) = from_migration_import(
'008_add_image_members_table', ['get_image_members_table'])
images = get_image_members_table(meta)
return images
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images_table = get_images_table(meta)
# set updated_at to created_at if equal to None
conn = migrate_engine.connect()
conn.execute(
images_table.update(
images_table.c.updated_at == None,
{images_table.c.updated_at: images_table.c.created_at}))
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images_table = get_images_table(meta)
# set updated_at to None if equal to created_at
conn = migrate_engine.connect()
conn.execute(
images_table.update(
images_table.c.updated_at == images_table.c.created_at,
{images_table.c.updated_at: None}))
|
apache-2.0
|
xieta/mincoin
|
qa/rpc-tests/test_framework/netutil.py
|
98
|
5030
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with status of tcp connections at linux systems
To get pid of all network process running on system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
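# Worked examples, derived from the conversion logic above:
#   addr_to_hex('127.0.0.1') == '7f000001'
#   addr_to_hex('::1')       == '00000000000000000000000000000001'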
def test_ipv6_local():
'''
Check for (local) IPv6 support.
'''
import socket
# By using SOCK_DGRAM this will not actually make a connection, but it will
# fail if there is no route to IPv6 localhost.
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
|
mit
|
syhost/android_kernel_zte_nx503a
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
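# Test files consist of colon-separated lines of the form
#   <cmd>:<opcode>:<thread-id>:<data>
# e.g. (illustrative): "C: schedfifo: 0: 80" issues a command to thread 0,
# while "W: locked: 0: 0" waits for a status value; see the parser below.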
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
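# Hypothetical invocation sketch (the testfile name is illustrative only):
#   rt-tester.py -q sometest.tst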
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
        intval = intval // (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
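def selftest_analyse():
    # Minimal sketch (not part of the original script) exercising the "M"
    # branch of analyse(): for status value "400" and lock index "2",
    # digit 2 of the value is 4, which matches the "locked" test opcode
    # ["M", "eq", 4], so this returns 1.
    return analyse("400", ["M", "eq", 4], "2")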
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
zpao/buck
|
third-party/py/pathlib/test_pathlib.py
|
57
|
77535
|
import collections
import io
import os
import errno
import pathlib
import pickle
import shutil
import socket
import stat
import sys
import tempfile
import unittest
from contextlib import contextmanager
if sys.version_info < (2, 7):
try:
import unittest2 as unittest
except ImportError:
raise ImportError("unittest2 is required for tests on pre-2.7")
try:
from test import support
except ImportError:
from test import test_support as support
TESTFN = support.TESTFN
try:
import grp, pwd
except ImportError:
grp = pwd = None
# Backported from 3.4
def fs_is_case_insensitive(directory):
"""Detects if the file system for the specified directory is case-insensitive."""
base_fp, base_path = tempfile.mkstemp(dir=directory)
case_path = base_path.upper()
if case_path == base_path:
case_path = base_path.lower()
try:
return os.path.samefile(base_path, case_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
finally:
os.unlink(base_path)
support.fs_is_case_insensitive = fs_is_case_insensitive
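# Minimal usage sketch (assumes tempfile.gettempdir() is writable):
#   support.fs_is_case_insensitive(tempfile.gettempdir())
# typically returns True on default macOS/Windows filesystems and False on
# common Linux ones.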
class _BaseFlavourTest(object):
def _check_parse_parts(self, arg, expected):
f = self.flavour.parse_parts
sep = self.flavour.sep
altsep = self.flavour.altsep
actual = f([x.replace('/', sep) for x in arg])
self.assertEqual(actual, expected)
if altsep:
actual = f([x.replace('/', altsep) for x in arg])
self.assertEqual(actual, expected)
drv, root, parts = actual
# neither bytes (py3) nor unicode (py2)
self.assertIsInstance(drv, str)
self.assertIsInstance(root, str)
for p in parts:
self.assertIsInstance(p, str)
def test_parse_parts_common(self):
check = self._check_parse_parts
sep = self.flavour.sep
# Unanchored parts
check([], ('', '', []))
check(['a'], ('', '', ['a']))
check(['a/'], ('', '', ['a']))
check(['a', 'b'], ('', '', ['a', 'b']))
# Expansion
check(['a/b'], ('', '', ['a', 'b']))
check(['a/b/'], ('', '', ['a', 'b']))
check(['a', 'b/c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Collapsing and stripping excess slashes
check(['a', 'b//c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
check(['a', 'b/c/', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Eliminating standalone dots
check(['.'], ('', '', []))
check(['.', '.', 'b'], ('', '', ['b']))
check(['a', '.', 'b'], ('', '', ['a', 'b']))
check(['a', '.', '.'], ('', '', ['a']))
# The first part is anchored
check(['/a/b'], ('', sep, [sep, 'a', 'b']))
check(['/a', 'b'], ('', sep, [sep, 'a', 'b']))
check(['/a/', 'b'], ('', sep, [sep, 'a', 'b']))
# Ignoring parts before an anchored part
check(['a', '/b', 'c'], ('', sep, [sep, 'b', 'c']))
check(['a', '/b', '/c'], ('', sep, [sep, 'c']))
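# For reference, parse_parts() returns a (drive, root, parts) triple; the
# anchored POSIX case above, for example, parses ['/a/b'] into
# ('', '/', ['/', 'a', 'b']).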
class PosixFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._posix_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# Collapsing of excess leading slashes, except for the double-slash
# special case.
check(['//a', 'b'], ('', '//', ['//', 'a', 'b']))
check(['///a', 'b'], ('', '/', ['/', 'a', 'b']))
check(['////a', 'b'], ('', '/', ['/', 'a', 'b']))
# Paths which look like NT paths aren't treated specially
check(['c:a'], ('', '', ['c:a']))
check(['c:\\a'], ('', '', ['c:\\a']))
check(['\\a'], ('', '', ['\\a']))
def test_splitroot(self):
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a/b'), ('', '', 'a/b'))
self.assertEqual(f('a/b/'), ('', '', 'a/b/'))
self.assertEqual(f('/a'), ('', '/', 'a'))
self.assertEqual(f('/a/b'), ('', '/', 'a/b'))
self.assertEqual(f('/a/b/'), ('', '/', 'a/b/'))
# The root is collapsed when there are redundant slashes
# except when there are exactly two leading slashes, which
# is a special case in POSIX.
self.assertEqual(f('//a'), ('', '//', 'a'))
self.assertEqual(f('///a'), ('', '/', 'a'))
self.assertEqual(f('///a/b'), ('', '/', 'a/b'))
# Paths which look like NT paths aren't treated specially
self.assertEqual(f('c:/a/b'), ('', '', 'c:/a/b'))
self.assertEqual(f('\\/a/b'), ('', '', '\\/a/b'))
self.assertEqual(f('\\a\\b'), ('', '', '\\a\\b'))
class NTFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._windows_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# First part is anchored
check(['c:'], ('c:', '', ['c:']))
check(['c:\\'], ('c:', '\\', ['c:\\']))
check(['\\'], ('', '\\', ['\\']))
check(['c:a'], ('c:', '', ['c:', 'a']))
check(['c:\\a'], ('c:', '\\', ['c:\\', 'a']))
check(['\\a'], ('', '\\', ['\\', 'a']))
# UNC paths
check(['\\\\a\\b'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['\\\\a\\b\\'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['\\\\a\\b\\c'], ('\\\\a\\b', '\\', ['\\\\a\\b\\', 'c']))
# Second part is anchored, so that the first part is ignored
check(['a', 'Z:b', 'c'], ('Z:', '', ['Z:', 'b', 'c']))
check(['a', 'Z:\\b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
check(['a', '\\b', 'c'], ('', '\\', ['\\', 'b', 'c']))
# UNC paths
check(['a', '\\\\b\\c', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Collapsing and stripping excess slashes
check(['a', 'Z:\\\\b\\\\c\\', 'd\\'], ('Z:', '\\', ['Z:\\', 'b', 'c', 'd']))
# UNC paths
check(['a', '\\\\b\\c\\\\', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Extended paths
check(['\\\\?\\c:\\'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\']))
check(['\\\\?\\c:\\a'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'a']))
# Extended UNC paths (format is "\\?\UNC\server\share")
check(['\\\\?\\UNC\\b\\c'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\']))
check(['\\\\?\\UNC\\b\\c\\d'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\', 'd']))
def test_splitroot(self):
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a\\b'), ('', '', 'a\\b'))
self.assertEqual(f('\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('c:a\\b'), ('c:', '', 'a\\b'))
self.assertEqual(f('c:\\a\\b'), ('c:', '\\', 'a\\b'))
# Redundant slashes in the root are collapsed
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\\\\\a/b'), ('', '\\', 'a/b'))
self.assertEqual(f('c:\\\\a'), ('c:', '\\', 'a'))
self.assertEqual(f('c:\\\\\\a/b'), ('c:', '\\', 'a/b'))
# Valid UNC paths
self.assertEqual(f('\\\\a\\b'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\c\\d'), ('\\\\a\\b', '\\', 'c\\d'))
# These are non-UNC paths (according to ntpath.py and test_ntpath)
# However, command.com says such paths are invalid, so it's
# difficult to know what the right semantics are
self.assertEqual(f('\\\\\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
#
# Tests for the pure classes
#
with_fsencode = unittest.skipIf(sys.version_info < (3, 2),
'os.fsencode has been introduced in version 3.2')
class _BasePurePathTest(object):
# keys are canonical paths, values are list of tuples of arguments
# supposed to produce equal paths
equivalences = {
'a/b': [
('a', 'b'), ('a/', 'b'), ('a', 'b/'), ('a/', 'b/'),
('a/b/',), ('a//b',), ('a//b//',),
# empty components get removed
('', 'a', 'b'), ('a', '', 'b'), ('a', 'b', ''),
],
'/b/c/d': [
('a', '/b/c', 'd'), ('a', '///b//c', 'd/'),
('/a', '/b/c', 'd'),
# empty components get removed
('/', 'b', '', 'c/d'), ('/', '', 'b/c/d'), ('', '/b/c/d'),
],
}
def setUp(self):
p = self.cls('a')
self.flavour = p._flavour
self.sep = self.flavour.sep
self.altsep = self.flavour.altsep
def test_constructor_common(self):
P = self.cls
p = P('a')
self.assertIsInstance(p, P)
P('a', 'b', 'c')
P('/a', 'b', 'c')
P('a/b/c')
P('/a/b/c')
self.assertEqual(P(P('a')), P('a'))
self.assertEqual(P(P('a'), 'b'), P('a/b'))
self.assertEqual(P(P('a'), P('b')), P('a/b'))
def test_join_common(self):
P = self.cls
p = P('a/b')
pp = p.joinpath('c')
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p.joinpath('c', 'd')
self.assertEqual(pp, P('a/b/c/d'))
pp = p.joinpath(P('c'))
self.assertEqual(pp, P('a/b/c'))
pp = p.joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div_common(self):
# Basically the same as joinpath()
P = self.cls
p = P('a/b')
pp = p / 'c'
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p / 'c/d'
self.assertEqual(pp, P('a/b/c/d'))
pp = p / 'c' / 'd'
self.assertEqual(pp, P('a/b/c/d'))
pp = 'c' / p / 'd'
self.assertEqual(pp, P('c/a/b/d'))
pp = p / P('c')
self.assertEqual(pp, P('a/b/c'))
        pp = p / '/c'
self.assertEqual(pp, P('/c'))
def _check_str(self, expected, args):
p = self.cls(*args)
s = str(p)
self.assertEqual(s, expected.replace('/', self.sep))
self.assertIsInstance(s, str)
def test_str_common(self):
# Canonicalized paths roundtrip
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self._check_str(pathstr, (pathstr,))
# Special case for the empty path
self._check_str('.', ('',))
# Other tests for str() are in test_equivalences()
def test_as_posix_common(self):
P = self.cls
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self.assertEqual(P(pathstr).as_posix(), pathstr)
# Other tests for as_posix() are in test_equivalences()
@with_fsencode
def test_as_bytes_common(self):
sep = os.fsencode(self.sep)
P = self.cls
self.assertEqual(bytes(P('a/b')), b'a' + sep + b'b')
def test_as_uri_common(self):
P = self.cls
with self.assertRaises(ValueError):
P('a').as_uri()
with self.assertRaises(ValueError):
P().as_uri()
def test_repr_common(self):
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
p = self.cls(pathstr)
clsname = p.__class__.__name__
r = repr(p)
self.assertIsInstance(r, str)
# The repr() is in the form ClassName("forward-slashes path")
self.assertTrue(r.startswith(clsname + '('), r)
self.assertTrue(r.endswith(')'), r)
inner = r[len(clsname) + 1 : -1]
self.assertEqual(eval(inner), p.as_posix())
# The repr() roundtrips
q = eval(r, pathlib.__dict__)
self.assertIs(q.__class__, p.__class__)
self.assertEqual(q, p)
self.assertEqual(repr(q), r)
def test_eq_common(self):
P = self.cls
self.assertEqual(P('a/b'), P('a/b'))
self.assertEqual(P('a/b'), P('a', 'b'))
self.assertNotEqual(P('a/b'), P('a'))
self.assertNotEqual(P('a/b'), P('/a/b'))
self.assertNotEqual(P('a/b'), P())
self.assertNotEqual(P('/a/b'), P('/'))
self.assertNotEqual(P(), P('/'))
self.assertNotEqual(P(), "")
self.assertNotEqual(P(), {})
self.assertNotEqual(P(), int)
def test_match_common(self):
P = self.cls
self.assertRaises(ValueError, P('a').match, '')
self.assertRaises(ValueError, P('a').match, '.')
# Simple relative pattern
self.assertTrue(P('b.py').match('b.py'))
self.assertTrue(P('a/b.py').match('b.py'))
self.assertTrue(P('/a/b.py').match('b.py'))
self.assertFalse(P('a.py').match('b.py'))
self.assertFalse(P('b/py').match('b.py'))
self.assertFalse(P('/a.py').match('b.py'))
self.assertFalse(P('b.py/c').match('b.py'))
        # Wildcard relative pattern
self.assertTrue(P('b.py').match('*.py'))
self.assertTrue(P('a/b.py').match('*.py'))
self.assertTrue(P('/a/b.py').match('*.py'))
self.assertFalse(P('b.pyc').match('*.py'))
self.assertFalse(P('b./py').match('*.py'))
self.assertFalse(P('b.py/c').match('*.py'))
# Multi-part relative pattern
self.assertTrue(P('ab/c.py').match('a*/*.py'))
self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
self.assertFalse(P('a.py').match('a*/*.py'))
self.assertFalse(P('/dab/c.py').match('a*/*.py'))
self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
# Absolute pattern
self.assertTrue(P('/b.py').match('/*.py'))
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('a/b.py').match('/*.py'))
self.assertFalse(P('/a/b.py').match('/*.py'))
# Multi-part absolute pattern
self.assertTrue(P('/a/b.py').match('/a/*.py'))
self.assertFalse(P('/ab.py').match('/a/*.py'))
self.assertFalse(P('/a/b/c.py').match('/a/*.py'))
# Double-star wildcard absolute pattern
self.assertTrue(P('/a.py').match('**/*.py'))
self.assertTrue(P('/a/b.py').match('**'))
self.assertTrue(P('/a/b.py').match('**/*'))
self.assertTrue(P('/a/b.py').match('**/*.py'))
self.assertTrue(P('/a/b/c.py').match('**/*.py'))
self.assertTrue(P('/a/b/c/d.py').match('**/*.py'))
self.assertFalse(P('/a/b/c/d.spam').match('**/*.py'))
# Double-star wildcard relative pattern
self.assertTrue(P('a.py').match('**/*.py'))
self.assertTrue(P('a/b.py').match('**'))
self.assertTrue(P('a/b.py').match('**/*'))
self.assertTrue(P('a/b.py').match('**/*.py'))
self.assertTrue(P('a/b/c.py').match('**/*py'))
self.assertTrue(P('a/b/c/d.py').match('**/*py'))
self.assertFalse(P('a/b/c/d.spam').match('**/*.py'))
# Double-star wildcard absolute pattern with prefix
self.assertTrue(P('/a/b.py').match('/a/**'))
self.assertTrue(P('/a/b.py').match('/a/**/*'))
self.assertTrue(P('/a/b.py').match('/a/**/*.py'))
self.assertTrue(P('/a/b/c.py').match('/a/**/*py'))
self.assertTrue(P('/a/b/c/d.py').match('/a/**/*py'))
# Failed lookahead absolute pattern with prefix
self.assertTrue(P('/a/b/c/b/c').match('/a/b/**'))
self.assertFalse(P('/a/spam/c/b/c').match('/a/b/**'))
# Double-star wildcard relative pattern with prefix
self.assertTrue(P('a/b.py').match('a/**'))
self.assertTrue(P('a/b.py').match('a/**/*'))
self.assertTrue(P('a/b.py').match('a/**/*.py'))
self.assertTrue(P('a/b/c.py').match('a/**/*py'))
self.assertTrue(P('a/b/c/d.py').match('a/**/*py'))
self.assertFalse(P('a/b/c/d.spam').match('a/**/*py'))
self.assertFalse(P('a/b/c/d.py').match('e/**'))
# Failed lookahead relative pattern with prefix
self.assertTrue(P('a/b/c/b/c').match('a/b/**'))
self.assertFalse(P('a/spam/c/b/c').match('a/b/**'))
# Double-star wildcard pattern with suffix
self.assertTrue(P('/c/a/c/a/b').match('**/a/b'))
self.assertTrue(P('c/a/c/a/b').match('**/a/b'))
self.assertFalse(P('c/a/c/spam/b').match('**/a/b'))
# Double-star with multiple path components
self.assertTrue(P('a/b/c/food/e.py').match('**/b/*/foo*/*.py'))
self.assertTrue(P('a/b/c/d.py').match('**/b/**/*.py'))
# Double-star with single path component
self.assertTrue(P('foo').match('**/*'))
self.assertTrue(P('foo').match('**/**'))
self.assertTrue(P('foo').match('**/**/**'))
# Match entire relative path
self.assertTrue(P('foo/a.py').match('foo/*.py', match_entire=True))
self.assertFalse(P('bar/foo/a.py').match('foo/*.py', match_entire=True))
def test_ordering_common(self):
        # Ordering is tuple-like
def assertLess(a, b):
self.assertLess(a, b)
self.assertGreater(b, a)
P = self.cls
a = P('a')
b = P('a/b')
c = P('abc')
d = P('b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
P = self.cls
a = P('/a')
b = P('/a/b')
c = P('/abc')
d = P('/b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
if sys.version_info > (3,):
with self.assertRaises(TypeError):
P() < {}
else:
P() < {}
def test_parts_common(self):
# `parts` returns a tuple
sep = self.sep
P = self.cls
p = P('a/b')
parts = p.parts
self.assertEqual(parts, ('a', 'b'))
for part in parts:
self.assertIsInstance(part, str)
# The object gets reused
self.assertIs(parts, p.parts)
# When the path is absolute, the anchor is a separate part
p = P('/a/b')
parts = p.parts
self.assertEqual(parts, (sep, 'a', 'b'))
def test_equivalences(self):
for k, tuples in self.equivalences.items():
canon = k.replace('/', self.sep)
posix = k.replace(self.sep, '/')
if canon != posix:
tuples = tuples + [
tuple(part.replace('/', self.sep) for part in t)
for t in tuples
]
tuples.append((posix, ))
pcanon = self.cls(canon)
for t in tuples:
p = self.cls(*t)
self.assertEqual(p, pcanon, "failed with args {0}".format(t))
self.assertEqual(hash(p), hash(pcanon))
self.assertEqual(str(p), canon)
self.assertEqual(p.as_posix(), posix)
def test_parent_common(self):
# Relative
P = self.cls
p = P('a/b/c')
self.assertEqual(p.parent, P('a/b'))
self.assertEqual(p.parent.parent, P('a'))
self.assertEqual(p.parent.parent.parent, P())
self.assertEqual(p.parent.parent.parent.parent, P())
# Anchored
p = P('/a/b/c')
self.assertEqual(p.parent, P('/a/b'))
self.assertEqual(p.parent.parent, P('/a'))
self.assertEqual(p.parent.parent.parent, P('/'))
self.assertEqual(p.parent.parent.parent.parent, P('/'))
def test_parents_common(self):
# Relative
P = self.cls
p = P('a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('a/b'))
self.assertEqual(par[1], P('a'))
self.assertEqual(par[2], P('.'))
self.assertEqual(list(par), [P('a/b'), P('a'), P('.')])
with self.assertRaises(IndexError):
par[-1]
with self.assertRaises(IndexError):
par[3]
with self.assertRaises(TypeError):
par[0] = p
# Anchored
p = P('/a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('/a/b'))
self.assertEqual(par[1], P('/a'))
self.assertEqual(par[2], P('/'))
self.assertEqual(list(par), [P('/a/b'), P('/a'), P('/')])
with self.assertRaises(IndexError):
par[3]
def test_drive_common(self):
P = self.cls
self.assertEqual(P('a/b').drive, '')
self.assertEqual(P('/a/b').drive, '')
self.assertEqual(P('').drive, '')
def test_root_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').root, '')
self.assertEqual(P('a/b').root, '')
self.assertEqual(P('/').root, sep)
self.assertEqual(P('/a/b').root, sep)
def test_anchor_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').anchor, '')
self.assertEqual(P('a/b').anchor, '')
self.assertEqual(P('/').anchor, sep)
self.assertEqual(P('/a/b').anchor, sep)
def test_name_common(self):
P = self.cls
self.assertEqual(P('').name, '')
self.assertEqual(P('.').name, '')
self.assertEqual(P('/').name, '')
self.assertEqual(P('a/b').name, 'b')
self.assertEqual(P('/a/b').name, 'b')
self.assertEqual(P('/a/b/.').name, 'b')
self.assertEqual(P('a/b.py').name, 'b.py')
self.assertEqual(P('/a/b.py').name, 'b.py')
def test_suffix_common(self):
P = self.cls
self.assertEqual(P('').suffix, '')
self.assertEqual(P('.').suffix, '')
self.assertEqual(P('..').suffix, '')
self.assertEqual(P('/').suffix, '')
self.assertEqual(P('a/b').suffix, '')
self.assertEqual(P('/a/b').suffix, '')
self.assertEqual(P('/a/b/.').suffix, '')
self.assertEqual(P('a/b.py').suffix, '.py')
self.assertEqual(P('/a/b.py').suffix, '.py')
self.assertEqual(P('a/.hgrc').suffix, '')
self.assertEqual(P('/a/.hgrc').suffix, '')
self.assertEqual(P('a/.hg.rc').suffix, '.rc')
self.assertEqual(P('/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('/a/Some name. Ending with a dot.').suffix, '')
def test_suffixes_common(self):
P = self.cls
self.assertEqual(P('').suffixes, [])
self.assertEqual(P('.').suffixes, [])
self.assertEqual(P('/').suffixes, [])
self.assertEqual(P('a/b').suffixes, [])
self.assertEqual(P('/a/b').suffixes, [])
self.assertEqual(P('/a/b/.').suffixes, [])
self.assertEqual(P('a/b.py').suffixes, ['.py'])
self.assertEqual(P('/a/b.py').suffixes, ['.py'])
self.assertEqual(P('a/.hgrc').suffixes, [])
self.assertEqual(P('/a/.hgrc').suffixes, [])
self.assertEqual(P('a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('/a/Some name. Ending with a dot.').suffixes, [])
def test_stem_common(self):
P = self.cls
self.assertEqual(P('').stem, '')
self.assertEqual(P('.').stem, '')
self.assertEqual(P('..').stem, '..')
self.assertEqual(P('/').stem, '')
self.assertEqual(P('a/b').stem, 'b')
self.assertEqual(P('a/b.py').stem, 'b')
self.assertEqual(P('a/.hgrc').stem, '.hgrc')
self.assertEqual(P('a/.hg.rc').stem, '.hg')
self.assertEqual(P('a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
def test_with_name_common(self):
P = self.cls
self.assertEqual(P('a/b').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/b.py').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b.py').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/Dot ending.').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/Dot ending.').with_name('d.xml'), P('/a/d.xml'))
self.assertRaises(ValueError, P('').with_name, 'd.xml')
self.assertRaises(ValueError, P('.').with_name, 'd.xml')
self.assertRaises(ValueError, P('/').with_name, 'd.xml')
def test_with_suffix_common(self):
P = self.cls
self.assertEqual(P('a/b').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b').with_suffix('.gz'), P('/a/b.gz'))
self.assertEqual(P('a/b.py').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b.py').with_suffix('.gz'), P('/a/b.gz'))
# Path doesn't have a "filename" component
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
# Invalid suffix
self.assertRaises(ValueError, P('a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('a/b').with_suffix, '/')
self.assertRaises(ValueError, P('a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('a/b').with_suffix, '.c/.d')
def test_relative_to_common(self):
P = self.cls
p = P('a/b')
self.assertRaises(TypeError, p.relative_to)
if sys.version_info > (3,):
self.assertRaises(TypeError, p.relative_to, b'a')
self.assertEqual(p.relative_to(P()), P('a/b'))
self.assertEqual(p.relative_to(''), P('a/b'))
self.assertEqual(p.relative_to(P('a')), P('b'))
self.assertEqual(p.relative_to('a'), P('b'))
self.assertEqual(p.relative_to('a/'), P('b'))
self.assertEqual(p.relative_to(P('a/b')), P())
self.assertEqual(p.relative_to('a/b'), P())
# With several args
self.assertEqual(p.relative_to('a', 'b'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('c'))
self.assertRaises(ValueError, p.relative_to, P('a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('a/c'))
self.assertRaises(ValueError, p.relative_to, P('/a'))
p = P('/a/b')
self.assertEqual(p.relative_to(P('/')), P('a/b'))
self.assertEqual(p.relative_to('/'), P('a/b'))
self.assertEqual(p.relative_to(P('/a')), P('b'))
self.assertEqual(p.relative_to('/a'), P('b'))
self.assertEqual(p.relative_to('/a/'), P('b'))
self.assertEqual(p.relative_to(P('/a/b')), P())
self.assertEqual(p.relative_to('/a/b'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/c'))
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('a'))
def test_pickling_common(self):
P = self.cls
p = P('/a/b')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertIs(pp.__class__, p.__class__)
self.assertEqual(pp, p)
self.assertEqual(hash(pp), hash(p))
self.assertEqual(str(pp), str(p))
class PurePosixPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePosixPath
def test_root(self):
P = self.cls
self.assertEqual(P('/a/b').root, '/')
self.assertEqual(P('///a/b').root, '/')
# POSIX special case for two leading slashes
self.assertEqual(P('//a/b').root, '//')
def test_eq(self):
P = self.cls
self.assertNotEqual(P('a/b'), P('A/b'))
self.assertEqual(P('/a'), P('///a'))
self.assertNotEqual(P('/a'), P('//a'))
def test_as_uri(self):
P = self.cls
self.assertEqual(P('/').as_uri(), 'file:///')
self.assertEqual(P('/a/b.c').as_uri(), 'file:///a/b.c')
self.assertEqual(P('/a/b%#c').as_uri(), 'file:///a/b%25%23c')
@with_fsencode
def test_as_uri_non_ascii(self):
from urllib.parse import quote_from_bytes
P = self.cls
try:
os.fsencode('\xe9')
except UnicodeEncodeError:
self.skipTest("\\xe9 cannot be encoded to the filesystem encoding")
self.assertEqual(P('/a/b\xe9').as_uri(),
'file:///a/b' + quote_from_bytes(os.fsencode('\xe9')))
def test_match(self):
P = self.cls
self.assertFalse(P('A.py').match('a.PY'))
def test_is_absolute(self):
P = self.cls
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertTrue(P('/').is_absolute())
self.assertTrue(P('/a').is_absolute())
self.assertTrue(P('/a/b/').is_absolute())
self.assertTrue(P('//a').is_absolute())
self.assertTrue(P('//a/b').is_absolute())
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(False, P('/dev/con/PRN/NUL').is_reserved())
def test_join(self):
P = self.cls
p = P('//a')
pp = p.joinpath('b')
self.assertEqual(pp, P('//a/b'))
pp = P('/a').joinpath('//c')
self.assertEqual(pp, P('//c'))
pp = P('//a').joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div(self):
# Basically the same as joinpath()
P = self.cls
p = P('//a')
pp = p / 'b'
self.assertEqual(pp, P('//a/b'))
pp = P('/a') / '//c'
self.assertEqual(pp, P('//c'))
pp = P('//a') / '/c'
self.assertEqual(pp, P('/c'))
class PureWindowsPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PureWindowsPath
equivalences = _BasePurePathTest.equivalences.copy()
equivalences.update({
'c:a': [ ('c:', 'a'), ('c:', 'a/'), ('/', 'c:', 'a') ],
'c:/a': [
('c:/', 'a'), ('c:', '/', 'a'), ('c:', '/a'),
('/z', 'c:/', 'a'), ('//x/y', 'c:/', 'a'),
],
'//a/b/': [ ('//a/b',) ],
'//a/b/c': [
('//a/b', 'c'), ('//a/b/', 'c'),
],
})
def test_str(self):
p = self.cls('a/b/c')
self.assertEqual(str(p), 'a\\b\\c')
p = self.cls('c:/a/b/c')
self.assertEqual(str(p), 'c:\\a\\b\\c')
p = self.cls('//a/b')
self.assertEqual(str(p), '\\\\a\\b\\')
p = self.cls('//a/b/c')
self.assertEqual(str(p), '\\\\a\\b\\c')
p = self.cls('//a/b/c/d')
self.assertEqual(str(p), '\\\\a\\b\\c\\d')
def test_eq(self):
P = self.cls
self.assertEqual(P('c:a/b'), P('c:a/b'))
self.assertEqual(P('c:a/b'), P('c:', 'a', 'b'))
self.assertNotEqual(P('c:a/b'), P('d:a/b'))
self.assertNotEqual(P('c:a/b'), P('c:/a/b'))
self.assertNotEqual(P('/a/b'), P('c:/a/b'))
# Case-insensitivity
self.assertEqual(P('a/B'), P('A/b'))
self.assertEqual(P('C:a/B'), P('c:A/b'))
self.assertEqual(P('//Some/SHARE/a/B'), P('//somE/share/A/b'))
@with_fsencode
def test_as_uri(self):
P = self.cls
with self.assertRaises(ValueError):
P('/a/b').as_uri()
with self.assertRaises(ValueError):
P('c:a/b').as_uri()
self.assertEqual(P('c:/').as_uri(), 'file:///c:/')
self.assertEqual(P('c:/a/b.c').as_uri(), 'file:///c:/a/b.c')
self.assertEqual(P('c:/a/b%#c').as_uri(), 'file:///c:/a/b%25%23c')
self.assertEqual(P('c:/a/b\xe9').as_uri(), 'file:///c:/a/b%C3%A9')
self.assertEqual(P('//some/share/').as_uri(), 'file://some/share/')
self.assertEqual(P('//some/share/a/b.c').as_uri(),
'file://some/share/a/b.c')
self.assertEqual(P('//some/share/a/b%#c\xe9').as_uri(),
'file://some/share/a/b%25%23c%C3%A9')
def test_match_common(self):
P = self.cls
# Absolute patterns
self.assertTrue(P('c:/').match('/'))
self.assertTrue(P('c:/b.py').match('/*.py'))
self.assertTrue(P('c:/b.py').match('c:*.py'))
self.assertTrue(P('c:/b.py').match('c:/*.py'))
self.assertFalse(P('d:/b.py').match('c:/*.py')) # wrong drive
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('b.py').match('c:*.py'))
self.assertFalse(P('b.py').match('c:/*.py'))
self.assertFalse(P('c:b.py').match('/*.py'))
self.assertFalse(P('c:b.py').match('c:/*.py'))
self.assertFalse(P('/b.py').match('c:*.py'))
self.assertFalse(P('/b.py').match('c:/*.py'))
# UNC patterns
self.assertTrue(P('//some/share/a.py').match('/*.py'))
self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
# Case-insensitivity
self.assertTrue(P('B.py').match('b.PY'))
self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
def test_ordering_common(self):
# Case-insensitivity
def assertOrderedEqual(a, b):
self.assertLessEqual(a, b)
self.assertGreaterEqual(b, a)
P = self.cls
p = P('c:A/b')
q = P('C:a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
p = P('//some/Share/A/b')
q = P('//Some/SHARE/a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
def test_parts(self):
P = self.cls
p = P('c:a/b')
parts = p.parts
self.assertEqual(parts, ('c:', 'a', 'b'))
p = P('c:/a/b')
parts = p.parts
self.assertEqual(parts, ('c:\\', 'a', 'b'))
p = P('//a/b/c/d')
parts = p.parts
self.assertEqual(parts, ('\\\\a\\b\\', 'c', 'd'))
def test_parent(self):
# Anchored
P = self.cls
p = P('z:a/b/c')
self.assertEqual(p.parent, P('z:a/b'))
self.assertEqual(p.parent.parent, P('z:a'))
self.assertEqual(p.parent.parent.parent, P('z:'))
self.assertEqual(p.parent.parent.parent.parent, P('z:'))
p = P('z:/a/b/c')
self.assertEqual(p.parent, P('z:/a/b'))
self.assertEqual(p.parent.parent, P('z:/a'))
self.assertEqual(p.parent.parent.parent, P('z:/'))
self.assertEqual(p.parent.parent.parent.parent, P('z:/'))
p = P('//a/b/c/d')
self.assertEqual(p.parent, P('//a/b/c'))
self.assertEqual(p.parent.parent, P('//a/b'))
self.assertEqual(p.parent.parent.parent, P('//a/b'))
def test_parents(self):
# Anchored
P = self.cls
p = P('z:a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:a'))
self.assertEqual(par[1], P('z:'))
self.assertEqual(list(par), [P('z:a'), P('z:')])
with self.assertRaises(IndexError):
par[2]
p = P('z:/a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:/a'))
self.assertEqual(par[1], P('z:/'))
self.assertEqual(list(par), [P('z:/a'), P('z:/')])
with self.assertRaises(IndexError):
par[2]
p = P('//a/b/c/d')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('//a/b/c'))
self.assertEqual(par[1], P('//a/b'))
self.assertEqual(list(par), [P('//a/b/c'), P('//a/b')])
with self.assertRaises(IndexError):
par[2]
def test_drive(self):
P = self.cls
self.assertEqual(P('c:').drive, 'c:')
self.assertEqual(P('c:a/b').drive, 'c:')
self.assertEqual(P('c:/').drive, 'c:')
self.assertEqual(P('c:/a/b/').drive, 'c:')
self.assertEqual(P('//a/b').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/c/d').drive, '\\\\a\\b')
def test_root(self):
P = self.cls
self.assertEqual(P('c:').root, '')
self.assertEqual(P('c:a/b').root, '')
self.assertEqual(P('c:/').root, '\\')
self.assertEqual(P('c:/a/b/').root, '\\')
self.assertEqual(P('//a/b').root, '\\')
self.assertEqual(P('//a/b/').root, '\\')
self.assertEqual(P('//a/b/c/d').root, '\\')
def test_anchor(self):
P = self.cls
self.assertEqual(P('c:').anchor, 'c:')
self.assertEqual(P('c:a/b').anchor, 'c:')
self.assertEqual(P('c:/').anchor, 'c:\\')
self.assertEqual(P('c:/a/b/').anchor, 'c:\\')
self.assertEqual(P('//a/b').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/c/d').anchor, '\\\\a\\b\\')
def test_name(self):
P = self.cls
self.assertEqual(P('c:').name, '')
self.assertEqual(P('c:/').name, '')
self.assertEqual(P('c:a/b').name, 'b')
self.assertEqual(P('c:/a/b').name, 'b')
self.assertEqual(P('c:a/b.py').name, 'b.py')
self.assertEqual(P('c:/a/b.py').name, 'b.py')
self.assertEqual(P('//My.py/Share.php').name, '')
self.assertEqual(P('//My.py/Share.php/a/b').name, 'b')
def test_suffix(self):
P = self.cls
self.assertEqual(P('c:').suffix, '')
self.assertEqual(P('c:/').suffix, '')
self.assertEqual(P('c:a/b').suffix, '')
self.assertEqual(P('c:/a/b').suffix, '')
self.assertEqual(P('c:a/b.py').suffix, '.py')
self.assertEqual(P('c:/a/b.py').suffix, '.py')
self.assertEqual(P('c:a/.hgrc').suffix, '')
self.assertEqual(P('c:/a/.hgrc').suffix, '')
self.assertEqual(P('c:a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('//My.py/Share.php').suffix, '')
self.assertEqual(P('//My.py/Share.php/a/b').suffix, '')
def test_suffixes(self):
P = self.cls
self.assertEqual(P('c:').suffixes, [])
self.assertEqual(P('c:/').suffixes, [])
self.assertEqual(P('c:a/b').suffixes, [])
self.assertEqual(P('c:/a/b').suffixes, [])
self.assertEqual(P('c:a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:/a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:a/.hgrc').suffixes, [])
self.assertEqual(P('c:/a/.hgrc').suffixes, [])
self.assertEqual(P('c:a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('c:/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('//My.py/Share.php').suffixes, [])
self.assertEqual(P('//My.py/Share.php/a/b').suffixes, [])
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffixes, [])
def test_stem(self):
P = self.cls
self.assertEqual(P('c:').stem, '')
self.assertEqual(P('c:.').stem, '')
self.assertEqual(P('c:..').stem, '..')
self.assertEqual(P('c:/').stem, '')
self.assertEqual(P('c:a/b').stem, 'b')
self.assertEqual(P('c:a/b.py').stem, 'b')
self.assertEqual(P('c:a/.hgrc').stem, '.hgrc')
self.assertEqual(P('c:a/.hg.rc').stem, '.hg')
self.assertEqual(P('c:a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('c:a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
def test_with_name(self):
P = self.cls
self.assertEqual(P('c:a/b').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/b').with_name('d.xml'), P('c:/a/d.xml'))
self.assertEqual(P('c:a/Dot ending.').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/Dot ending.').with_name('d.xml'), P('c:/a/d.xml'))
self.assertRaises(ValueError, P('c:').with_name, 'd.xml')
self.assertRaises(ValueError, P('c:/').with_name, 'd.xml')
self.assertRaises(ValueError, P('//My/Share').with_name, 'd.xml')
def test_with_suffix(self):
P = self.cls
self.assertEqual(P('c:a/b').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b').with_suffix('.gz'), P('c:/a/b.gz'))
self.assertEqual(P('c:a/b.py').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b.py').with_suffix('.gz'), P('c:/a/b.gz'))
# Path doesn't have a "filename" component
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
self.assertRaises(ValueError, P('//My/Share').with_suffix, '.gz')
# Invalid suffix
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c\\d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c\\d')
def test_relative_to(self):
P = self.cls
p = P('C:Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:foO')), P('Bar'))
self.assertEqual(p.relative_to('c:foO'), P('Bar'))
self.assertEqual(p.relative_to('c:foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:foO/baR')), P())
self.assertEqual(p.relative_to('c:foO/baR'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('Foo'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Baz'))
p = P('C:/Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('/Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('/Foo/Bar'))
self.assertEqual(str(p.relative_to(P('c:'))), '\\Foo\\Bar')
self.assertEqual(str(p.relative_to('c:')), '\\Foo\\Bar')
self.assertEqual(p.relative_to(P('c:/')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:/foO')), P('Bar'))
self.assertEqual(p.relative_to('c:/foO'), P('Bar'))
self.assertEqual(p.relative_to('c:/foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:/foO/baR')), P())
self.assertEqual(p.relative_to('c:/foO/baR'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('C:/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo'))
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('d:/'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//C/Foo'))
# UNC paths
p = P('//Server/Share/Foo/Bar')
self.assertEqual(p.relative_to(P('//sErver/sHare')), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare'), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo')), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo'), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo/'), P('Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo/Bar')), P())
self.assertEqual(p.relative_to('//sErver/sHare/Foo/Bar'), P())
# Unrelated paths
self.assertRaises(ValueError, p.relative_to, P('/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('c:/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//z/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//Server/z/Foo'))
def test_is_absolute(self):
P = self.cls
# Under NT, only paths with both a drive and a root are absolute
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertFalse(P('/').is_absolute())
self.assertFalse(P('/a').is_absolute())
self.assertFalse(P('/a/b/').is_absolute())
self.assertFalse(P('c:').is_absolute())
self.assertFalse(P('c:a').is_absolute())
self.assertFalse(P('c:a/b/').is_absolute())
self.assertTrue(P('c:/').is_absolute())
self.assertTrue(P('c:/a').is_absolute())
self.assertTrue(P('c:/a/b/').is_absolute())
# UNC paths are absolute by definition
self.assertTrue(P('//a/b').is_absolute())
self.assertTrue(P('//a/b/').is_absolute())
self.assertTrue(P('//a/b/c').is_absolute())
self.assertTrue(P('//a/b/c/d').is_absolute())
def test_join(self):
P = self.cls
p = P('C:/a/b')
pp = p.joinpath('x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('/x/y')
self.assertEqual(pp, P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
pp = p.joinpath('D:x/y')
self.assertEqual(pp, P('D:x/y'))
pp = p.joinpath('D:/x/y')
self.assertEqual(pp, P('D:/x/y'))
pp = p.joinpath('//host/share/x/y')
self.assertEqual(pp, P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
pp = p.joinpath('c:x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('c:/x/y')
self.assertEqual(pp, P('C:/x/y'))
def test_div(self):
# Basically the same as joinpath()
P = self.cls
p = P('C:/a/b')
self.assertEqual(p / 'x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'x' / 'y', P('C:/a/b/x/y'))
self.assertEqual(p / '/x/y', P('C:/x/y'))
self.assertEqual(p / '/x' / 'y', P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
self.assertEqual(p / 'D:x/y', P('D:x/y'))
self.assertEqual(p / 'D:' / 'x/y', P('D:x/y'))
self.assertEqual(p / 'D:/x/y', P('D:/x/y'))
self.assertEqual(p / 'D:' / '/x/y', P('D:/x/y'))
self.assertEqual(p / '//host/share/x/y', P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
self.assertEqual(p / 'c:x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'c:/x/y', P('C:/x/y'))
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(True, P('con').is_reserved())
self.assertIs(True, P('NUL').is_reserved())
self.assertIs(True, P('NUL.txt').is_reserved())
self.assertIs(True, P('com1').is_reserved())
self.assertIs(True, P('com9.bar').is_reserved())
self.assertIs(False, P('bar.com9').is_reserved())
self.assertIs(True, P('lpt1').is_reserved())
self.assertIs(True, P('lpt9.bar').is_reserved())
self.assertIs(False, P('bar.lpt9').is_reserved())
# Only the last component matters
self.assertIs(False, P('c:/NUL/con/baz').is_reserved())
# UNC paths are never reserved
self.assertIs(False, P('//my/share/nul/con/aux').is_reserved())
class PurePathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePath
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.PureWindowsPath if os.name == 'nt' else pathlib.PurePosixPath)
def test_different_flavours_unequal(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
self.assertNotEqual(p, q)
@unittest.skipIf(sys.version_info < (3, 0),
'Most types are orderable in Python 2')
def test_different_flavours_unordered(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
with self.assertRaises(TypeError):
p < q
with self.assertRaises(TypeError):
p <= q
with self.assertRaises(TypeError):
p > q
with self.assertRaises(TypeError):
p >= q
#
# Tests for the concrete classes
#
# Make sure any symbolic links in the base test path are resolved
BASE = os.path.realpath(TESTFN)
join = lambda *x: os.path.join(BASE, *x)
rel_join = lambda *x: os.path.join(TESTFN, *x)
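# e.g. join('dirB', 'fileB') == os.path.join(BASE, 'dirB', 'fileB');
# rel_join builds the same path relative to TESTFN rather than its realpath.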
def symlink_skip_reason():
if not pathlib.supports_symlinks:
return "no system support for symlinks"
try:
os.symlink(__file__, BASE)
except OSError as e:
return str(e)
else:
support.unlink(BASE)
return None
symlink_skip_reason = symlink_skip_reason()
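# Note: the function above is immediately replaced by its own result, so from
# here on symlink_skip_reason is either a skip-message string or None; the
# with_symlinks decorator below relies on that.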
only_nt = unittest.skipIf(os.name != 'nt',
'test requires a Windows-compatible system')
only_posix = unittest.skipIf(os.name == 'nt',
'test requires a POSIX-compatible system')
with_symlinks = unittest.skipIf(symlink_skip_reason, symlink_skip_reason)
@only_posix
class PosixPathAsPureTest(PurePosixPathTest):
cls = pathlib.PosixPath
@only_nt
class WindowsPathAsPureTest(PureWindowsPathTest):
cls = pathlib.WindowsPath
class _BasePathTest(object):
"""Tests for the FS-accessing functionalities of the Path classes."""
# (BASE)
# |
# |-- dirA/
# |-- linkC -> "../dirB"
# |-- dirB/
# | |-- fileB
# |-- linkD -> "../dirB"
# |-- dirC/
# | |-- fileC
# | |-- fileD
# |-- fileA
# |-- linkA -> "fileA"
# |-- linkB -> "dirB"
#
def setUp(self):
os.mkdir(BASE)
self.addCleanup(shutil.rmtree, BASE)
os.mkdir(join('dirA'))
os.mkdir(join('dirB'))
os.mkdir(join('dirC'))
os.mkdir(join('dirC', 'dirD'))
with open(join('fileA'), 'wb') as f:
f.write(b"this is file A\n")
with open(join('dirB', 'fileB'), 'wb') as f:
f.write(b"this is file B\n")
with open(join('dirC', 'fileC'), 'wb') as f:
f.write(b"this is file C\n")
with open(join('dirC', 'dirD', 'fileD'), 'wb') as f:
f.write(b"this is file D\n")
if not symlink_skip_reason:
# Relative symlinks
os.symlink('fileA', join('linkA'))
os.symlink('non-existing', join('brokenLink'))
self.dirlink('dirB', join('linkB'))
self.dirlink(os.path.join('..', 'dirB'), join('dirA', 'linkC'))
# This one goes upwards but doesn't create a loop
self.dirlink(os.path.join('..', 'dirB'), join('dirB', 'linkD'))
if os.name == 'nt':
# Workaround for http://bugs.python.org/issue13772
def dirlink(self, src, dest):
os.symlink(src, dest, target_is_directory=True)
else:
def dirlink(self, src, dest):
os.symlink(src, dest)
def assertSame(self, path_a, path_b):
self.assertTrue(os.path.samefile(str(path_a), str(path_b)),
"%r and %r don't point to the same file" %
(path_a, path_b))
def assertFileNotFound(self, func, *args, **kwargs):
exc = FileNotFoundError if sys.version_info >= (3, 3) else EnvironmentError
with self.assertRaises(exc) as cm:
# Python 2.6 kludge for http://bugs.python.org/issue7853
try:
func(*args, **kwargs)
except:
raise
self.assertEqual(cm.exception.errno, errno.ENOENT)
def _test_cwd(self, p):
q = self.cls(os.getcwd())
self.assertEqual(p, q)
self.assertEqual(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_cwd(self):
p = self.cls.cwd()
self._test_cwd(p)
def test_empty_path(self):
# The empty path points to '.'
p = self.cls('')
self.assertEqual(p.stat(), os.stat('.'))
def test_exists(self):
P = self.cls
p = P(BASE)
self.assertIs(True, p.exists())
self.assertIs(True, (p / 'dirA').exists())
self.assertIs(True, (p / 'fileA').exists())
if not symlink_skip_reason:
self.assertIs(True, (p / 'linkA').exists())
self.assertIs(True, (p / 'linkB').exists())
self.assertIs(False, (p / 'foo').exists())
self.assertIs(False, P('/xyzzy').exists())
def test_open_common(self):
p = self.cls(BASE)
with (p / 'fileA').open('r') as f:
self.assertIsInstance(f, io.TextIOBase)
self.assertEqual(f.read(), "this is file A\n")
with (p / 'fileA').open('rb') as f:
self.assertIsInstance(f, io.BufferedIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
with (p / 'fileA').open('rb', buffering=0) as f:
self.assertIsInstance(f, io.RawIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
def test_iterdir(self):
P = self.cls
p = P(BASE)
it = p.iterdir()
paths = set(it)
expected = ['dirA', 'dirB', 'dirC', 'fileA']
if not symlink_skip_reason:
expected += ['linkA', 'linkB', 'brokenLink']
self.assertEqual(paths, set( P(BASE, q) for q in expected ))
@with_symlinks
def test_iterdir_symlink(self):
# __iter__ on a symlink to a directory
P = self.cls
p = P(BASE, 'linkB')
paths = set(p.iterdir())
expected = set( P(BASE, 'linkB', q) for q in ['fileB', 'linkD'] )
self.assertEqual(paths, expected)
def test_iterdir_nodir(self):
# __iter__ on something that is not a directory
p = self.cls(BASE, 'fileA')
with self.assertRaises(OSError) as cm:
# Python 2.6 kludge for http://bugs.python.org/issue7853
try:
next(p.iterdir())
except:
raise
# ENOENT or EINVAL under Windows, ENOTDIR otherwise
# (see issue #12802)
self.assertIn(cm.exception.errno, (errno.ENOTDIR,
errno.ENOENT, errno.EINVAL))
def test_glob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), set( P(BASE, q) for q in expected ))
P = self.cls
p = P(BASE)
it = p.glob("fileA")
self.assertIsInstance(it, collections.Iterator)
_check(it, ["fileA"])
_check(p.glob("fileB"), [])
_check(p.glob("dir*/file*"), ["dirB/fileB", "dirC/fileC"])
if symlink_skip_reason:
_check(p.glob("*A"), ['dirA', 'fileA'])
else:
_check(p.glob("*A"), ['dirA', 'fileA', 'linkA'])
if symlink_skip_reason:
_check(p.glob("*B/*"), ['dirB/fileB'])
else:
_check(p.glob("*B/*"), ['dirB/fileB', 'dirB/linkD',
'linkB/fileB', 'linkB/linkD'])
if symlink_skip_reason:
_check(p.glob("*/fileB"), ['dirB/fileB'])
else:
_check(p.glob("*/fileB"), ['dirB/fileB', 'linkB/fileB'])
_check(p.glob("dirC/**"), ['dirC/fileC', 'dirC/dirD', 'dirC/dirD/fileD'])
def test_rglob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), set( P(BASE, q) for q in expected ))
P = self.cls
p = P(BASE)
it = p.rglob("fileA")
self.assertIsInstance(it, collections.Iterator)
# XXX cannot test because of symlink loops in the test setup
#_check(it, ["fileA"])
#_check(p.rglob("fileB"), ["dirB/fileB"])
#_check(p.rglob("*/fileA"), [""])
#_check(p.rglob("*/fileB"), ["dirB/fileB"])
#_check(p.rglob("file*"), ["fileA", "dirB/fileB"])
# No symlink loops here
p = P(BASE, "dirC")
_check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"])
_check(p.rglob("*/*"), ["dirC/dirD/fileD"])
def test_glob_dotdot(self):
# ".." is not special in globs
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("..")), set([ P(BASE, "..") ]))
self.assertEqual(set(p.glob("dirA/../file*")), set([ P(BASE, "dirA/../fileA") ]))
self.assertEqual(set(p.glob("../xyzzy")), set())
def _check_resolve_relative(self, p, expected):
q = p.resolve()
self.assertEqual(q, expected)
def _check_resolve_absolute(self, p, expected):
q = p.resolve()
self.assertEqual(q, expected)
@with_symlinks
def test_resolve_common(self):
P = self.cls
p = P(BASE, 'foo')
with self.assertRaises(OSError) as cm:
p.resolve()
self.assertEqual(cm.exception.errno, errno.ENOENT)
# These are all relative symlinks
p = P(BASE, 'dirB', 'fileB')
self._check_resolve_relative(p, p)
p = P(BASE, 'linkA')
self._check_resolve_relative(p, P(BASE, 'fileA'))
p = P(BASE, 'dirA', 'linkC', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
p = P(BASE, 'dirB', 'linkD', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
# Now create absolute symlinks
d = tempfile.mkdtemp(suffix='-dirD')
self.addCleanup(shutil.rmtree, d)
os.symlink(os.path.join(d), join('dirA', 'linkX'))
os.symlink(join('dirB'), os.path.join(d, 'linkY'))
p = P(BASE, 'dirA', 'linkX', 'linkY', 'fileB')
self._check_resolve_absolute(p, P(BASE, 'dirB', 'fileB'))
@with_symlinks
def test_resolve_dot(self):
# See https://bitbucket.org/pitrou/pathlib/issue/9/pathresolve-fails-on-complex-symlinks
p = self.cls(BASE)
self.dirlink('.', join('0'))
self.dirlink(os.path.join('0', '0'), join('1'))
self.dirlink(os.path.join('1', '1'), join('2'))
q = p / '2'
self.assertEqual(q.resolve(), p)
def test_chmod(self):
p = self.cls(BASE) / 'fileA'
mode = p.stat().st_mode
# Clear writable bit
new_mode = mode & ~0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# Set writable bit
new_mode = mode | 0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# XXX also need a test for lchmod
def test_stat(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(p.stat(), st)
# Change file mode by flipping write bit
p.chmod(st.st_mode ^ 0o222)
self.addCleanup(p.chmod, st.st_mode)
self.assertNotEqual(p.stat(), st)
@with_symlinks
def test_lstat(self):
        p = self.cls(BASE) / 'linkA'
st = p.stat()
self.assertNotEqual(st, p.lstat())
def test_lstat_nosymlink(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(st, p.lstat())
@unittest.skipUnless(pwd, "the pwd module is needed for this test")
def test_owner(self):
p = self.cls(BASE) / 'fileA'
uid = p.stat().st_uid
try:
name = pwd.getpwuid(uid).pw_name
except KeyError:
self.skipTest(
"user %d doesn't have an entry in the system database" % uid)
self.assertEqual(name, p.owner())
@unittest.skipUnless(grp, "the grp module is needed for this test")
def test_group(self):
p = self.cls(BASE) / 'fileA'
gid = p.stat().st_gid
try:
name = grp.getgrgid(gid).gr_name
except KeyError:
self.skipTest(
"group %d doesn't have an entry in the system database" % gid)
self.assertEqual(name, p.group())
def test_unlink(self):
p = self.cls(BASE) / 'fileA'
p.unlink()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rmdir(self):
p = self.cls(BASE) / 'dirA'
for q in p.iterdir():
q.unlink()
p.rmdir()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_rename(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Renaming to another path
q = P / 'dirA' / 'fileAA'
p.rename(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Renaming to a str of a relative path
r = rel_join('fileAAA')
q.rename(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_replace(self):
P = self.cls(BASE)
p = P / 'fileA'
if sys.version_info < (3, 3):
self.assertRaises(NotImplementedError, p.replace, p)
return
size = p.stat().st_size
# Replacing a non-existing path
q = P / 'dirA' / 'fileAA'
p.replace(q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Replacing another (existing) path
r = rel_join('dirB', 'fileB')
q.replace(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_touch_common(self):
P = self.cls(BASE)
p = P / 'newfileA'
self.assertFalse(p.exists())
p.touch()
self.assertTrue(p.exists())
old_mtime = p.stat().st_mtime
# Rewind the mtime sufficiently far in the past to work around
# filesystem-specific timestamp granularity.
os.utime(str(p), (old_mtime - 10, old_mtime - 10))
# The file mtime is refreshed by calling touch() again
p.touch()
self.assertGreaterEqual(p.stat().st_mtime, old_mtime)
p = P / 'newfileB'
self.assertFalse(p.exists())
p.touch(mode=0o700, exist_ok=False)
self.assertTrue(p.exists())
self.assertRaises(OSError, p.touch, exist_ok=False)
def test_touch_nochange(self):
P = self.cls(BASE)
p = P / 'fileA'
p.touch()
with p.open('rb') as f:
self.assertEqual(f.read().strip(), b"this is file A")
def test_mkdir(self):
P = self.cls(BASE)
p = P / 'newdirA'
self.assertFalse(p.exists())
p.mkdir()
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
# Python 2.6 kludge for http://bugs.python.org/issue7853
try:
p.mkdir()
except:
raise
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_parents(self):
# Creating a chain of directories
p = self.cls(BASE, 'newdirB', 'newdirC')
self.assertFalse(p.exists())
with self.assertRaises(OSError) as cm:
# Python 2.6 kludge for http://bugs.python.org/issue7853
try:
p.mkdir()
except:
raise
self.assertEqual(cm.exception.errno, errno.ENOENT)
p.mkdir(parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
try:
p.mkdir(parents=True)
except:
raise
self.assertEqual(cm.exception.errno, errno.EEXIST)
# test `mode` arg
mode = stat.S_IMODE(p.stat().st_mode) # default mode
p = self.cls(BASE, 'newdirD', 'newdirE')
p.mkdir(0o555, parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
if os.name != 'nt':
# the directory's permissions follow the mode argument
self.assertEqual(stat.S_IMODE(p.stat().st_mode), 0o7555 & mode)
# the parent's permissions follow the default process settings
self.assertEqual(stat.S_IMODE(p.parent.stat().st_mode), mode)
@with_symlinks
def test_symlink_to(self):
P = self.cls(BASE)
target = P / 'fileA'
# Symlinking a path target
link = P / 'dirA' / 'linkAA'
link.symlink_to(target)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
# Symlinking a str target
link = P / 'dirA' / 'linkAAA'
link.symlink_to(str(target))
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertFalse(link.is_dir())
# Symlinking to a directory
target = P / 'dirB'
link = P / 'dirA' / 'linkAAAA'
link.symlink_to(target, target_is_directory=True)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertTrue(link.is_dir())
self.assertTrue(list(link.iterdir()))
def test_is_dir(self):
P = self.cls(BASE)
self.assertTrue((P / 'dirA').is_dir())
self.assertFalse((P / 'fileA').is_dir())
self.assertFalse((P / 'non-existing').is_dir())
if not symlink_skip_reason:
self.assertFalse((P / 'linkA').is_dir())
self.assertTrue((P / 'linkB').is_dir())
        self.assertFalse((P / 'brokenLink').is_dir())
def test_is_file(self):
P = self.cls(BASE)
self.assertTrue((P / 'fileA').is_file())
self.assertFalse((P / 'dirA').is_file())
self.assertFalse((P / 'non-existing').is_file())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_file())
self.assertFalse((P / 'linkB').is_file())
        self.assertFalse((P / 'brokenLink').is_file())
def test_is_symlink(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_symlink())
self.assertFalse((P / 'dirA').is_symlink())
self.assertFalse((P / 'non-existing').is_symlink())
if not symlink_skip_reason:
self.assertTrue((P / 'linkA').is_symlink())
self.assertTrue((P / 'linkB').is_symlink())
        self.assertTrue((P / 'brokenLink').is_symlink())
def test_is_fifo_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_fifo())
self.assertFalse((P / 'dirA').is_fifo())
self.assertFalse((P / 'non-existing').is_fifo())
@unittest.skipUnless(hasattr(os, "mkfifo"), "os.mkfifo() required")
def test_is_fifo_true(self):
P = self.cls(BASE, 'myfifo')
os.mkfifo(str(P))
self.assertTrue(P.is_fifo())
self.assertFalse(P.is_socket())
self.assertFalse(P.is_file())
def test_is_socket_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_socket())
self.assertFalse((P / 'dirA').is_socket())
self.assertFalse((P / 'non-existing').is_socket())
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
def test_is_socket_true(self):
P = self.cls(BASE, 'mysock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(sock.close)
try:
sock.bind(str(P))
except IOError as e:
if "AF_UNIX path too long" in str(e):
self.skipTest("cannot bind Unix socket: " + str(e))
raise e
self.assertTrue(P.is_socket())
self.assertFalse(P.is_fifo())
self.assertFalse(P.is_file())
def test_is_block_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_block_device())
self.assertFalse((P / 'dirA').is_block_device())
self.assertFalse((P / 'non-existing').is_block_device())
def test_is_char_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_char_device())
self.assertFalse((P / 'dirA').is_char_device())
self.assertFalse((P / 'non-existing').is_char_device())
def test_is_char_device_true(self):
# Under Unix, /dev/null should generally be a char device
P = self.cls('/dev/null')
if not P.exists():
self.skipTest("/dev/null required")
self.assertTrue(P.is_char_device())
self.assertFalse(P.is_block_device())
self.assertFalse(P.is_file())
def test_pickling_common(self):
p = self.cls(BASE, 'fileA')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertEqual(pp.stat(), p.stat())
def test_parts_interning(self):
P = self.cls
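        # pathlib interns path components, so equal parts taken from two
        # different paths should be the very same string object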
p = P('/usr/bin/foo')
q = P('/usr/local/bin')
# 'usr'
self.assertIs(p.parts[1], q.parts[1])
# 'bin'
self.assertIs(p.parts[2], q.parts[3])
def _check_complex_symlinks(self, link0_target):
# Test solving a non-looping chain of symlinks (issue #19887)
P = self.cls(BASE)
self.dirlink(os.path.join('link0', 'link0'), join('link1'))
self.dirlink(os.path.join('link1', 'link1'), join('link2'))
self.dirlink(os.path.join('link2', 'link2'), join('link3'))
self.dirlink(link0_target, join('link0'))
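        # each linkN points at linkN-1/linkN-1, so resolving linkN expands a
        # long but finite chain that bottoms out at link0's target -- no loop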
# Resolve absolute paths
p = (P / 'link0').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link1').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link2').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = (P / 'link3').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
# Resolve relative paths
old_path = os.getcwd()
os.chdir(BASE)
try:
p = self.cls('link0').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link1').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link2').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
p = self.cls('link3').resolve()
self.assertEqual(p, P)
self.assertEqual(str(p), BASE)
finally:
os.chdir(old_path)
@with_symlinks
def test_complex_symlinks_absolute(self):
self._check_complex_symlinks(BASE)
@with_symlinks
def test_complex_symlinks_relative(self):
self._check_complex_symlinks('.')
@with_symlinks
def test_complex_symlinks_relative_dot_dot(self):
self._check_complex_symlinks(os.path.join('dirA', '..'))
class PathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.Path
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.WindowsPath if os.name == 'nt' else pathlib.PosixPath)
def test_unsupported_flavour(self):
if os.name == 'nt':
self.assertRaises(NotImplementedError, pathlib.PosixPath)
else:
self.assertRaises(NotImplementedError, pathlib.WindowsPath)
@only_posix
class PosixPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.PosixPath
def _check_symlink_loop(self, *args):
path = self.cls(*args)
with self.assertRaises(RuntimeError):
print(path.resolve())
def test_open_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
with (p / 'new_file').open('wb'):
pass
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
with (p / 'other_new_file').open('wb'):
pass
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
def test_touch_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
(p / 'new_file').touch()
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
(p / 'other_new_file').touch()
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
(p / 'masked_new_file').touch(mode=0o750)
st = os.stat(join('masked_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o750)
@with_symlinks
def test_resolve_loop(self):
# Loop detection for broken symlinks under POSIX
P = self.cls
# Loops with relative symlinks
os.symlink('linkX/inside', join('linkX'))
self._check_symlink_loop(BASE, 'linkX')
os.symlink('linkY', join('linkY'))
self._check_symlink_loop(BASE, 'linkY')
os.symlink('linkZ/../linkZ', join('linkZ'))
self._check_symlink_loop(BASE, 'linkZ')
# Loops with absolute symlinks
os.symlink(join('linkU/inside'), join('linkU'))
self._check_symlink_loop(BASE, 'linkU')
os.symlink(join('linkV'), join('linkV'))
self._check_symlink_loop(BASE, 'linkV')
os.symlink(join('linkW/../linkW'), join('linkW'))
self._check_symlink_loop(BASE, 'linkW')
def test_glob(self):
P = self.cls
p = P(BASE)
given = set(p.glob("FILEa"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.glob("FILEa*")), set())
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
given = set(p.rglob("FILEd"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.rglob("FILEd*")), set())
@only_nt
class WindowsPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.WindowsPath
def test_glob(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("FILEa")), set([P(BASE, "fileA")]))
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
self.assertEqual(set(p.rglob("FILEd")), set([P(BASE, "dirC/dirD/fileD")]))
def test_precise_glob_preserves_case(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("dirB/FileB")), set([P(BASE, "dirB/FileB")]))
def test_precise_rglob_preserves_case(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.rglob("dirB/FileB")), set([P(BASE, "dirB/FileB")]))
def main():
unittest.main(__name__)
if __name__ == "__main__":
main()
|
apache-2.0
|
k1-hedayati/snippets
|
get_cards_from_flashcardmachine.py
|
1
|
1090
|
from glob import glob
import sys
from urllib.request import urlopen
from pyquery import PyQuery as pq
def extract(page_url):
extracted_cards = []
page = pq(url=page_url,
opener=lambda url, **kw: urlopen(url).read())
front_list = page('div#cards>div>table>tr>td:even')
back_list = page('div#cards>div>table>tr>td:odd')
for card in zip(front_list, back_list):
front = card[0].xpath('.//div[@class="cardContentWrapper"]/text()')
back = card[1].xpath('.//div[@class="cardContentWrapper"]/text()')
front = '<br />'.join(front) if isinstance(front, list) else front
back = '<br />'.join(back) if isinstance(back, list) else back
extracted_cards.append([front, back])
return extracted_cards
def write_csv(cards, filename):
with open(filename, 'w') as file:
for front, back in cards:
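            # note: '\\' in a regular string is a single backslash, so each
            # record is written as "front\back" -- presumably the delimiter
            # the target flashcard importer expects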
file.write(front + '\\' + back + '\n')
files = glob('*')
for file in files:
url = 'file:///home/mrgee/flashcard/' + file
res = extract(url)
write_csv(res, url[url.rfind('/') + 1:url.find('.')])
|
gpl-2.0
|
malkoto1/just_cook
|
SQLAlchemy-1.0.4/test/orm/test_validators.py
|
28
|
9951
|
from test.orm import _fixtures
from sqlalchemy.testing import fixtures, assert_raises, eq_, ne_
from sqlalchemy.orm import mapper, Session, validates, relationship
from sqlalchemy.testing.mock import Mock, call
class ValidatorTest(_fixtures.FixtureTest):
def test_scalar(self):
users = self.tables.users
canary = Mock()
class User(fixtures.ComparableEntity):
@validates('name')
def validate_name(self, key, name):
canary(key, name)
ne_(name, 'fred')
return name + ' modified'
mapper(User, users)
sess = Session()
u1 = User(name='ed')
eq_(u1.name, 'ed modified')
assert_raises(AssertionError, setattr, u1, "name", "fred")
eq_(u1.name, 'ed modified')
eq_(canary.mock_calls, [call('name', 'ed'), call('name', 'fred')])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name='ed modified').one(),
User(name='ed')
)
def test_collection(self):
users, addresses, Address = (self.tables.users,
self.tables.addresses,
self.classes.Address)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates('addresses')
def validate_address(self, key, ad):
canary(key, ad)
assert '@' in ad.email_address
return ad
mapper(User, users, properties={
'addresses': relationship(Address)}
)
mapper(Address, addresses)
sess = Session()
u1 = User(name='edward')
a0 = Address(email_address='noemail')
assert_raises(AssertionError, u1.addresses.append, a0)
a1 = Address(id=15, email_address='[email protected]')
u1.addresses.append(a1)
eq_(canary.mock_calls, [call('addresses', a0), call('addresses', a1)])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name='edward').one(),
User(name='edward', addresses=[Address(email_address='[email protected]')])
)
def test_validators_dict(self):
users, addresses, Address = (self.tables.users,
self.tables.addresses,
self.classes.Address)
class User(fixtures.ComparableEntity):
@validates('name')
def validate_name(self, key, name):
ne_(name, 'fred')
return name + ' modified'
@validates('addresses')
def validate_address(self, key, ad):
assert '@' in ad.email_address
return ad
def simple_function(self, key, value):
return key, value
u_m = mapper(User, users, properties={
'addresses': relationship(Address)
}
)
mapper(Address, addresses)
eq_(
dict((k, v[0].__name__) for k, v in list(u_m.validators.items())),
{'name': 'validate_name',
'addresses': 'validate_address'}
)
def test_validator_w_removes(self):
users, addresses, Address = (self.tables.users,
self.tables.addresses,
self.classes.Address)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates('name', include_removes=True)
def validate_name(self, key, item, remove):
canary(key, item, remove)
return item
@validates('addresses', include_removes=True)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
mapper(User, users, properties={
'addresses': relationship(Address)
})
mapper(Address, addresses)
u1 = User()
u1.name = "ed"
u1.name = "mary"
del u1.name
a1, a2, a3 = Address(), Address(), Address()
u1.addresses.append(a1)
u1.addresses.remove(a1)
u1.addresses = [a1, a2]
u1.addresses = [a2, a3]
eq_(canary.mock_calls, [
call('name', 'ed', False),
call('name', 'mary', False),
call('name', 'mary', True),
# append a1
call('addresses', a1, False),
# remove a1
call('addresses', a1, True),
# set to [a1, a2] - this is two appends
call('addresses', a1, False), call('addresses', a2, False),
# set to [a2, a3] - this is a remove of a1,
# append of a3. the appends are first.
call('addresses', a3, False),
call('addresses', a1, True),
]
)
def test_validator_wo_backrefs_wo_removes(self):
self._test_validator_backrefs(False, False)
def test_validator_wo_backrefs_w_removes(self):
self._test_validator_backrefs(False, True)
def test_validator_w_backrefs_wo_removes(self):
self._test_validator_backrefs(True, False)
def test_validator_w_backrefs_w_removes(self):
self._test_validator_backrefs(True, True)
def _test_validator_backrefs(self, include_backrefs, include_removes):
users, addresses = (self.tables.users,
self.tables.addresses)
canary = Mock()
class User(fixtures.ComparableEntity):
if include_removes:
@validates('addresses', include_removes=True,
include_backrefs=include_backrefs)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates('addresses', include_removes=False,
include_backrefs=include_backrefs)
def validate_address(self, key, item):
canary(key, item)
return item
class Address(fixtures.ComparableEntity):
if include_removes:
@validates('user', include_backrefs=include_backrefs,
include_removes=True)
def validate_user(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates('user', include_backrefs=include_backrefs)
def validate_user(self, key, item):
canary(key, item)
return item
mapper(User, users, properties={
'addresses': relationship(Address, backref="user")
})
mapper(Address, addresses)
u1 = User()
u2 = User()
a1, a2 = Address(), Address()
# 3 append/set, two removes
u1.addresses.append(a1)
u1.addresses.append(a2)
a2.user = u2
del a1.user
u2.addresses.remove(a2)
# copy, so that generation of the
# comparisons don't get caught
calls = list(canary.mock_calls)
if include_backrefs:
if include_removes:
eq_(calls,
[
# append #1
call('addresses', Address(), False),
# backref for append
call('user', User(addresses=[]), False),
# append #2
call('addresses', Address(user=None), False),
# backref for append
call('user', User(addresses=[]), False),
# assign a2.user = u2
call('user', User(addresses=[]), False),
# backref for u1.addresses.remove(a2)
call('addresses', Address(user=None), True),
# backref for u2.addresses.append(a2)
call('addresses', Address(user=None), False),
# del a1.user
call('user', User(addresses=[]), True),
# backref for u1.addresses.remove(a1)
call('addresses', Address(), True),
# u2.addresses.remove(a2)
call('addresses', Address(user=None), True),
# backref for a2.user = None
call('user', None, False)
]
)
else:
eq_(calls,
[
call('addresses', Address()),
call('user', User(addresses=[])),
call('addresses', Address(user=None)),
call('user', User(addresses=[])),
call('user', User(addresses=[])),
call('addresses', Address(user=None)),
call('user', None)
]
)
else:
if include_removes:
eq_(calls,
[
call('addresses', Address(), False),
call('addresses', Address(user=None), False),
call('user', User(addresses=[]), False),
call('user', User(addresses=[]), True),
call('addresses', Address(user=None), True)
]
)
else:
eq_(calls,
[
call('addresses', Address()),
call('addresses', Address(user=None)),
call('user', User(addresses=[]))
]
)
|
gpl-2.0
|
overtherain/scriptfile
|
software/googleAppEngine/lib/django_1_2/tests/regressiontests/test_utils/tests.py
|
66
|
2307
|
r"""
# Some checks of the doctest output normalizer.
# Standard doctests do fairly
>>> from django.utils import simplejson
>>> from django.utils.xmlutils import SimplerXMLGenerator
>>> from StringIO import StringIO
>>> def produce_long():
... return 42L
>>> def produce_int():
... return 42
>>> def produce_json():
... return simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2), 'whiz': 42}])
>>> def produce_xml():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startDocument()
... xml.startElement("foo", {"aaa" : "1.0", "bbb": "2.0"})
... xml.startElement("bar", {"ccc" : "3.0"})
... xml.characters("Hello")
... xml.endElement("bar")
... xml.startElement("whiz", {})
... xml.characters("Goodbye")
... xml.endElement("whiz")
... xml.endElement("foo")
... xml.endDocument()
... return stream.getvalue()
>>> def produce_xml_fragment():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startElement("foo", {"aaa": "1.0", "bbb": "2.0"})
... xml.characters("Hello")
... xml.endElement("foo")
... xml.startElement("bar", {"ccc": "3.0", "ddd": "4.0"})
... xml.endElement("bar")
... return stream.getvalue()
# Long values are normalized and are comparable to normal integers ...
>>> produce_long()
42
# ... and vice versa
>>> produce_int()
42L
# JSON output is normalized for field order, so it doesn't matter
# which order json dictionary attributes are listed in output
>>> produce_json()
'["foo", {"bar": ["baz", null, 1.0, 2], "whiz": 42}]'
>>> produce_json()
'["foo", {"whiz": 42, "bar": ["baz", null, 1.0, 2]}]'
# XML output is normalized for attribute order, so it doesn't matter
# which order XML element attributes are listed in output
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo aaa="1.0" bbb="2.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo bbb="2.0" aaa="1.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml_fragment()
'<foo aaa="1.0" bbb="2.0">Hello</foo><bar ccc="3.0" ddd="4.0"></bar>'
>>> produce_xml_fragment()
'<foo bbb="2.0" aaa="1.0">Hello</foo><bar ddd="4.0" ccc="3.0"></bar>'
"""
|
mit
|
18padx08/PPTex
|
PPTexEnv_x86_64/lib/python2.7/site-packages/scipy/signal/spectral.py
|
9
|
13829
|
"""Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy.lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz if `x` is measured in V and computing
the power spectrum ('spectrum') where `Pxx` has units of V**2 if `x` is
        measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
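    # Map (len(x), nfft) onto welch's (nperseg, nfft): an equal or larger
    # nfft falls through to a plain or zero-padded FFT, while a smaller nfft
    # truncates the input, much like np.fft crops its input when n < len(x)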
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
    noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = %d is greater than x.shape[%d] = %d, using '
                      'nperseg = x.shape[%d]'
                      % (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
# numpy 1.5.1 doesn't have result_type.
outdtype = (np.array([x[0]]) * np.array([1], 'f')).dtype.char.lower()
if win.dtype != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not detrend:
detrend_func = lambda seg: seg
elif not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1]-nperseg+1, step)
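    # e.g. nperseg=256 with noverlap=128 gives step=128, so segments start at
    # samples 0, 128, 256, ... and any trailing partial segment is dropped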
if np.isrealobj(x) and return_onesided:
outshape = list(x.shape)
if nfft % 2 == 0: # even
outshape[-1] = nfft // 2 + 1
Pxx = np.empty(outshape, outdtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
# fftpack.rfft returns the positive frequency part of the fft
# as real values, packed r r i r i r i ...
# this indexing is to extract the matching real and imaginary
# parts, while also handling the pure real zero and nyquist
# frequencies.
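                # e.g. for nfft=8 the layout is
                # [r0, r1, i1, r2, i2, r3, i3, r4] (DC and Nyquist are real)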
if k == 0:
Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
else: # odd
outshape[-1] = (nfft+1) // 2
Pxx = np.empty(outshape, outdtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
if k == 0:
Pxx[..., 0] = xft[..., 0]**2
Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., 0] += xft[..., 0]**2 / (k+1)
Pxx[..., 1:] += (xft[..., 1::2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
        if nfft % 2 == 0:
            # even nfft: DC and Nyquist bins are unique; double the rest
            Pxx[..., 1:-1] *= 2*scale
            Pxx[..., (0,-1)] *= scale
        else:
            # odd nfft: there is no Nyquist bin, so only DC stays single
            Pxx[..., 1:] *= 2*scale
            Pxx[..., 0] *= scale
f = np.arange(Pxx.shape[-1]) * (fs/nfft)
else:
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.fft(x_dt*win, nfft)
if k == 0:
Pxx = (xft * xft.conj()).real
else:
Pxx *= k/(k+1.0)
Pxx += (xft * xft.conj()).real / (k+1.0)
Pxx *= scale
f = fftpack.fftfreq(nfft, 1.0/fs)
if axis != -1:
Pxx = np.rollaxis(Pxx, -1, axis)
return f, Pxx
|
mit
|
cuilishen/cuilishenMissionPlanner
|
Lib/lib2to3/fixes/fix_itertools.py
|
148
|
1549
|
""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (i.e. import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if ('it' in results and
func.value not in (u'ifilterfalse', u'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
|
gpl-3.0
|
dragondjf/musicplayer
|
gui/mainwindow/mainwindow.py
|
1
|
3267
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from gui.menus import SettingsMenu
from gui.functionpages import MainTitleBar, MusicLeftBar, MusicBottomBar, MusicStackPage
from gui.dwidgets import DMainWindow, DMainFrame
from gui.utils import collectView, setSkinForApp
from config import constants
import config
class MainWindow(DMainFrame):
viewID = "MainWindow"
@collectView
def __init__(self):
super(MainWindow, self).__init__()
self.setObjectName(self.viewID)
self.initUI()
self.setskin()
def initUI(self):
self.initSize()
self.setWindowIcon(config.windowIcon)
self.setWindowTitle(config.windowTitle)
self.initMenus()
self.initCentralWidget()
self.initSizeGrip()
self.setSystemTrayMenu(self.settingsMenu)
def initSize(self):
self.resize(constants.MainWindow_Width, constants.MainWindow_Height)
self.moveCenter()
def initMenus(self):
self.settingsMenu = SettingsMenu(self)
def initCentralWidget(self):
self.initTitleBar()
self.initLeftBar()
self.initMusicStackPage()
self.initBottomBar()
centralWidget = QFrame(self)
pageLayout = QVBoxLayout()
pageLayout.addWidget(self.mainTitleBar)
pageLayout.addWidget(self.musicStackPage)
pageLayout.setContentsMargins(0, 0, 1, 0)
pageLayout.setSpacing(0)
controlLayout = QHBoxLayout()
controlLayout.addWidget(self.musicLeftBar)
controlLayout.addLayout(pageLayout)
controlLayout.setContentsMargins(0, 0, 0, 0)
controlLayout.setSpacing(0)
mainLayout = QVBoxLayout()
mainLayout.addLayout(controlLayout)
mainLayout.addWidget(self.musicBottomBar)
mainLayout.setContentsMargins(0, 0, 0, 0)
mainLayout.setSpacing(0)
centralWidget.setLayout(mainLayout)
self.setCentralWidget(centralWidget)
def initTitleBar(self):
self.mainTitleBar = MainTitleBar(self)
self.mainTitleBar.settingDownButton.setMenu(self.settingsMenu)
def initLeftBar(self):
self.musicLeftBar = MusicLeftBar(self)
def initMusicStackPage(self):
self.musicStackPage = MusicStackPage(self)
def initBottomBar(self):
self.musicBottomBar = MusicBottomBar(self)
def initSizeGrip(self):
self.sizeGrip = QSizeGrip(self)
self.sizeGrip.show()
def setskin(self, skinID="default"):
        setSkinForApp('gui/skin/qss/%s.qss' % skinID)  # set the main-window stylesheet
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.guimanger.actionExit()
elif event.key() == Qt.Key_F11:
pass
elif event.key() == Qt.Key_F9:
pass
elif event.key() == Qt.Key_F8:
pass
elif event.key() == Qt.Key_F12:
self.guimanger.actionObjectView()
else:
super(MainWindow, self).keyPressEvent(event)
def resizeEvent(self, event):
super(MainWindow, self).resizeEvent(event)
self.sizeGrip.move(
self.size().width() - 100, self.size().height() - 30)
|
gpl-2.0
|
jinankjain/linux
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
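        # e.g. val=421 with arg=1 selects the middle digit (2): each decimal
        # digit of an "M" status value apparently encodes one lock's state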
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
tadebayo/myedge
|
myvenv/Lib/site-packages/django/db/backends/postgresql/operations.py
|
15
|
11062
|
from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def unification_cast_sql(self, output_field):
internal_type = output_field.get_internal_type()
if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
# PostgreSQL will resolve a union as type 'text' if input types are
# 'unknown'.
# https://www.postgresql.org/docs/current/static/typeconv-union-case.html
# These fields cannot be implicitly cast back in the default
# PostgreSQL configuration so we need to explicitly cast them.
# We must also remove components of the type within brackets:
# varchar(255) -> varchar.
return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
return '%s'
def date_extract_sql(self, lookup_type, field_name):
# https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
return field_name, params
def datetime_cast_date_sql(self, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = '(%s)::date' % field_name
return sql, params
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
sql = self.date_extract_sql(lookup_type, field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name, params = self._convert_field_to_tz(field_name, tzname)
# https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
return sql, params
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_ids(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, return the
list of newly created IDs.
"""
return [item[0] for item in cursor.fetchall()]
def lookup_cast(self, lookup_type, internal_type=None):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
if internal_type in ('IPAddressField', 'GenericIPAddressField'):
lookup = "HOST(%s)"
else:
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
return lookup
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def prepare_sql_script(self, sql):
return [sql]
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
# us to truncate tables referenced by a foreign key in any other
# table.
tables_sql = ', '.join(
style.SQL_FIELD(self.quote_name(table)) for table in tables)
if allow_cascade:
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
style.SQL_KEYWORD('CASCADE'),
)]
else:
sql = ['%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
)]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
sql = []
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name),
))
return sql
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table)),
)
)
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.remote_field.through:
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))
)
)
return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode('utf-8')
return None
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def adapt_datefield_value(self, value):
return value
def adapt_datetimefield_value(self, value):
return value
def adapt_timefield_value(self, value):
return value
def adapt_ipaddressfield_value(self, value):
if value:
return Inet(value)
return None
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "age(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super(DatabaseOperations, self).subtract_temporals(internal_type, lhs, rhs)
def fulltext_search_sql(self, field_name):
raise NotImplementedError(
"Add 'django.contrib.postgres' to settings.INSTALLED_APPS to use "
"the search operator."
)
|
mit
|
hfinucane/ansible
|
lib/ansible/plugins/lookup/nested.py
|
157
|
2100
|
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
def _lookup_variables(self, terms, variables):
results = []
for x in terms:
try:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader, fail_on_undefined=True)
except UndefinedError as e:
raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms, variables)
my_list = terms[:]
my_list.reverse()
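        # reverse + pop() consumes the lists left to right, so the cartesian
        # product is built in the original term order, e.g.
        #   [[1, 2], ['a', 'b']] -> [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]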
result = []
if len(my_list) == 0:
raise AnsibleError("with_nested requires at least one element in the nested list")
result = my_list.pop()
while len(my_list) > 0:
result2 = self._combine(result, my_list.pop())
result = result2
new_result = []
for x in result:
new_result.append(self._flatten(x))
return new_result
|
gpl-3.0
|
cowlicks/numpy
|
numpy/random/setup.py
|
58
|
2425
|
from __future__ import division, print_function
from os.path import join, split, dirname
import os
import sys
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
def needs_mingw_ftime_workaround():
# We need the mingw workaround for _ftime if the msvc runtime version is
# 7.1 or above and we build with mingw ...
# ... but we can't easily detect compiler version outside distutils command
# context, so we will need to detect in randomkit whether we build with gcc
msver = get_msvc_build_version()
if msver and msver >= 8:
return True
return False
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
tc = testcode_wincrypt()
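        # the probe program exits 0 (i.e. try_run succeeds) only when _WIN32
        # is defined, so Advapi32 is linked on Windows builds only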
if config_cmd.try_run(tc):
libs.append('Advapi32')
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1')]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
libs = []
# Configure mtrand
config.add_extension('mtrand',
sources=[join('mtrand', x) for x in
['mtrand.c', 'randomkit.c', 'initarray.c',
'distributions.c']]+[generate_libraries],
libraries=libs,
depends=[join('mtrand', '*.h'),
join('mtrand', '*.pyx'),
join('mtrand', '*.pxi'),],
define_macros=defs,
)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
bsd-3-clause
|
hainm/statsmodels
|
statsmodels/tsa/vector_ar/var_model.py
|
25
|
50516
|
"""
Vector Autoregression (VAR) processes
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import division, print_function
from statsmodels.compat.python import (range, lrange, string_types, StringIO, iteritems,
cStringIO)
from collections import defaultdict
import numpy as np
import numpy.linalg as npl
from numpy.linalg import cholesky as chol, solve
import scipy.stats as stats
import scipy.linalg as L
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import chain_dot
from statsmodels.tools.linalg import logdet_symm
from statsmodels.tsa.tsatools import vec, unvec
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.output import VARSummary
import statsmodels.tsa.tsatools as tsa
import statsmodels.tsa.vector_ar.output as output
import statsmodels.tsa.vector_ar.plotting as plotting
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
mat = np.array
#-------------------------------------------------------------------------------
# VAR process routines
def ma_rep(coefs, maxn=10):
r"""
MA(\infty) representation of VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
maxn : int
Number of MA matrices to compute
Notes
-----
VAR(p) process as
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
can be equivalently represented as
.. math:: y_t = \mu + \sum_{i=0}^\infty \Phi_i u_{t-i}
e.g. can recursively compute the \Phi_i matrices with \Phi_0 = I_k
Returns
-------
phis : ndarray (maxn + 1 x k x k)
"""
p, k, k = coefs.shape
phis = np.zeros((maxn+1, k, k))
phis[0] = np.eye(k)
# recursively compute Phi matrices
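    # Phi_i = \sum_{j=1}^{min(i, p)} Phi_{i-j} A_j with Phi_0 = I_k
    # (Lutkepohl 2005)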
for i in range(1, maxn + 1):
for j in range(1, i+1):
if j > p:
break
phis[i] += np.dot(phis[i-j], coefs[j-1])
return phis
def is_stable(coefs, verbose=False):
"""
Determine stability of VAR(p) system by examining the eigenvalues of the
VAR(1) representation
Parameters
----------
coefs : ndarray (p x k x k)
Returns
-------
is_stable : bool
"""
A_var1 = util.comp_matrix(coefs)
eigs = np.linalg.eigvals(A_var1)
if verbose:
print('Eigenvalues of VAR(1) rep')
for val in np.abs(eigs):
print(val)
    # stability requires every eigenvalue of the companion matrix to lie
    # strictly inside the unit circle
    return (np.abs(eigs) < 1).all()
def var_acf(coefs, sig_u, nlags=None):
"""
Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
process
Parameters
----------
coefs : ndarray (p x k x k)
Coefficient matrices A_i
sig_u : ndarray (k x k)
Covariance of white noise process u_t
nlags : int, optional
Defaults to order p of system
Notes
-----
Ref: Lutkepohl p.28-29
Returns
-------
acf : ndarray, (p, k, k)
"""
p, k, _ = coefs.shape
if nlags is None:
nlags = p
# p x k x k, ACF for lags 0, ..., p-1
result = np.zeros((nlags + 1, k, k))
result[:p] = _var_acf(coefs, sig_u)
# yule-walker equations
for h in range(p, nlags + 1):
# compute ACF for lag=h
# G(h) = A_1 G(h-1) + ... + A_p G(h-p)
for j in range(p):
result[h] += np.dot(coefs[j], result[h-j-1])
return result
def _var_acf(coefs, sig_u):
"""
Compute autocovariance function ACF_y(h) for h=1,...,p
Notes
-----
Lutkepohl (2005) p.29
"""
p, k, k2 = coefs.shape
assert(k == k2)
A = util.comp_matrix(coefs)
# construct VAR(1) noise covariance
SigU = np.zeros((k*p, k*p))
SigU[:k,:k] = sig_u
# vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
vecACF = L.solve(np.eye((k*p)**2) - np.kron(A, A), vec(SigU))
acf = unvec(vecACF)
acf = acf[:k].T.reshape((p, k, k))
return acf
def forecast(y, coefs, intercept, steps):
"""
Produce linear MSE forecast
Parameters
----------
    y : ndarray (k_ar x neqs)
        Initial observations used as the forecast origin
    coefs : ndarray (p x k x k)
        Coefficient matrices A_i
    intercept : ndarray (length k) or float
        Intercept (or deterministic trend term) added at each step
    steps : int
        Number of steps ahead to forecast
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl p. 37
Also used by DynamicVAR class
"""
p = len(coefs)
k = len(coefs[0])
# initial value
forcs = np.zeros((steps, k)) + intercept
# h=0 forecast should be latest observation
# forcs[0] = y[-1]
# make indices easier to think about
for h in range(1, steps + 1):
# y_t(h) = intercept + sum_1^p A_i y_t_(h-i)
f = forcs[h - 1]
for i in range(1, p + 1):
# slightly hackish
if h - i <= 0:
# e.g. when h=1, h-1 = 0, which is y[-1]
prior_y = y[h - i - 1]
else:
# e.g. when h=2, h-1=1, which is forcs[0]
prior_y = forcs[h - i - 1]
# i=1 is coefs[0]
f = f + np.dot(coefs[i - 1], prior_y)
forcs[h - 1] = f
return forcs
def forecast_cov(ma_coefs, sig_u, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
k = len(sig_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, sig_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
def var_loglike(resid, omega, nobs):
r"""
Returns the value of the VAR(p) log-likelihood.
Parameters
----------
resid : ndarray (T x K)
omega : ndarray
Sigma hat matrix. Each element i,j is the average product of the
OLS residual for variable i and the OLS residual for variable j or
np.dot(resid.T,resid)/nobs. There should be no correction for the
degrees of freedom.
nobs : int
Returns
-------
llf : float
The value of the loglikelihood function for a VAR(p) model
Notes
-----
The loglikelihood function for the VAR(p) is
.. math::
        -\left(\frac{T}{2}\right)
        \left(K\ln\left(2\pi\right)+\ln\left|\Omega\right|+K\right)
"""
logdet = logdet_symm(np.asarray(omega))
neqs = len(omega)
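    # together: llf = -(T/2) * (K*ln(2*pi) + ln|Omega| + K), matching the
    # formula in the docstring above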
part1 = - (nobs * neqs / 2) * np.log(2 * np.pi)
part2 = - (nobs / 2) * (logdet + neqs)
return part1 + part2
def _reordered(self, order):
#Create new arrays to hold rearranged results from .fit()
endog = self.endog
endog_lagged = self.endog_lagged
params = self.params
sigma_u = self.sigma_u
names = self.names
k_ar = self.k_ar
endog_new = np.zeros([np.size(endog,0),np.size(endog,1)])
endog_lagged_new = np.zeros([np.size(endog_lagged,0), np.size(endog_lagged,1)])
params_new_inc, params_new = [np.zeros([np.size(params,0), np.size(params,1)])
for i in range(2)]
sigma_u_new_inc, sigma_u_new = [np.zeros([np.size(sigma_u,0), np.size(sigma_u,1)])
for i in range(2)]
num_end = len(self.params[0])
names_new = []
#Rearrange elements and fill in new arrays
k = self.k_trend
for i, c in enumerate(order):
endog_new[:,i] = self.endog[:,c]
if k > 0:
params_new_inc[0,i] = params[0,i]
endog_lagged_new[:,0] = endog_lagged[:,0]
for j in range(k_ar):
params_new_inc[i+j*num_end+k,:] = self.params[c+j*num_end+k,:]
endog_lagged_new[:,i+j*num_end+k] = endog_lagged[:,c+j*num_end+k]
sigma_u_new_inc[i,:] = sigma_u[c,:]
names_new.append(names[c])
for i, c in enumerate(order):
params_new[:,i] = params_new_inc[:,c]
sigma_u_new[:,i] = sigma_u_new_inc[:,c]
return VARResults(endog=endog_new, endog_lagged=endog_lagged_new,
params=params_new, sigma_u=sigma_u_new,
lag_order=self.k_ar, model=self.model,
trend='c', names=names_new, dates=self.dates)
#-------------------------------------------------------------------------------
# VARProcess class: for known or unknown VAR process
class VAR(tsbase.TimeSeriesModel):
r"""
Fit VAR(p) process and do lag order selection
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
Parameters
----------
endog : array-like
        2-d endogenous response variable. The dependent variable.
dates : array-like
must match number of rows of endog
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
def __init__(self, endog, dates=None, freq=None, missing='none'):
super(VAR, self).__init__(endog, None, dates, freq, missing=missing)
if self.endog.ndim == 1:
raise ValueError("Only gave one variable to VAR")
self.y = self.endog #keep alias for now
self.neqs = self.endog.shape[1]
def _get_predict_start(self, start, k_ar):
if start is None:
start = k_ar
return super(VAR, self)._get_predict_start(start)
def predict(self, params, start=None, end=None, lags=1, trend='c'):
"""
Returns in-sample predictions or forecasts
"""
start = self._get_predict_start(start, lags)
end, out_of_sample = self._get_predict_end(end)
if end < start:
raise ValueError("end is before start")
if end == start + out_of_sample:
return np.array([])
k_trend = util.get_trendorder(trend)
k = self.neqs
k_ar = lags
predictedvalues = np.zeros((end + 1 - start + out_of_sample, k))
if k_trend != 0:
intercept = params[:k_trend]
predictedvalues += intercept
y = self.y
X = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
fittedvalues = np.dot(X, params)
fv_start = start - k_ar
pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
fv_end = min(len(fittedvalues), end-k_ar+1)
predictedvalues[:pv_end] = fittedvalues[fv_start:fv_end]
if not out_of_sample:
return predictedvalues
# fit out of sample
y = y[-k_ar:]
coefs = params[k_trend:].reshape((k_ar, k, k)).swapaxes(1,2)
predictedvalues[pv_end:] = forecast(y, coefs, intercept, out_of_sample)
return predictedvalues
def fit(self, maxlags=None, method='ols', ic=None, trend='c',
verbose=False):
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "nc"}
            "c" - add constant
            "ct" - constant and trend
            "ctt" - constant, linear and quadratic trend
            "nc" - no constant, no trend
Note that these are prepended to the columns of the dataset.
Notes
-----
Lutkepohl pp. 146-153
Returns
-------
est : VARResults
"""
lags = maxlags
if trend not in ['c', 'ct', 'ctt', 'nc']:
raise ValueError("trend '{}' not supported for VAR".format(trend))
if ic is not None:
selections = self.select_order(maxlags=maxlags, verbose=verbose)
if ic not in selections:
raise Exception("%s not recognized, must be among %s"
% (ic, sorted(selections)))
lags = selections[ic]
if verbose:
print('Using %d based on %s criterion' % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = len(self.endog) - lags
return self._estimate_var(lags, trend=trend)
def _estimate_var(self, lags, offset=0, trend='c'):
"""
lags : int
offset : int
Periods to drop from beginning-- for order selection so it's an
apples-to-apples comparison
trend : string or None
As per above
"""
# have to do this again because select_order doesn't call fit
self.k_trend = k_trend = util.get_trendorder(trend)
if offset < 0: # pragma: no cover
raise ValueError('offset must be >= 0')
y = self.y[offset:]
z = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
y_sample = y[lags:]
# Lutkepohl p75, about 5x faster than stated formula
params = np.linalg.lstsq(z, y_sample)[0]
resid = y_sample - np.dot(z, params)
# Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
# process $u$
# equivalent definition
# .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
# Z^\prime) Y
# Ref: Lutkepohl p.75
# df_resid right now is T - Kp - 1, which is a suggested correction
avobs = len(y_sample)
df_resid = avobs - (self.neqs * lags + k_trend)
sse = np.dot(resid.T, resid)
omega = sse / df_resid
varfit = VARResults(y, z, params, omega, lags, names=self.endog_names,
trend=trend, dates=self.data.dates, model=self)
return VARResultsWrapper(varfit)
def select_order(self, maxlags=None, verbose=True):
"""
Compute lag order selections based on each of the available information
criteria
Parameters
----------
maxlags : int
if None, defaults to 12 * (nobs/100.)**(1./4)
verbose : bool, default True
If True, print table of info criteria and selected orders
Returns
-------
selections : dict {info_crit -> selected_order}
"""
if maxlags is None:
maxlags = int(round(12*(len(self.endog)/100.)**(1/4.)))
ics = defaultdict(list)
for p in range(maxlags + 1):
            # exclude some periods so the same amount of data is used for
            # each lag order
result = self._estimate_var(p, offset=maxlags-p)
for k, v in iteritems(result.info_criteria):
ics[k].append(v)
selected_orders = dict((k, mat(v).argmin())
for k, v in iteritems(ics))
if verbose:
output.print_ic_table(ics, selected_orders)
return selected_orders
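# Illustrative usage sketch (not part of the original module): fit a VAR on
# simulated data and let an information criterion pick the lag order. The
# data-generating values are made up; assumes util.varsim keeps its
# (coefs, intercept, sig_u, steps) signature.
def _example_fit_var():
    coefs = np.zeros((1, 2, 2))
    coefs[0] = [[0.5, 0.1],
                [0.4, 0.5]]
    data = util.varsim(coefs, np.zeros(2), np.eye(2) * 0.1, steps=500)
    results = VAR(data).fit(maxlags=4, ic='aic', verbose=False)
    return results.k_ar, results.coefs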
class VARProcess(object):
"""
Class represents a known VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
intercept : ndarray (length k)
sigma_u : ndarray (k x k)
names : sequence (length k)
Returns
-------
**Attributes**:
"""
def __init__(self, coefs, intercept, sigma_u, names=None):
self.k_ar = len(coefs)
self.neqs = coefs.shape[1]
self.coefs = coefs
self.intercept = intercept
self.sigma_u = sigma_u
self.names = names
def get_eq_index(self, name):
"Return integer position of requested equation name"
return util.get_index(self.names, name)
def __str__(self):
output = ('VAR(%d) process for %d-dimensional response y_t'
% (self.k_ar, self.neqs))
output += '\nstable: %s' % self.is_stable()
output += '\nmean: %s' % self.mean()
return output
def is_stable(self, verbose=False):
"""Determine stability based on model coefficients
Parameters
----------
verbose : bool
Print eigenvalues of the VAR(1) companion
Notes
-----
        Checks if det(I - Az) = 0 for any mod(z) <= 1; equivalently, all
        eigenvalues of the VAR(1) companion matrix must lie inside the unit
        circle for stability
"""
return is_stable(self.coefs, verbose=verbose)
def plotsim(self, steps=1000):
"""
Plot a simulation from the VAR(p) process for the desired number of
steps
"""
Y = util.varsim(self.coefs, self.intercept, self.sigma_u, steps=steps)
plotting.plot_mts(Y)
def mean(self):
r"""Mean of stable process
Lutkepohl eq. 2.1.23
.. math:: \mu = (I - A_1 - \dots - A_p)^{-1} \alpha
"""
return solve(self._char_mat, self.intercept)
def ma_rep(self, maxn=10):
r"""Compute MA(:math:`\infty`) coefficient matrices
Parameters
----------
maxn : int
Number of coefficient matrices to compute
Returns
-------
coefs : ndarray (maxn x k x k)
"""
return ma_rep(self.coefs, maxn=maxn)
def orth_ma_rep(self, maxn=10, P=None):
r"""Compute Orthogonalized MA coefficient matrices using P matrix such
that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
decomposition of :math:`\Sigma_u`
Parameters
----------
maxn : int
Number of coefficient matrices to compute
P : ndarray (k x k), optional
            Matrix such that Sigma_u = PP', defaults to Cholesky decomposition
Returns
-------
coefs : ndarray (maxn x k x k)
"""
if P is None:
P = self._chol_sigma_u
ma_mats = self.ma_rep(maxn=maxn)
return mat([np.dot(coefs, P) for coefs in ma_mats])
def long_run_effects(self):
"""Compute long-run effect of unit impulse
.. math::
\Psi_\infty = \sum_{i=0}^\infty \Phi_i
"""
return L.inv(self._char_mat)
@cache_readonly
def _chol_sigma_u(self):
return chol(self.sigma_u)
@cache_readonly
def _char_mat(self):
return np.eye(self.neqs) - self.coefs.sum(0)
def acf(self, nlags=None):
"""Compute theoretical autocovariance function
Returns
-------
acf : ndarray (p x k x k)
"""
return var_acf(self.coefs, self.sigma_u, nlags=nlags)
def acorr(self, nlags=None):
"""Compute theoretical autocorrelation function
Returns
-------
acorr : ndarray (p x k x k)
"""
return util.acf_to_acorr(self.acf(nlags=nlags))
def plot_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.acorr(nlags=nlags), linewidth=linewidth)
def forecast(self, y, steps):
"""Produce linear minimum MSE forecasts for desired number of steps
ahead, using prior values y
Parameters
----------
y : ndarray (p x k)
steps : int
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl pp 37-38
"""
return forecast(y, self.coefs, self.intercept, steps)
def mse(self, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
steps : int
Number of steps ahead
Notes
-----
        .. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi_i \Sigma_u \Phi_i^T
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
ma_coefs = self.ma_rep(steps)
k = len(self.sigma_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, self.sigma_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
forecast_cov = mse
def _forecast_vars(self, steps):
covs = self.forecast_cov(steps)
# Take diagonal for each cov
inds = np.arange(self.neqs)
return covs[:, inds, inds]
def forecast_interval(self, y, steps, alpha=0.05):
"""Construct forecast interval estimates assuming the y are Gaussian
Parameters
        ----------
        y : ndarray (p x k)
            The last p observations, most recent last
        steps : int
            Number of steps ahead
        alpha : float, default 0.05
            Significance level for the (1 - alpha) interval
Notes
-----
Lutkepohl pp. 39-40
Returns
-------
(lower, mid, upper) : (ndarray, ndarray, ndarray)
"""
assert(0 < alpha < 1)
q = util.norm_signif_level(alpha)
point_forecast = self.forecast(y, steps)
sigma = np.sqrt(self._forecast_vars(steps))
forc_lower = point_forecast - q * sigma
forc_upper = point_forecast + q * sigma
return point_forecast, forc_lower, forc_upper
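# Illustrative usage sketch (not part of the original module): a VARProcess
# can be built directly from known parameters, without estimation. Values
# are made up.
def _example_var_process():
    coefs = np.zeros((1, 2, 2))
    coefs[0] = [[0.5, 0.1],
                [0.4, 0.5]]
    process = VARProcess(coefs, np.zeros(2), np.eye(2) * 0.1)
    point, lower, upper = process.forecast_interval(np.zeros((1, 2)), steps=5)
    return process.is_stable(), process.mean(), point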
#-------------------------------------------------------------------------------
# VARResults class
class VARResults(VARProcess):
"""Estimate VAR(p) process with fixed number of lags
Parameters
----------
endog : array
endog_lagged : array
params : array
sigma_u : array
lag_order : int
model : VAR model instance
trend : str {'nc', 'c', 'ct'}
names : array-like
List of names of the endogenous variables in order of appearance in `endog`.
dates
Returns
-------
**Attributes**
aic
bic
bse
coefs : ndarray (p x K x K)
Estimated A_i matrices, A_i = coefs[i-1]
cov_params
dates
detomega
df_model : int
df_resid : int
endog
endog_lagged
fittedvalues
fpe
intercept
info_criteria
    k_ar : int
        Order of VAR process
    k_trend : int
    llf
    model
    names : list
        variables names
    neqs : int
        Number of variables (equations)
    nobs : int
    n_totobs : int
    params : ndarray ((Kp + 1) x K)
        A_i matrices and intercept in stacked form [int A_1 ... A_p]
    pvalues
resid
roots : array
The roots of the VAR process are the solution to
(I - coefs[0]*z - coefs[1]*z**2 ... - coefs[p-1]*z**k_ar) = 0.
Note that the inverse roots are returned, and stability requires that
the roots lie outside the unit circle.
sigma_u : ndarray (K x K)
Estimate of white noise process variance Var[u_t]
sigma_u_mle
stderr
    trendorder
tvalues
y :
ys_lagged
"""
_model_type = 'VAR'
def __init__(self, endog, endog_lagged, params, sigma_u, lag_order,
model=None, trend='c', names=None, dates=None):
self.model = model
self.y = self.endog = endog #keep alias for now
self.ys_lagged = self.endog_lagged = endog_lagged #keep alias for now
self.dates = dates
self.n_totobs, neqs = self.y.shape
self.nobs = self.n_totobs - lag_order
k_trend = util.get_trendorder(trend)
if k_trend > 0: # make this the polynomial trend order
trendorder = k_trend - 1
else:
trendorder = None
self.k_trend = k_trend
self.trendorder = trendorder
self.exog_names = util.make_lag_names(names, lag_order, k_trend)
self.params = params
# Initialize VARProcess parent class
# construct coefficient matrices
# Each matrix needs to be transposed
reshaped = self.params[self.k_trend:]
reshaped = reshaped.reshape((lag_order, neqs, neqs))
# Need to transpose each coefficient matrix
intercept = self.params[0]
coefs = reshaped.swapaxes(1, 2).copy()
super(VARResults, self).__init__(coefs, intercept, sigma_u, names=names)
def plot(self):
"""Plot input time series
"""
plotting.plot_mts(self.y, names=self.names, index=self.dates)
@property
def df_model(self):
"""Number of estimated parameters, including the intercept / trends
"""
return self.neqs * self.k_ar + self.k_trend
@property
def df_resid(self):
"Number of observations minus number of estimated parameters"
return self.nobs - self.df_model
@cache_readonly
def fittedvalues(self):
"""The predicted insample values of the response variables of the model.
"""
return np.dot(self.ys_lagged, self.params)
@cache_readonly
def resid(self):
"""Residuals of response variable resulting from estimated coefficients
"""
return self.y[self.k_ar:] - self.fittedvalues
def sample_acov(self, nlags=1):
return _compute_acov(self.y[self.k_ar:], nlags=nlags)
def sample_acorr(self, nlags=1):
acovs = self.sample_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
def plot_sample_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.sample_acorr(nlags=nlags),
linewidth=linewidth)
def resid_acov(self, nlags=1):
"""
Compute centered sample autocovariance (including lag 0)
Parameters
----------
nlags : int
Returns
        -------
        acov : ndarray (nlags + 1 x neqs x neqs)
"""
return _compute_acov(self.resid, nlags=nlags)
def resid_acorr(self, nlags=1):
"""
Compute sample autocorrelation (including lag 0)
Parameters
----------
nlags : int
Returns
        -------
        acorr : ndarray (nlags + 1 x neqs x neqs)
"""
acovs = self.resid_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
@cache_readonly
def resid_corr(self):
"Centered residual correlation matrix"
return self.resid_acorr(0)[0]
@cache_readonly
def sigma_u_mle(self):
"""(Biased) maximum likelihood estimate of noise process covariance
"""
return self.sigma_u * self.df_resid / self.nobs
@cache_readonly
def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[intercept, A_1, ..., A_p] (K x (Kp + 1))
Adjusted to be an unbiased estimator
Ref: Lutkepohl p.74-75
"""
z = self.ys_lagged
return np.kron(L.inv(np.dot(z.T, z)), self.sigma_u)
def cov_ybar(self):
r"""Asymptotically consistent estimate of covariance of the sample mean
.. math::
        \sqrt{T} (\bar{y} - \mu) \rightarrow {\cal N}(0, \Sigma_{\bar{y}})\\
\Sigma_{\bar{y}} = B \Sigma_u B^\prime, \text{where } B = (I_K - A_1
- \cdots - A_p)^{-1}
Notes
-----
Lutkepohl Proposition 3.3
"""
Ainv = L.inv(np.eye(self.neqs) - self.coefs.sum(0))
return chain_dot(Ainv, self.sigma_u, Ainv.T)
#------------------------------------------------------------
# Estimation-related things
@cache_readonly
def _zz(self):
# Z'Z
return np.dot(self.ys_lagged.T, self.ys_lagged)
@property
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients ex intercept
"""
# drop intercept and trend
return self.cov_params[self.k_trend*self.neqs:, self.k_trend*self.neqs:]
@cache_readonly
def _cov_sigma(self):
"""
Estimated covariance matrix of vech(sigma_u)
"""
D_K = tsa.duplication_matrix(self.neqs)
D_Kinv = npl.pinv(D_K)
sigxsig = np.kron(self.sigma_u, self.sigma_u)
return 2 * chain_dot(D_Kinv, sigxsig, D_Kinv.T)
@cache_readonly
def llf(self):
"Compute VAR(p) loglikelihood"
return var_loglike(self.resid, self.sigma_u_mle, self.nobs)
@cache_readonly
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size
"""
stderr = np.sqrt(np.diag(self.cov_params))
return stderr.reshape((self.df_model, self.neqs), order='C')
bse = stderr # statsmodels interface?
@cache_readonly
def tvalues(self):
"""Compute t-statistics. Use Student-t(T - Kp - 1) = t(df_resid) to test
significance.
"""
return self.params / self.stderr
@cache_readonly
def pvalues(self):
"""Two-sided p-values for model coefficients from Student t-distribution
"""
return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2
def plot_forecast(self, steps, alpha=0.05, plot_stderr=True):
"""
Plot forecast
"""
mid, lower, upper = self.forecast_interval(self.y[-self.k_ar:], steps,
alpha=alpha)
plotting.plot_var_forc(self.y, mid, lower, upper, names=self.names,
plot_stderr=plot_stderr)
# Forecast error covariance functions
def forecast_cov(self, steps=1):
r"""Compute forecast covariance matrices for desired number of steps
Parameters
----------
steps : int
Notes
-----
.. math:: \Sigma_{\hat y}(h) = \Sigma_y(h) + \Omega(h) / T
Ref: Lutkepohl pp. 96-97
Returns
-------
covs : ndarray (steps x k x k)
"""
mse = self.mse(steps)
omegas = self._omega_forc_cov(steps)
return mse + omegas / self.nobs
#Monte Carlo irf standard errors
def irf_errband_mc(self, orth=False, repl=1000, T=10,
signif=0.05, seed=None, burn=100, cum=False):
"""
        Compute Monte Carlo integrated error bands for impulse response
        functions, assuming normally distributed errors
Parameters
----------
orth: bool, default False
            Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
signif: float (0 < signif <1)
Significance level for error bars, defaults to 95% CI
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
Lutkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
        nobs = self.nobs
        # the documented `seed` argument was silently ignored; honor it here
        if seed is not None:
            np.random.seed(seed=seed)
ma_coll = np.zeros((repl, T+1, neqs, neqs))
        # build the MA representation once, instead of four near-identical
        # lambda branches
        def fill_coll(sim):
            ret = VAR(sim).fit(maxlags=k_ar)
            coll = ret.orth_ma_rep(maxn=T) if orth else ret.ma_rep(maxn=T)
            return coll.cumsum(axis=0) if cum else coll
for i in range(repl):
            # discard burn-in observations to correct for starting bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
ma_sort = np.sort(ma_coll, axis=0) #sort to get quantiles
index = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
lower = ma_sort[index[0],:, :, :]
upper = ma_sort[index[1],:, :, :]
return lower, upper
def irf_resim(self, orth=False, repl=1000, T=10,
seed=None, burn=100, cum=False):
"""
Simulates impulse response function, returning an array of simulations.
Used for Sims-Zha error band calculation.
Parameters
----------
orth: bool, default False
            Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
        Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse Response." Econometrica 67: 1113-1155.
Returns
-------
Array of simulated impulse response functions
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
if seed is not None:
np.random.seed(seed=seed)
ma_coll = np.zeros((repl, T+1, neqs, neqs))
        # build the MA representation once, instead of four near-identical
        # lambda branches
        def fill_coll(sim):
            ret = VAR(sim).fit(maxlags=k_ar)
            coll = ret.orth_ma_rep(maxn=T) if orth else ret.ma_rep(maxn=T)
            return coll.cumsum(axis=0) if cum else coll
for i in range(repl):
            # discard burn-in observations to correct for starting bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
return ma_coll
def _omega_forc_cov(self, steps):
# Approximate MSE matrix \Omega(h) as defined in Lut p97
G = self._zz
Ginv = L.inv(G)
# memoize powers of B for speedup
# TODO: see if can memoize better
B = self._bmat_forc_cov()
_B = {}
def bpow(i):
if i not in _B:
_B[i] = np.linalg.matrix_power(B, i)
return _B[i]
phis = self.ma_rep(steps)
sig_u = self.sigma_u
omegas = np.zeros((steps, self.neqs, self.neqs))
for h in range(1, steps + 1):
if h == 1:
omegas[h-1] = self.df_model * self.sigma_u
continue
om = omegas[h-1]
for i in range(h):
for j in range(h):
Bi = bpow(h - 1 - i)
Bj = bpow(h - 1 - j)
mult = np.trace(chain_dot(Bi.T, Ginv, Bj, G))
om += mult * chain_dot(phis[i], sig_u, phis[j].T)
omegas[h-1] = om
return omegas
def _bmat_forc_cov(self):
# B as defined on p. 96 of Lut
upper = np.zeros((1, self.df_model))
upper[0,0] = 1
lower_dim = self.neqs * (self.k_ar - 1)
I = np.eye(lower_dim)
lower = np.column_stack((np.zeros((lower_dim, 1)), I,
np.zeros((lower_dim, self.neqs))))
return np.vstack((upper, self.params.T, lower))
def summary(self):
"""Compute console output summary of estimates
Returns
-------
summary : VARSummary
"""
return VARSummary(self)
def irf(self, periods=10, var_decomp=None, var_order=None):
"""Analyze impulse responses to shocks in system
Parameters
----------
periods : int
var_decomp : ndarray (k x k), lower triangular
Must satisfy Omega = P P', where P is the passed matrix. Defaults to
Cholesky decomposition of Omega
var_order : sequence
Alternate variable order for Cholesky decomposition
Returns
-------
irf : IRAnalysis
"""
if var_order is not None:
raise NotImplementedError('alternate variable order not implemented'
' (yet)')
return IRAnalysis(self, P=var_decomp, periods=periods)
def fevd(self, periods=10, var_decomp=None):
"""
Compute forecast error variance decomposition ("fevd")
Returns
-------
fevd : FEVD instance
"""
return FEVD(self, P=var_decomp, periods=periods)
def reorder(self, order):
"""Reorder variables for structural specification
"""
if len(order) != len(self.params[0,:]):
raise ValueError("Reorder specification length should match number of endogenous variables")
        # This converts order to a list of integers if given as strings
if isinstance(order[0], string_types):
order_new = []
for i, nam in enumerate(order):
order_new.append(self.names.index(order[i]))
order = order_new
return _reordered(self, order)
#-------------------------------------------------------------------------------
# VAR Diagnostics: Granger-causality, whiteness of residuals, normality, etc.
def test_causality(self, equation, variables, kind='f', signif=0.05,
verbose=True):
"""Compute test statistic for null hypothesis of Granger-noncausality,
general function to test joint Granger-causality of multiple variables
Parameters
----------
equation : string or int
Equation to test for causality
variables : sequence (of strings or ints)
List, tuple, etc. of variables to test for Granger-causality
kind : {'f', 'wald'}
Perform F-test or Wald (chi-sq) test
signif : float, default 5%
Significance level for computing critical values for test,
defaulting to standard 0.95 level
Notes
-----
Null hypothesis is that there is no Granger-causality for the indicated
variables. The degrees of freedom in the F-test are based on the
number of variables in the VAR system, that is, degrees of freedom
        are equal to the number of equations in the VAR times the degrees of
        freedom of a single equation.
Returns
-------
results : dict
"""
if isinstance(variables, (string_types, int, np.integer)):
variables = [variables]
k, p = self.neqs, self.k_ar
# number of restrictions
N = len(variables) * self.k_ar
# Make restriction matrix
C = np.zeros((N, k ** 2 * p + k), dtype=float)
eq_index = self.get_eq_index(equation)
vinds = mat([self.get_eq_index(v) for v in variables])
# remember, vec is column order!
offsets = np.concatenate([k + k ** 2 * j + k * vinds + eq_index
for j in range(p)])
C[np.arange(N), offsets] = 1
# Lutkepohl 3.6.5
Cb = np.dot(C, vec(self.params.T))
middle = L.inv(chain_dot(C, self.cov_params, C.T))
# wald statistic
lam_wald = statistic = chain_dot(Cb, middle, Cb)
if kind.lower() == 'wald':
df = N
dist = stats.chi2(df)
elif kind.lower() == 'f':
statistic = lam_wald / N
df = (N, k * self.df_resid)
dist = stats.f(*df)
else:
raise Exception('kind %s not recognized' % kind)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
conclusion = 'fail to reject' if statistic < crit_value else 'reject'
results = {
'statistic' : statistic,
'crit_value' : crit_value,
'pvalue' : pvalue,
'df' : df,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.causality_summary(results, variables, equation, kind)
print(summ)
return results
def test_whiteness(self, nlags=10, plot=True, linewidth=8):
"""
Test white noise assumption. Sample (Y) autocorrelations are compared
        with the standard :math:`2 / \sqrt{T}` bounds.
Parameters
----------
plot : boolean, default True
Plot autocorrelations with 2 / sqrt(T) bounds
"""
acorrs = self.sample_acorr(nlags)
bound = 2 / np.sqrt(self.nobs)
# TODO: this probably needs some UI work
if (np.abs(acorrs) > bound).any():
print('FAIL: Some autocorrelations exceed %.4f bound. '
'See plot' % bound)
else:
print('PASS: No autocorrelations exceed %.4f bound' % bound)
if plot:
fig = plotting.plot_full_acorr(acorrs[1:],
xlabel=np.arange(1, nlags+1),
err_bound=bound,
linewidth=linewidth)
fig.suptitle(r"ACF plots with $2 / \sqrt{T}$ bounds "
"for testing whiteness assumption")
def test_normality(self, signif=0.05, verbose=True):
"""
Test assumption of normal-distributed errors using Jarque-Bera-style
omnibus Chi^2 test
Parameters
----------
signif : float
Test significance threshold
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
"""
Pinv = npl.inv(self._chol_sigma_u)
w = np.array([np.dot(Pinv, u) for u in self.resid])
b1 = (w ** 3).sum(0) / self.nobs
lam_skew = self.nobs * np.dot(b1, b1) / 6
b2 = (w ** 4).sum(0) / self.nobs - 3
lam_kurt = self.nobs * np.dot(b2, b2) / 24
lam_omni = lam_skew + lam_kurt
omni_dist = stats.chi2(self.neqs * 2)
omni_pvalue = omni_dist.sf(lam_omni)
crit_omni = omni_dist.ppf(1 - signif)
conclusion = 'fail to reject' if lam_omni < crit_omni else 'reject'
results = {
'statistic' : lam_omni,
'crit_value' : crit_omni,
'pvalue' : omni_pvalue,
'df' : self.neqs * 2,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.normality_summary(results)
print(summ)
return results
@cache_readonly
def detomega(self):
r"""
Return determinant of white noise covariance with degrees of freedom
correction:
.. math::
\hat \Omega = \frac{T}{T - Kp - 1} \hat \Omega_{\mathrm{MLE}}
"""
return L.det(self.sigma_u)
@cache_readonly
def info_criteria(self):
"information criteria for lagorder selection"
nobs = self.nobs
neqs = self.neqs
lag_order = self.k_ar
free_params = lag_order * neqs ** 2 + neqs * self.k_trend
ld = logdet_symm(self.sigma_u_mle)
# See Lutkepohl pp. 146-150
aic = ld + (2. / nobs) * free_params
bic = ld + (np.log(nobs) / nobs) * free_params
hqic = ld + (2. * np.log(np.log(nobs)) / nobs) * free_params
fpe = ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)
return {
'aic' : aic,
'bic' : bic,
'hqic' : hqic,
'fpe' : fpe
}
@property
def aic(self):
"Akaike information criterion"
return self.info_criteria['aic']
@property
def fpe(self):
"""Final Prediction Error (FPE)
Lutkepohl p. 147, see info_criteria
"""
return self.info_criteria['fpe']
@property
def hqic(self):
"Hannan-Quinn criterion"
return self.info_criteria['hqic']
@property
def bic(self):
"Bayesian a.k.a. Schwarz info criterion"
return self.info_criteria['bic']
@cache_readonly
def roots(self):
neqs = self.neqs
k_ar = self.k_ar
p = neqs * k_ar
arr = np.zeros((p,p))
arr[:neqs,:] = np.column_stack(self.coefs)
arr[neqs:,:-neqs] = np.eye(p-neqs)
roots = np.linalg.eig(arr)[0]**-1
idx = np.argsort(np.abs(roots))[::-1] # sort by reverse modulus
return roots[idx]
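# Illustrative usage sketch (not part of the original module): typical
# post-estimation diagnostics. `results` is assumed to be a VARResults for
# (hypothetical) series named 'y1' and 'y2'.
def _example_diagnostics(results):
    granger = results.test_causality('y1', ['y2'], kind='f', verbose=False)
    normality = results.test_normality(verbose=False)
    return granger['conclusion'], normality['pvalue'], results.info_criteria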
class VARResultsWrapper(wrap.ResultsWrapper):
_attrs = {'bse' : 'columns_eq', 'cov_params' : 'cov',
'params' : 'columns_eq', 'pvalues' : 'columns_eq',
'tvalues' : 'columns_eq', 'sigma_u' : 'cov_eq',
'sigma_u_mle' : 'cov_eq',
'stderr' : 'columns_eq'}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
_wrap_methods.pop('cov_params') # not yet a method in VARResults
wrap.populate_wrapper(VARResultsWrapper, VARResults)
class FEVD(object):
"""
Compute and plot Forecast error variance decomposition and asymptotic
standard errors
"""
def __init__(self, model, P=None, periods=None):
self.periods = periods
self.model = model
self.neqs = model.neqs
self.names = model.model.endog_names
self.irfobj = model.irf(var_decomp=P, periods=periods)
self.orth_irfs = self.irfobj.orth_irfs
# cumulative impulse responses
irfs = (self.orth_irfs[:periods] ** 2).cumsum(axis=0)
rng = lrange(self.neqs)
mse = self.model.mse(periods)[:, rng, rng]
# lag x equation x component
fevd = np.empty_like(irfs)
for i in range(periods):
fevd[i] = (irfs[i].T / mse[i]).T
# switch to equation x lag x component
self.decomp = fevd.swapaxes(0, 1)
def summary(self):
buf = StringIO()
rng = lrange(self.periods)
for i in range(self.neqs):
ppm = output.pprint_matrix(self.decomp[i], rng, self.names)
buf.write('FEVD for %s\n' % self.names[i])
buf.write(ppm + '\n')
print(buf.getvalue())
def cov(self):
"""Compute asymptotic standard errors
Returns
-------
"""
raise NotImplementedError
def plot(self, periods=None, figsize=(10,10), **plot_kwds):
"""Plot graphical display of FEVD
Parameters
----------
periods : int, default None
Defaults to number originally specified. Can be at most that number
"""
import matplotlib.pyplot as plt
k = self.neqs
periods = periods or self.periods
fig, axes = plt.subplots(nrows=k, figsize=figsize)
fig.suptitle('Forecast error variance decomposition (FEVD)')
colors = [str(c) for c in np.arange(k, dtype=float) / k]
ticks = np.arange(periods)
limits = self.decomp.cumsum(2)
for i in range(k):
ax = axes[i]
this_limits = limits[i].T
handles = []
for j in range(k):
lower = this_limits[j - 1] if j > 0 else 0
upper = this_limits[j]
handle = ax.bar(ticks, upper - lower, bottom=lower,
color=colors[j], label=self.names[j],
**plot_kwds)
handles.append(handle)
ax.set_title(self.names[i])
# just use the last axis to get handles for plotting
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right')
plotting.adjust_subplots(right=0.85)
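# Illustrative usage sketch (not part of the original module): forecast error
# variance decomposition of a fitted model (`results` is hypothetical).
def _example_fevd(results):
    decomp = results.fevd(periods=10)
    decomp.summary()                  # one table per equation
    return decomp.decomp              # (neqs x periods x neqs) variance shares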
#-------------------------------------------------------------------------------
def _compute_acov(x, nlags=1):
x = x - x.mean(0)
result = []
for lag in range(nlags + 1):
if lag > 0:
r = np.dot(x[lag:].T, x[:-lag])
else:
r = np.dot(x.T, x)
result.append(r)
return np.array(result) / len(x)
def _acovs_to_acorrs(acovs):
sd = np.sqrt(np.diag(acovs[0]))
return acovs / np.outer(sd, sd)
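# Illustrative usage sketch (not part of the original module): for white
# noise, sample autocorrelations at positive lags should be near zero.
def _example_sample_acorr():
    x = np.random.randn(500, 2)
    acorrs = _acovs_to_acorrs(_compute_acov(x, nlags=3))
    return acorrs                     # shape (4, 2, 2); acorrs[0] has unit diagonal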
if __name__ == '__main__':
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.util import parse_lutkepohl_data
import statsmodels.tools.data as data_util
np.set_printoptions(linewidth=140, precision=5)
sdata, dates = parse_lutkepohl_data('data/%s.dat' % 'e1')
names = sdata.dtype.names
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
# model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
est = model.fit(maxlags=2)
irf = est.irf()
y = est.y[-2:]
"""
# irf.plot_irf()
# i = 2; j = 1
# cv = irf.cum_effect_cov(orth=True)
# print np.sqrt(cv[:, j * 3 + i, j * 3 + i]) / 1e-2
# data = np.genfromtxt('Canada.csv', delimiter=',', names=True)
# data = data.view((float, 4))
"""
'''
mdata = sm.datasets.macrodata.load().data
mdata2 = mdata[['realgdp','realcons','realinv']]
names = mdata2.dtype.names
data = mdata2.view((float,3))
data = np.diff(np.log(data), axis=0)
import pandas as pn
df = pn.DataFrame.fromRecords(mdata)
df = np.log(df.reindex(columns=names))
df = (df - df.shift(1)).dropna()
model = VAR(df)
est = model.fit(maxlags=2)
irf = est.irf()
'''
|
bsd-3-clause
|
lanniaoershi/android_kernel_oneplus_msm8994
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack, drawn as a textual but hierarchical tree of calls.
Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
	has the name given by func. If this function is not found
	among its parents, create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
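# Illustrative sketch (not part of the original script): what parseLine
# extracts from a typical ftrace line. The sample line is hypothetical.
def _example_parseLine():
	sample = "   bash-1234  [000]  4154.504927: mutex_unlock <-tracing_stop"
	# returns ("4154.504927", "mutex_unlock", "tracing_stop")
	return parseLine(sample)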
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
gpl-2.0
|
delectable/DIGITS
|
digits/test_status.py
|
5
|
2316
|
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
from gevent import monkey; monkey.patch_all()
from nose.tools import assert_raises
import mock
import pickle
import tempfile
from status import Status
from config import config_value
from job import Job
class TestScheduler():
def test_run_too_soon(self):
job = Job('test')
job.status = Status.WAIT
job.status = Status.RUN
# Status.WAIT should be removed so the len should be 2 rather
# than 3.
assert len(job.status_history) == 2, 'history length should be 2'
def test_empty_history(self):
job = Job('test')
job.status = Status.WAIT
job.status = Status.RUN
job.status_history = []
# An empty history should not happen, but if it did, the value
# should be Status.INIT.
assert job.status == Status.INIT, 'status should be Status.INIT'
def test_set_dict(self):
job = Job('test')
# testing some untested cases in set_dict()
job.status = Status.ERROR
assert job.status.css == 'danger', 'status.css should be "danger".'
job.status = '404'
assert job.status.css == 'default', 'status.css should be "default".'
def test_equality(self):
s = Status(Status.INIT)
# testing __eq__
assert (s == Status.INIT), 'should be true.'
assert (s == 'I'), 'should be true.'
assert not (s == 7), 'should be false.'
assert not (s != Status.INIT), 'should be false.'
assert not (s != 'I'), 'should be false.'
assert (s != 7), 'should be true.'
def test_pickle(self):
        # Testing __setstate__ and __getstate__
s = Status(Status.INIT)
s = Status.WAIT
loaded_status = None
tmpfile_path = 'tmp.p'
with open(tmpfile_path, 'wb') as tmpfile:
pickle.dump(s, tmpfile)
tmpfile.close()
with open(tmpfile_path, 'rb') as tmpfile:
loaded_status = pickle.load(tmpfile)
tmpfile.close()
print loaded_status
assert loaded_status == Status.WAIT, 'status should be WAIT'
def test_str(self):
# Testing __str__
s = Status(Status.INIT)
s = Status.WAIT
assert str(s) == 'W', 'should be W'
|
bsd-3-clause
|
WeblateOrg/weblate
|
weblate/formats/helpers.py
|
2
|
1365
|
#
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from io import BytesIO
CONTROLCHARS = {
"\x00",
"\x01",
"\x02",
"\x03",
"\x04",
"\x05",
"\x06",
"\x07",
"\x08",
"\x0b",
"\x0c",
"\x0e",
"\x0f",
"\x10",
"\x11",
"\x12",
"\x13",
"\x14",
"\x15",
"\x16",
"\x17",
"\x18",
"\x19",
"\x1a",
"\x1b",
"\x1c",
"\x1d",
"\x1e",
"\x1f",
}
class BytesIOMode(BytesIO):
"""StringIO with mode attribute to make ttkit happy."""
def __init__(self, filename, data):
super().__init__(data)
self.mode = "r"
self.name = filename
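# Illustrative usage sketch (not part of the original module): wrap raw bytes
# so translate-toolkit parsers that expect a named, readable file-like object
# can consume them. The filename and payload are made up.
def _example_bytesio_mode():
    handle = BytesIOMode("strings.po", b'msgid "hello"\nmsgstr ""\n')
    return handle.name, handle.mode, handle.read()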
|
gpl-3.0
|
bobobox/ansible
|
lib/ansible/modules/network/nxos/nxos_vlan.py
|
7
|
13865
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_vlan
version_added: "2.1"
short_description: Manages VLAN resources and attributes.
description:
- Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
options:
vlan_id:
description:
- Single VLAN ID.
required: false
default: null
vlan_range:
description:
- Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- Name of VLAN.
required: false
default: null
vlan_state:
description:
      - Manage the vlan operational state of the VLAN
        (equivalent to the state {active | suspend} command).
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
mapped_vni:
description:
- The Virtual Network Identifier (VNI) ID that is mapped to the
VLAN. Valid values are integer and keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure a range of VLANs are not present on the switch
nxos_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
host: 68.170.147.165
username: cisco
password: cisco
state: absent
transport: nxapi
- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
admin_state: down
name: WEB
transport: nxapi
username: cisco
password: cisco
- name: Ensure VLAN is NOT on the device
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
state: absent
transport: nxapi
username: cisco
password: cisco
'''
RETURN = '''
proposed_vlans_list:
description: list of VLANs being proposed
returned: when debug enabled
type: list
sample: ["100"]
existing_vlans_list:
description: list of existing VLANs on the switch prior to making changes
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20"]
end_state_vlans_list:
description: list of VLANs after the module is executed
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20", "100"]
proposed:
description: k/v pairs of parameters passed into module (does not include
vlan_id or vlan_range)
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_state": "suspend", "mapped_vni": "5000"}
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: when debug enabled
type: dict
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_id": "20", "vlan_state": "suspend", "mapped_vni": ""}
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan", "vlan_id": "20",
"vlan_state": "suspend", "mapped_vni": "5000"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
commands:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def vlan_range_to_list(vlans):
result = []
if vlans:
for part in vlans.split(','):
if part == 'none':
break
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return numerical_sort(result)
return result
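# Illustrative usage sketch (not part of the original module): expanding a
# VLAN range expression into a numerically sorted list of string IDs.
def _example_vlan_range_to_list():
    return vlan_range_to_list('2-4,10')   # -> ['2', '3', '4', '10']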
def numerical_sort(string_int_list):
"""Sort list of strings (VLAN IDs) that are digits in numerical order.
"""
as_int_list = []
as_str_list = []
for vlan in string_int_list:
as_int_list.append(int(vlan))
as_int_list.sort()
for vlan in as_int_list:
as_str_list.append(str(vlan))
return as_str_list
def build_commands(vlans, state):
commands = []
for vlan in vlans:
if state == 'present':
command = 'vlan {0}'.format(vlan)
commands.append(command)
elif state == 'absent':
command = 'no vlan {0}'.format(vlan)
commands.append(command)
return commands
def get_vlan_config_commands(vlan, vid):
"""Build command list required for VLAN configuration
"""
reverse_value_map = {
"admin_state": {
"down": "shutdown",
"up": "no shutdown"
}
}
if vlan.get('admin_state'):
# apply value map when making change to the admin state
# note: would need to be a loop or more in depth check if
# value map has more than 1 key
vlan = apply_value_map(reverse_value_map, vlan)
VLAN_ARGS = {
'name': 'name {0}',
'vlan_state': 'state {0}',
'admin_state': '{0}',
'mode': 'mode {0}',
'mapped_vni': 'vn-segment {0}'
}
commands = []
for param, value in vlan.items():
if param == 'mapped_vni' and value == 'default':
command = 'no vn-segment'
else:
command = VLAN_ARGS.get(param).format(vlan.get(param))
if command:
commands.append(command)
commands.insert(0, 'vlan ' + vid)
commands.append('exit')
return commands
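# Illustrative usage sketch (not part of the original module): config commands
# generated for a hypothetical VLAN definition. The middle entries follow
# dict iteration order.
def _example_vlan_commands():
    vlan = {'name': 'WEB', 'admin_state': 'down', 'vlan_state': 'active'}
    # e.g. ['vlan 50', 'name WEB', 'shutdown', 'state active', 'exit']
    return get_vlan_config_commands(vlan, '50')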
def get_list_of_vlans(module):
body = run_commands(module, ['show vlan | json'])
vlan_list = []
vlan_table = body[0].get('TABLE_vlanbrief')['ROW_vlanbrief']
if isinstance(vlan_table, list):
for vlan in vlan_table:
vlan_list.append(str(vlan['vlanshowbr-vlanid-utf']))
else:
vlan_list.append('1')
return vlan_list
def get_vni(vlanid, module):
flags = str('all | section vlan.{0}'.format(vlanid)).split(' ')
body = get_config(module, flags=flags)
#command = 'show run all | section vlan.{0}'.format(vlanid)
#body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
value = ''
if body:
REGEX = re.compile(r'(?:vn-segment\s)(?P<value>.*)$', re.M)
if 'vn-segment' in body:
value = REGEX.search(body).group('value')
return value
def get_vlan(vlanid, module):
"""Get instance of VLAN as a dictionary
"""
command = 'show vlan id %s | json' % vlanid
body = run_commands(module, [command])
#command = 'show vlan id ' + vlanid
#body = execute_show_command(command, module)
try:
vlan_table = body[0]['TABLE_vlanbriefid']['ROW_vlanbriefid']
except (TypeError, IndexError):
return {}
key_map = {
"vlanshowbr-vlanid-utf": "vlan_id",
"vlanshowbr-vlanname": "name",
"vlanshowbr-vlanstate": "vlan_state",
"vlanshowbr-shutstate": "admin_state"
}
vlan = apply_key_map(key_map, vlan_table)
value_map = {
"admin_state": {
"shutdown": "down",
"noshutdown": "up"
}
}
vlan = apply_value_map(value_map, vlan)
vlan['mapped_vni'] = get_vni(vlanid, module)
return vlan
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = str(value)
return new_dict
def apply_value_map(value_map, resource):
for key, value in value_map.items():
resource[key] = value[resource.get(key)]
return resource
def main():
argument_spec = dict(
vlan_id=dict(required=False, type='str'),
vlan_range=dict(required=False),
name=dict(required=False),
vlan_state=dict(choices=['active', 'suspend'], required=False),
mapped_vni=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
admin_state=dict(choices=['up', 'down'], required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
    argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['vlan_range', 'name'],
['vlan_id', 'vlan_range']],
supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
vlan_range = module.params['vlan_range']
vlan_id = module.params['vlan_id']
name = module.params['name']
vlan_state = module.params['vlan_state']
admin_state = module.params['admin_state']
mapped_vni = module.params['mapped_vni']
state = module.params['state']
changed = False
if vlan_id:
if not vlan_id.isdigit():
module.fail_json(msg='vlan_id must be a valid VLAN ID')
args = dict(name=name, vlan_state=vlan_state,
admin_state=admin_state, mapped_vni=mapped_vni)
proposed = dict((k, v) for k, v in args.items() if v is not None)
proposed_vlans_list = numerical_sort(vlan_range_to_list(
vlan_id or vlan_range))
existing_vlans_list = numerical_sort(get_list_of_vlans(module))
commands = []
existing = {}
if vlan_range:
if state == 'present':
# These are all of the VLANs being proposed that don't
# already exist on the switch
vlans_delta = list(
set(proposed_vlans_list).difference(existing_vlans_list))
commands = build_commands(vlans_delta, state)
elif state == 'absent':
# VLANs that are common between what is being proposed and
# what is on the switch
vlans_common = list(
set(proposed_vlans_list).intersection(existing_vlans_list))
commands = build_commands(vlans_common, state)
else:
existing = get_vlan(vlan_id, module)
if state == 'absent':
if existing:
commands = ['no vlan ' + vlan_id]
elif state == 'present':
if (existing.get('mapped_vni') == '0' and
proposed.get('mapped_vni') == 'default'):
proposed.pop('mapped_vni')
delta = dict(set(
proposed.items()).difference(existing.items()))
if delta or not existing:
commands = get_vlan_config_commands(delta, vlan_id)
end_state = existing
end_state_vlans_list = existing_vlans_list
if commands:
if existing.get('mapped_vni') and state != 'absent':
if (existing.get('mapped_vni') != proposed.get('mapped_vni') and
existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
commands.insert(1, 'no vn-segment')
if module.check_mode:
module.exit_json(changed=True,
commands=commands)
else:
load_config(module, commands)
changed = True
end_state_vlans_list = numerical_sort(get_list_of_vlans(module))
if 'configure' in commands:
commands.pop(0)
if vlan_id:
end_state = get_vlan(vlan_id, module)
results = {
'commands': commands,
'updates': commands,
'changed': changed,
'warnings': warnings
}
if module._debug:
results.update({
'proposed_vlans_list': proposed_vlans_list,
'existing_vlans_list': existing_vlans_list,
'proposed': proposed,
'existing': existing,
'end_state': end_state,
'end_state_vlans_list': end_state_vlans_list
})
module.exit_json(**results)
if __name__ == '__main__':
main()
|
gpl-3.0
|