repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
welex91/ansible-modules-core
|
cloud/amazon/s3.py
|
11
|
26913
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: s3
short_description: manage objects in S3.
description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1"
options:
aws_access_key:
description:
- AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: ['ec2_secret_key', 'secret_key']
bucket:
description:
- Bucket name.
required: true
default: null
aliases: []
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
version_added: "1.3"
encrypt:
description:
- When set for PUT mode, asks for server-side encryption
required: false
default: no
version_added: "2.0"
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
required: false
default: 600
aliases: []
headers:
description:
- Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "2.0"
marker:
description:
- Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
required: false
default: null
version_added: "2.0"
max_keys:
description:
- Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
required: false
default: 1000
version_added: "2.0"
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "1.6"
mode:
description:
- Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket), and delobj (delete object, Ansible 2.0+).
required: true
choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
required: false
default: null
permission:
description:
- This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be specified as a list.
required: false
default: private
version_added: "2.0"
prefix:
description:
- Limits the response to keys that begin with the specified prefix for list mode
required: false
default: null
version_added: "2.0"
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
required: false
default: null
aliases: []
version_added: "2.0"
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Accepts a boolean or one of [always, never, different]; true is equivalent to 'always' and false to 'never'. The string values are new in 2.0.
required: false
default: 'always'
version_added: "1.2"
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
required: false
default: null
version_added: "1.8"
retries:
description:
- On recoverable failure, how many times to retry before actually failing.
required: false
default: 0
version_added: "2.0"
s3_url:
description:
- S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS.
default: null
aliases: [ S3_URL ]
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
aliases: []
version_added: "1.3"
requirements: [ "boto" ]
author:
- "Lester Wade (@lwade)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# Get a specific version of an object.
- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# PUT/upload with custom headers
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put [email protected]
# List keys simple
- s3: bucket=mybucket mode=list
# List keys all options
- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472
# Create an empty bucket
- s3: bucket=mybucket mode=create permission=public-read
# Create a bucket with key as directory, in the EU region
- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete
# GET an object but don't download if the file checksums match. New in 2.0
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different
# Delete an object from a bucket
- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
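# The geturl and getstr modes are documented above but had no examples here.
# The two tasks below are illustrative sketches; bucket and key names are placeholders.
# Get a time-expired download URL for an object
- s3: bucket=mybucket object=/my/desired/key.txt expiration=300 mode=geturl
# Get the contents of an object as a string
- s3: bucket=mybucket object=/my/desired/key.txt mode=getstr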
'''
import os
import urlparse
from ssl import SSLError
try:
import boto
import boto.ec2
from boto.s3.connection import Location
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import S3Connection
from boto.s3.acl import CannedACLStrings
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def key_check(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
except s3.provider.storage_response_error, e:
if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned.
key_check = None
else:
module.fail_json(msg=str(e))
if key_check:
return True
else:
return False
def keysum(module, s3, bucket, obj, version=None):
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, s3, bucket):
try:
result = s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
return True
else:
return False
def create_bucket(module, s3, bucket, location=None):
if location is None:
location = Location.DEFAULT
try:
bucket = s3.create_bucket(bucket, location=location)
for acl in module.params.get('permission'):
bucket.set_acl(acl)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def get_bucket(module, s3, bucket):
try:
return s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def list_keys(module, bucket_object, prefix, marker, max_keys):
all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)
keys = [x.key for x in all_keys]
module.exit_json(msg="LIST operation complete", s3_keys=keys)
def delete_bucket(module, s3, bucket):
try:
bucket = s3.lookup(bucket)
bucket_contents = bucket.list()
bucket.delete_keys([key.name for key in bucket_contents])
bucket.delete()
return True
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
if metadata:
for meta_key in metadata.keys():
key.set_metadata(meta_key, metadata[meta_key])
key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers)
for acl in module.params.get('permission'):
key.set_acl(acl)
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
# retries is the number of loops; range/xrange needs to be one
# more to get that count of loops.
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
for x in range(0, retries + 1):
try:
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
except SSLError as e:
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json(msg="s3 download failed; %s" % e)
# otherwise, try again, this may be a transient timeout.
pass
def download_s3str(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
else:
return False
def is_walrus(s3_url):
""" Return True if it's Walrus endpoint, not S3
We assume anything other than *.amazonaws.com is Walrus"""
if s3_url is not None:
o = urlparse.urlparse(s3_url)
return not o.hostname.endswith('amazonaws.com')
else:
return False
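# Illustrative behaviour of the two helpers above (hypothetical URLs):
#   is_fakes3('fakes3://localhost:4567')      -> True
#   is_walrus('https://objects.example.edu')  -> True
#   is_walrus('https://s3.amazonaws.com')     -> False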
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
bucket = dict(required=True),
dest = dict(default=None),
encrypt = dict(default=True, type='bool'),
expiry = dict(default=600, aliases=['expiration']),
headers = dict(type='dict'),
marker = dict(default=None),
max_keys = dict(default=1000),
metadata = dict(type='dict'),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object = dict(),
permission = dict(type='list', default=['private']),
version = dict(default=None),
overwrite = dict(aliases=['force'], default='always'),
prefix = dict(default=None),
retries = dict(aliases=['retry'], type='int', default=0),
s3_url = dict(aliases=['S3_URL']),
src = dict(),
),
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
bucket = module.params.get('bucket')
encrypt = module.params.get('encrypt')
expiry = int(module.params['expiry'])
if module.params.get('dest'):
dest = os.path.expanduser(module.params.get('dest'))
headers = module.params.get('headers')
marker = module.params.get('marker')
max_keys = module.params.get('max_keys')
metadata = module.params.get('metadata')
mode = module.params.get('mode')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
prefix = module.params.get('prefix')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
src = module.params.get('src')
for acl in module.params.get('permission'):
if acl not in CannedACLStrings:
module.fail_json(msg='Unknown permission specified: %s' % str(acl))
if overwrite not in ['always', 'never', 'different']:
if module.boolean(overwrite):
overwrite = 'always'
else:
overwrite = 'never'
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
if module.params.get('object'):
obj = os.path.expanduser(module.params['object'])
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
# bucket names with .'s in them need to use the calling_format option,
# otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
# for more details.
if '.' in bucket:
aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()
# Look at s3_url and tweak connection settings
# if connecting to Walrus or fakes3
try:
if is_fakes3(s3_url):
fakes3 = urlparse.urlparse(s3_url)
s3 = S3Connection(
is_secure=fakes3.scheme == 'fakes3s',
host=fakes3.hostname,
port=fakes3.port,
calling_format=OrdinaryCallingFormat(),
**aws_connect_kwargs
)
elif is_walrus(s3_url):
walrus = urlparse.urlparse(s3_url).hostname
s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
else:
aws_connect_kwargs['is_secure'] = True
s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if s3 is None:
s3 = boto.connect_s3(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
except Exception, e:
module.fail_json(msg='Failed to connect to S3: %s' % str(e))
if s3 is None: # this should never happen
module.fail_json(msg ='Unknown error, failed to create s3 connection, no information from boto.')
# If our mode is a GET operation (download), go through the procedure as appropriate ...
if mode == 'get':
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Source bucket cannot be found", failed=True)
# Next, we check to see if the key in the bucket exists; the md5sum/etag comparison happens further below.
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is False:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
# If the destination path doesn't exist or overwrite is 'always', there is no need to do the md5sum/etag check, so just download.
pathrtn = path_check(dest)
if pathrtn is False or overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
if pathrtn is True:
md5_remote = keysum(module, s3, bucket, obj, version=version)
md5_local = module.md5(dest)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
# Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
if sum_matches is True and overwrite == 'never':
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
# Use this snippet to debug through conditionals:
# module.exit_json(msg="Bucket return %s"%bucketrtn)
# Let's check the src path.
pathrtn = path_check(src)
if pathrtn is False:
module.fail_json(msg="Local object for PUT does not exist", failed=True)
# Let's check to see if the bucket exists to get ground truth.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, obj)
# Let's check the key state. Does it exist, and if it does, compute the etag md5sum.
if bucketrtn is True and keyrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = module.md5(src)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:
create_bucket(module, s3, bucket, location)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# If bucket exists but key doesn't, just upload.
if bucketrtn is True and pathrtn is True and keyrtn is False:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
if obj is None:
module.fail_json(msg="object parameter is required", failed=True);
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_key(module, s3, bucket, obj)
if deletertn is True:
module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Delete an entire bucket, including all objects in the bucket
if mode == 'delete':
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_bucket(module, s3, bucket)
if deletertn is True:
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Support for listing a set of keys
if mode == 'list':
bucket_object = get_bucket(module, s3, bucket)
# If the bucket does not exist then bail out
if bucket_object is None:
module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)
list_keys(module, bucket_object, prefix, marker, max_keys)
# Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
# WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
if mode == 'create':
if bucket and not obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, dirobj)
if keyrtn is True:
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, s3, bucket, dirobj)
if bucketrtn is False:
created = create_bucket(module, s3, bucket, location)
create_dirkey(module, s3, bucket, dirobj)
# Support for grabbing the time-expired URL for an object in S3/Walrus.
if mode == 'geturl':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is True:
get_download_url(module, s3, bucket, obj, expiry)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
if mode == 'getstr':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is True:
download_s3str(module, s3, bucket, obj, version=version)
else:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
module.exit_json(failed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
gpl-3.0
|
brian-yang/mozillians
|
vendor-local/lib/python/celery/worker/mediator.py
|
12
|
2613
|
# -*- coding: utf-8 -*-
"""
celery.worker.mediator
~~~~~~~~~~~~~~~~~~~~~~
The mediator is an internal thread that moves tasks
from an internal :class:`Queue` to the worker pool.
This is only used if rate limits are enabled, as it moves
messages from the rate limited queue (which holds tasks
that are allowed to be processed) to the pool. Disabling
rate limits will also disable this machinery,
and can improve performance.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from Queue import Empty
from ..abstract import StartStopComponent
from ..app import app_or_default
from ..utils.threads import bgThread
class WorkerComponent(StartStopComponent):
name = "worker.mediator"
requires = ("pool", "queues", )
def __init__(self, w, **kwargs):
w.mediator = None
def include_if(self, w):
return not w.disable_rate_limits or w.pool_cls.requires_mediator
def create(self, w):
m = w.mediator = self.instantiate(w.mediator_cls, w.ready_queue,
app=w.app, callback=w.process_task,
logger=w.logger)
return m
class Mediator(bgThread):
#: The task queue, a :class:`~Queue.Queue` instance.
ready_queue = None
#: Callback called when a task is obtained.
callback = None
def __init__(self, ready_queue, callback, logger=None, app=None):
self.app = app_or_default(app)
self.logger = logger or self.app.log.get_default_logger()
self.ready_queue = ready_queue
self.callback = callback
self._does_debug = self.logger.isEnabledFor(logging.DEBUG)
super(Mediator, self).__init__()
def body(self):
try:
task = self.ready_queue.get(timeout=1.0)
except Empty:
return
if task.revoked():
return
if self._does_debug:
self.logger.debug(
"Mediator: Running callback for task: %s[%s]" % (
task.task_name, task.task_id))
try:
self.callback(task)
except Exception, exc:
self.logger.error("Mediator callback raised exception %r",
exc, exc_info=True,
extra={"data": {"id": task.task_id,
"name": task.task_name,
"hostname": task.hostname}})
move = body # XXX compat
|
bsd-3-clause
|
berkmancenter/spectacle
|
node_modules/bbb/node_modules/grunt/node_modules/gzip-js/node_modules/deflate-js/test/deflate.py
|
177
|
2329
|
import os
from colorama import Fore
from helpers import deflate, inflate, run_cmd
outDirDefault = 'test-outs'
testDirDefault = 'test-files'
"""
Run a single test
@param tFile- required; the full path to the file to run
@param level- optional (default: all); the compression level [1-9]
@param delete- optional (default: True); whether to delete the gzipped files
@return True if all tests passed; False if at least one test failed
"""
def runTest(tFile, level=None, delete=True, outDir=outDirDefault):
passed = True
if level == None:
for x in range(1, 10):
if runTest(tFile, x, delete) == False:
passed = False
return passed
# make the test-outs directory
try:
os.mkdir(outDir)
except:
pass
out1 = os.path.join(outDir, '%(file)s.%(level)d.deflate' % {'file': os.path.basename(tFile), 'level' : level})
out2 = os.path.join(outDir, '%(file)s.%(level)d.out.deflate' % {'file': os.path.basename(tFile), 'level' : level})
outData = deflate(tFile, outfile=out1, level=level)
run_cmd('../bin/deflate.js --level %(level)d --file %(file)s --output %(output)s' % {'level' : level, 'file' : tFile, 'output' : out2})
result = run_cmd('diff %(file1)s %(file2)s' % {'file1' : out1, 'file2' : out2})
if result['returncode'] == 0:
status = Fore.GREEN + 'PASSED' + Fore.RESET
else:
passed = False
status = Fore.RED + 'FAILED' + Fore.RESET
print 'Level %(level)d: %(status)s' % {'level' : level, 'status' : status}
if delete == True:
os.remove(out1)
os.remove(out2)
return passed
"""
Runs all tests at the given level. This iterates through the testDir directory defined above.
@param level- The level to run on [1-9] (default: None, runs on all levels)
@param delete- Whether to delete output files after the test is run
@return True if all levels passed, False if at least one failed
"""
def runAll(level=None, delete=True, testDir=testDirDefault, outDir=outDirDefault):
# make the test-outs directory
try:
os.mkdir(outDir)
except:
pass
passed = True
for tFile in os.listdir(testDir):
fullPath = os.path.join(testDir, tFile)
print Fore.YELLOW + tFile + Fore.RESET
if runTest(fullPath, level, delete) == False:
passed = False
print ''
# if we deleted all the files that were created, delete the directory
if delete == True:
os.rmdir(outDir)
return passed
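# Illustrative usage, not part of the original script; it assumes the default
# 'test-files' directory is populated and '../bin/deflate.js' is built, and the
# file name below is a placeholder:
#   runAll()                                                  # all files, levels 1-9
#   runTest('test-files/sample.txt', level=6, delete=False)   # one file, keep outputs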
|
gpl-2.0
|
cmbclh/vnpy1.7
|
archive/vn.strategy/strategydemo/demoEngine.py
|
46
|
6897
|
# encoding: UTF-8
"""
This file contains the middle layer of the trading platform,
wrapping the API and the event engine into a single main engine class for easier management.
When a client wants a server-client setup, with the trading logic hosted in a co-located server room
and the GUI running on a local machine, this main engine handles the remote communication.
"""
import sys
from datetime import date
from time import sleep
import shelve
from PyQt4 import QtCore
from demoApi import *
from eventEngine import EventEngine
########################################################################
class MainEngine:
"""主引擎,负责对API的调度"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ee = EventEngine() # create the event-driven engine
self.md = DemoMdApi(self.ee) # create the API interfaces
self.td = DemoTdApi(self.ee)
self.ee.start() # start the event-driven engine
# state for polling positions and account data
self.countGet = 0 # query delay counter
self.lastGet = 'Account' # type of the last query
self.ee.register(EVENT_TDLOGIN, self.initGet) # start the initial queries after a successful login
# contract storage
self.dictInstrument = {} # dictionary holding contract query results
self.ee.register(EVENT_INSTRUMENT, self.insertInstrument)
#----------------------------------------------------------------------
def login(self, userid, password, brokerid, mdAddress, tdAddress):
"""登陆"""
self.md.login(mdAddress, userid, password, brokerid)
self.td.login(tdAddress, userid, password, brokerid)
#----------------------------------------------------------------------
def subscribe(self, instrumentid, exchangeid):
"""订阅合约"""
self.md.subscribe(instrumentid, exchangeid)
#----------------------------------------------------------------------
def getAccount(self):
"""查询账户"""
self.td.getAccount()
#----------------------------------------------------------------------
def getInvestor(self):
"""查询投资者"""
self.td.getInvestor()
#----------------------------------------------------------------------
def getPosition(self):
"""查询持仓"""
self.td.getPosition()
#----------------------------------------------------------------------
def getInstrument(self):
"""获取合约"""
event = Event(type_=EVENT_LOG)
log = u'查询合约信息'
event.dict_['log'] = log
self.ee.put(event)
self.td.getInstrument()
#----------------------------------------------------------------------
def sendOrder(self, instrumentid, exchangeid, price, pricetype, volume, direction, offset):
"""发单"""
ref = self.td.sendOrder(instrumentid, exchangeid, price, pricetype, volume, direction, offset)
return str(ref)
#----------------------------------------------------------------------
def cancelOrder(self, instrumentid, exchangeid, orderref, frontid, sessionid):
"""撤单"""
self.td.cancelOrder(instrumentid, exchangeid, orderref, frontid, sessionid)
#----------------------------------------------------------------------
def getAccountPosition(self, event):
"""循环查询账户和持仓"""
self.countGet = self.countGet + 1
# send a query every 5 seconds
if self.countGet > 5:
self.countGet = 0 # reset the counter
if self.lastGet == 'Account':
self.getPosition()
self.lastGet = 'Position'
else:
self.getAccount()
self.lastGet = 'Account'
#----------------------------------------------------------------------
def initGet(self, event):
"""在交易服务器登录成功后,开始初始化查询"""
# 打开设定文件setting.vn
f = shelve.open('setting.vn')
# try to read the settings dictionary; if it does not exist, send a query request
try:
d = f['instrument']
# if the locally saved contract data is from today, load it; otherwise send a query request
today = date.today()
if d['date'] == today:
self.dictInstrument = d['dictInstrument']
event = Event(type_=EVENT_LOG)
log = u'合约信息读取完成'
event.dict_['log'] = log
self.ee.put(event)
self.getInvestor()
# start the polling queries
self.ee.register(EVENT_TIMER, self.getAccountPosition)
else:
self.getInstrument()
except KeyError:
self.getInstrument()
f.close()
#----------------------------------------------------------------------
def insertInstrument(self, event):
"""插入合约对象"""
data = event.dict_['data']
last = event.dict_['last']
self.dictInstrument[data['InstrumentID']] = data
# once the contract query has finished, query investor information and start polling
if last:
# save the queried contract data to a local file so later logins today can reuse it without querying again
self.saveInstrument()
event = Event(type_=EVENT_LOG)
log = u'合约信息查询完成'
event.dict_['log'] = log
self.ee.put(event)
self.getInvestor()
# start the polling queries
self.ee.register(EVENT_TIMER, self.getAccountPosition)
#----------------------------------------------------------------------
def selectInstrument(self, instrumentid):
"""获取合约信息对象"""
try:
instrument = self.dictInstrument[instrumentid]
except KeyError:
instrument = None
return instrument
#----------------------------------------------------------------------
def exit(self):
"""退出"""
# 销毁API对象
self.td = None
self.md = None
# stop the event-driven engine
self.ee.stop()
#----------------------------------------------------------------------
def saveInstrument(self):
"""保存合约属性数据"""
f = shelve.open('setting.vn')
d = {}
d['dictInstrument'] = self.dictInstrument
d['date'] = date.today()
f['instrument'] = d
f.close()
|
mit
|
tntnatbry/tensorflow
|
tensorflow/python/kernel_tests/save_restore_ops_test.py
|
77
|
1477
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.platform import test
class ShardedFileOpsTest(test.TestCase):
def testShardedFileName(self):
with session.Session(
target="", config=config_pb2.ConfigProto(device_count={"CPU": 2})):
self.assertEqual(
gen_io_ops._sharded_filename("foo", 4, 100).eval(),
b"foo-00004-of-00100")
self.assertEqual(
gen_io_ops._sharded_filespec("foo", 100).eval(),
b"foo-?????-of-00100")
if __name__ == "__main__":
test.main()
|
apache-2.0
|
Karaage-Cluster/karaage-debian
|
karaage/legacy/people/south_migrations/0004_auto__add_field_person_is_systemuser.py
|
3
|
7580
|
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.is_systemuser'
db.add_column('person', 'is_systemuser', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.is_systemuser'
db.delete_column('person', 'is_systemuser')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'people.institute': {
'Meta': {'object_name': 'Institute', 'db_table': "'institute'"},
'active_delegate': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'active_delegate'", 'null': 'True', 'to': "orm['people.Person']"}),
'delegate': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'delegate'", 'null': 'True', 'to': "orm['people.Person']"}),
'gid': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sub_delegates': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sub_delegates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['people.Person']"})
},
'people.person': {
'Meta': {'object_name': 'Person', 'db_table': "'person'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Institute']"}),
'is_systemuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['people']
|
gpl-3.0
|
azumimuo/family-xbmc-addon
|
plugin.video.exodus/resources/lib/sources/tinydl.py
|
8
|
5845
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['tinydl.com']
self.base_link = 'http://tinydl.com'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
c = client.parseDOM(post, 'content.+?')[0]
u = re.findall('>Single Link(.+?)(?:#ff0000|$)', c.replace('\n', ''))[0]
u = client.parseDOM(u, 'a', ret='href')
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
s = s[0] if s else '0'
items += [(t, i, s) for i in u]
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
|
gpl-2.0
|
130265/Galaxy-S4-Value-Edition-I9515L-Kernel
|
tools/perf/util/setup.py
|
4998
|
1330
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
gpl-2.0
|
GdZ/scriptfile
|
software/googleAppEngine/lib/django_1_3/django/contrib/admin/views/decorators.py
|
151
|
1381
|
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.translation import ugettext as _
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME
def staff_member_required(view_func):
"""
Decorator for views that checks that the user is logged in and is a staff
member, displaying the login page if necessary.
"""
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
defaults = {
'template_name': 'admin/login.html',
'authentication_form': AdminAuthenticationForm,
'extra_context': {
'title': _('Log in'),
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
},
}
return login(request, **defaults)
return wraps(view_func)(_checklogin)
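# Typical usage, shown here only as an illustrative note (not part of the
# original file): wrap an admin view so non-staff users are sent to the
# admin login page.
#
#   @staff_member_required
#   def my_admin_view(request):
#       ...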
|
mit
|
pcabido/socorro
|
socorro/processor/mozilla_transform_rules.py
|
9
|
36051
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import time
import ujson
import re
from sys import maxint
from gzip import open as gzip_open
from ujson import load as json_load
from urllib import unquote_plus
from configman import Namespace
from configman.converters import (
str_to_python_object,
)
from socorro.lib.ooid import dateFromOoid
from socorro.lib.transform_rules import Rule
from socorro.lib.datetimeutil import (
UTC,
datetimeFromISOdateString,
datestring_to_weekly_partition
)
from socorro.lib.context_tools import temp_file_context
from socorro.external.postgresql.dbapi2_util import (
execute_query_fetchall,
execute_no_results
)
#==============================================================================
class ProductRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.product = raw_crash.get('ProductName', '')
processed_crash.version = raw_crash.get('Version', '')
processed_crash.productid = raw_crash.get('ProductID', '')
processed_crash.distributor = raw_crash.get('Distributor', None)
processed_crash.distributor_version = raw_crash.get(
'Distributor_version',
None
)
processed_crash.release_channel = raw_crash.get('ReleaseChannel', '')
# redundant, but I want to exactly match old processors.
processed_crash.ReleaseChannel = raw_crash.get('ReleaseChannel', '')
processed_crash.build = raw_crash.get('BuildID', '')
return True
#==============================================================================
class UserDataRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.url = raw_crash.get('URL', None)
processed_crash.user_comments = raw_crash.get('Comments', None)
processed_crash.email = raw_crash.get('Email', None)
#processed_crash.user_id = raw_crash.get('UserID', '')
processed_crash.user_id = ''
return True
#==============================================================================
class EnvironmentRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.app_notes = raw_crash.get('Notes', '')
return True
#==============================================================================
class PluginRule(Rule): # Hangs are here
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
try:
plugin_hang_as_int = int(raw_crash.get('PluginHang', False))
except ValueError:
plugin_hang_as_int = 0
if plugin_hang_as_int:
processed_crash.hangid = 'fake-' + raw_crash.uuid
else:
processed_crash.hangid = raw_crash.get('HangID', None)
# the processed_crash.hang_type has the following meaning:
# hang_type == -1 is a plugin hang
# hang_type == 1 is a browser hang
# hang_type == 0 is not a hang at all, but a normal crash
try:
hang_as_int = int(raw_crash.get('Hang', False))
except ValueError:
hang_as_int = 0
if hang_as_int:
processed_crash.hang_type = 1
elif plugin_hang_as_int:
processed_crash.hang_type = -1
elif processed_crash.hangid:
processed_crash.hang_type = -1
else:
processed_crash.hang_type = 0
processed_crash.process_type = raw_crash.get('ProcessType', None)
if not processed_crash.process_type:
return True
if processed_crash.process_type == 'plugin':
# Bug#543776 We actually are relaxing the non-null policy...
# a null filename, name, and version is OK. We'll use empty strings
processed_crash.PluginFilename = (
raw_crash.get('PluginFilename', '')
)
processed_crash.PluginName = (
raw_crash.get('PluginName', '')
)
processed_crash.PluginVersion = (
raw_crash.get('PluginVersion', '')
)
return True
#==============================================================================
class AddonsRule(Rule):
required_config = Namespace()
required_config.add_option(
'collect_addon',
doc='boolean indicating if information about add-ons should be '
'collected',
default=True,
)
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
@staticmethod
def _addon_split_or_warn(addon_pair, processor_notes):
addon_splits = addon_pair.split(':', 1)
if len(addon_splits) == 1:
processor_notes.append(
'add-on "%s" is a bad name and/or version' %
addon_pair
)
addon_splits.append('')
return tuple(unquote_plus(x) for x in addon_splits)
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.addons_checked = None
try:
addons_checked_txt = raw_crash.EMCheckCompatibility.lower()
processed_crash.addons_checked = False
if addons_checked_txt == 'true':
processed_crash.addons_checked = True
except KeyError, e:
if 'EMCheckCompatibility' not in str(e):
raise
# it's okay to not have EMCheckCompatibility, other missing things
# are bad
if self.config.chatty:
self.config.logger.debug(
'AddonsRule: collect_addon: %s',
self.config.collect_addon
)
if self.config.collect_addon:
original_addon_str = raw_crash.get('Add-ons', '')
if not original_addon_str:
if self.config.chatty:
self.config.logger.debug(
'AddonsRule: no addons'
)
processed_crash.addons = []
else:
if self.config.chatty:
self.config.logger.debug(
'AddonsRule: trying to split addons'
)
processed_crash.addons = [
self._addon_split_or_warn(
x,
processor_meta.processor_notes
)
for x in original_addon_str.split(',')
]
if self.config.chatty:
self.config.logger.debug(
'AddonsRule: done: %s',
processed_crash.addons
)
return True
#==============================================================================
class DatesAndTimesRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
@staticmethod
def _get_truncate_or_warn(
a_mapping,
key,
notes_list,
default=None,
max_length=10000
):
try:
return a_mapping[key][:max_length]
except (KeyError, AttributeError):
notes_list.append("WARNING: raw_crash missing %s" % key)
return default
except TypeError, x:
notes_list.append(
"WARNING: raw_crash[%s] contains unexpected value: %s; %s" %
(key, a_mapping[key], str(x))
)
return default
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processor_notes = processor_meta.processor_notes
processed_crash.submitted_timestamp = raw_crash.get(
'submitted_timestamp',
dateFromOoid(raw_crash.uuid)
)
if isinstance(processed_crash.submitted_timestamp, basestring):
processed_crash.submitted_timestamp = datetimeFromISOdateString(
processed_crash.submitted_timestamp
)
processed_crash.date_processed = processed_crash.submitted_timestamp
# defaultCrashTime: must have crashed before date processed
submitted_timestamp_as_epoch = int(
time.mktime(processed_crash.submitted_timestamp.timetuple())
)
try:
timestampTime = int(
raw_crash.get('timestamp', submitted_timestamp_as_epoch)
) # the old name for crash time
except ValueError:
timestampTime = 0
processor_notes.append('non-integer value of "timestamp"')
try:
crash_time = int(
self._get_truncate_or_warn(
raw_crash,
'CrashTime',
processor_notes,
timestampTime,
10
)
)
except ValueError:
crash_time = 0
processor_notes.append(
'non-integer value of "CrashTime" (%s)' % raw_crash.CrashTime
)
processed_crash.crash_time = crash_time
if crash_time == submitted_timestamp_as_epoch:
processor_notes.append("client_crash_date is unknown")
# StartupTime: must have started up some time before crash
try:
startupTime = int(raw_crash.get('StartupTime', crash_time))
except ValueError:
startupTime = 0
processor_notes.append('non-integer value of "StartupTime"')
# InstallTime: must have installed some time before startup
try:
installTime = int(raw_crash.get('InstallTime', startupTime))
except ValueError:
installTime = 0
processor_notes.append('non-integer value of "InstallTime"')
processed_crash.client_crash_date = datetime.datetime.fromtimestamp(
crash_time,
UTC
)
processed_crash.install_age = crash_time - installTime
processed_crash.uptime = max(0, crash_time - startupTime)
try:
last_crash = int(raw_crash.SecondsSinceLastCrash)
except (KeyError, TypeError, ValueError):
last_crash = None
processor_notes.append(
'non-integer value of "SecondsSinceLastCrash"'
)
if last_crash > maxint:
last_crash = None
processor_notes.append(
'"SecondsSinceLastCrash" larger than MAXINT - set to NULL'
)
processed_crash.last_crash = last_crash
return True
#==============================================================================
class JavaProcessRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.java_stack_trace = raw_crash.setdefault(
'JavaStackTrace',
None
)
return True
#==============================================================================
class OutOfMemoryBinaryRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
return 'memory_report' in raw_dumps
#--------------------------------------------------------------------------
@staticmethod
def _extract_memory_info(dump_pathname, processor_notes):
"""Extract and return the JSON data from the .json.gz memory report.
file"""
try:
fd = gzip_open(dump_pathname, "rb")
except IOError, x:
error_message = "error in gzip for %s: %r" % (dump_pathname, x)
processor_notes.append(error_message)
return {"ERROR": error_message}
try:
memory_info = json_load(fd)
except ValueError, x:
error_message = "error in json for %s: %r" % (dump_pathname, x)
processor_notes.append(error_message)
return {"ERROR": error_message}
finally:
fd.close()
return memory_info
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
pathname = raw_dumps['memory_report']
with temp_file_context(pathname):
processed_crash.memory_report = self._extract_memory_info(
dump_pathname=pathname,
processor_notes=processor_meta.processor_notes
)
return True
#--------------------------------------------------------------------------
def setup_product_id_map(config, local_config, args_unused):
database_connection = local_config.database_class(local_config)
transaction = local_config.transaction_executor_class(
local_config,
database_connection
)
sql = (
"SELECT product_name, productid, rewrite FROM "
"product_productid_map WHERE rewrite IS TRUE"
)
product_mappings = transaction(
execute_query_fetchall,
sql
)
product_id_map = {}
for product_name, productid, rewrite in product_mappings:
product_id_map[productid] = {
'product_name': product_name,
'rewrite': rewrite
}
return product_id_map
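# Illustrative shape of the mapping returned above (hypothetical values, not
# taken from the original source):
#
#     {'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}': {'product_name': 'Firefox',
#                                                 'rewrite': True}}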
#==============================================================================
class ProductRewrite(Rule):
required_config = Namespace()
required_config.add_option(
'database_class',
doc="the class of the database",
default=
'socorro.external.postgresql.connection_context.ConnectionContext',
from_string_converter=str_to_python_object,
reference_value_from='resource.postgresql',
)
required_config.add_option(
'transaction_executor_class',
default="socorro.database.transaction_executor."
"TransactionExecutorWithInfiniteBackoff",
doc='a class that will manage transactions',
from_string_converter=str_to_python_object,
reference_value_from='resource.postgresql',
)
required_config.add_aggregation(
'product_id_map',
setup_product_id_map
)
#--------------------------------------------------------------------------
def __init__(self, config):
super(ProductRewrite, self).__init__(config)
self.product_id_map = setup_product_id_map(
config,
config,
None
)
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
try:
return raw_crash['ProductID'] in self.product_id_map
except KeyError:
# no ProductID
return False
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
try:
product_id = raw_crash['ProductID']
except KeyError:
self.config.logger.debug('ProductID not in json_doc')
return False
old_product_name = raw_crash['ProductName']
new_product_name = (
self.product_id_map[product_id]['product_name']
)
raw_crash['ProductName'] = new_product_name
self.config.logger.debug(
'product name changed from %s to %s based '
'on productID %s',
old_product_name,
new_product_name,
product_id
)
return True
#==============================================================================
class ESRVersionRewrite(Rule):
#--------------------------------------------------------------------------
def version(self):
return '2.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
return raw_crash.get('ReleaseChannel', '') == 'esr'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
try:
raw_crash['Version'] += 'esr'
except KeyError:
processor_meta.processor_notes.append(
'"Version" missing from esr release raw_crash'
)
#==============================================================================
class PluginContentURL(Rule):
#--------------------------------------------------------------------------
def version(self):
return '2.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
try:
return bool(raw_crash['PluginContentURL'])
except KeyError:
return False
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
raw_crash['URL'] = raw_crash['PluginContentURL']
return True
#==============================================================================
class PluginUserComment(Rule):
#--------------------------------------------------------------------------
def version(self):
return '2.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
try:
return bool(raw_crash['PluginUserComment'])
except KeyError:
return False
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
raw_crash['Comments'] = raw_crash['PluginUserComment']
return True
#==============================================================================
class WebAppRuntime(Rule):
#--------------------------------------------------------------------------
def version(self):
return '2.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
return raw_crash['ProductName'].startswith('Webapp Runtime')
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
raw_crash['ProductName'] = raw_crash['ProductName'].replace(' ', '')
return True
#==============================================================================
class ExploitablityRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
try:
processed_crash.exploitability = (
processed_crash['json_dump']
['sensitive']['exploitability']
)
except KeyError:
processed_crash.exploitability = 'unknown'
processor_meta.processor_notes.append(
"exploitability information missing"
)
return True
#==============================================================================
class FlashVersionRule(Rule):
required_config = Namespace()
required_config.add_option(
'known_flash_identifiers',
doc='A subset of the known "debug identifiers" for flash versions, '
'associated to the version',
default={
'7224164B5918E29AF52365AF3EAF7A500': '10.1.51.66',
'C6CDEFCDB58EFE5C6ECEF0C463C979F80': '10.1.51.66',
'4EDBBD7016E8871A461CCABB7F1B16120': '10.1',
'D1AAAB5D417861E6A5B835B01D3039550': '10.0.45.2',
'EBD27FDBA9D9B3880550B2446902EC4A0': '10.0.45.2',
'266780DB53C4AAC830AFF69306C5C0300': '10.0.42.34',
'C4D637F2C8494896FBD4B3EF0319EBAC0': '10.0.42.34',
'B19EE2363941C9582E040B99BB5E237A0': '10.0.32.18',
'025105C956638D665850591768FB743D0': '10.0.32.18',
'986682965B43DFA62E0A0DFFD7B7417F0': '10.0.23',
'937DDCC422411E58EF6AD13710B0EF190': '10.0.23',
'860692A215F054B7B9474B410ABEB5300': '10.0.22.87',
'77CB5AC61C456B965D0B41361B3F6CEA0': '10.0.22.87',
'38AEB67F6A0B43C6A341D7936603E84A0': '10.0.12.36',
'776944FD51654CA2B59AB26A33D8F9B30': '10.0.12.36',
'974873A0A6AD482F8F17A7C55F0A33390': '9.0.262.0',
'B482D3DFD57C23B5754966F42D4CBCB60': '9.0.262.0',
'0B03252A5C303973E320CAA6127441F80': '9.0.260.0',
'AE71D92D2812430FA05238C52F7E20310': '9.0.246.0',
'6761F4FA49B5F55833D66CAC0BBF8CB80': '9.0.246.0',
'27CC04C9588E482A948FB5A87E22687B0': '9.0.159.0',
'1C8715E734B31A2EACE3B0CFC1CF21EB0': '9.0.159.0',
'F43004FFC4944F26AF228334F2CDA80B0': '9.0.151.0',
'890664D4EF567481ACFD2A21E9D2A2420': '9.0.151.0',
'8355DCF076564B6784C517FD0ECCB2F20': '9.0.124.0',
'51C00B72112812428EFA8F4A37F683A80': '9.0.124.0',
'9FA57B6DC7FF4CFE9A518442325E91CB0': '9.0.115.0',
'03D99C42D7475B46D77E64D4D5386D6D0': '9.0.115.0',
'0CFAF1611A3C4AA382D26424D609F00B0': '9.0.47.0',
'0F3262B5501A34B963E5DF3F0386C9910': '9.0.47.0',
'C5B5651B46B7612E118339D19A6E66360': '9.0.45.0',
'BF6B3B51ACB255B38FCD8AA5AEB9F1030': '9.0.28.0',
'83CF4DC03621B778E931FC713889E8F10': '9.0.16.0',
},
from_string_converter=ujson.loads
)
required_config.add_option(
'flash_re',
doc='a regular expression to match Flash file names',
default=(
r'NPSWF32_?(.*)\.dll|'
'FlashPlayerPlugin_?(.*)\.exe|'
'libflashplayer(.*)\.(.*)|'
'Flash ?Player-?(.*)'
),
from_string_converter=re.compile
)
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _get_flash_version(self, **kwargs):
"""If (we recognize this module as Flash and figure out a version):
Returns version; else (None or '')"""
filename = kwargs.get('filename', None)
version = kwargs.get('version', None)
debug_id = kwargs.get('debug_id', None)
m = self.config.flash_re.match(filename)
if m:
if version:
return version
# we didn't get a version passed into us
            # try to deduce it
groups = m.groups()
if groups[0]:
return groups[0].replace('_', '.')
if groups[1]:
return groups[1].replace('_', '.')
if groups[2]:
return groups[2]
if groups[4]:
return groups[4]
return self.config.known_flash_identifiers.get(
debug_id,
None
)
return None
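    # Illustrative sketch only (not part of the original rule): given the
    # default flash_re above, a module entry such as
    #     {'filename': 'NPSWF32_11_2_202_235.dll', 'version': '', 'debug_id': None}
    # would return '11.2.202.235', because the first regex group captures
    # '11_2_202_235' and underscores are replaced with dots; if no group
    # yields a version, the debug_id is looked up in known_flash_identifiers.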
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.flash_version = ''
flash_version = None
for index, a_module in enumerate(
processed_crash['json_dump']['modules']
):
flash_version = self._get_flash_version(**a_module)
if flash_version:
break
if flash_version:
processed_crash.flash_version = flash_version
else:
processed_crash.flash_version = '[blank]'
return True
#==============================================================================
class Winsock_LSPRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.Winsock_LSP = raw_crash.get('Winsock_LSP', None)
#==============================================================================
class TopMostFilesRule(Rule):
"""Origninating from Bug 519703, the topmost_filenames was specified as
singular, there would be only one. The original programmer, in the
source code stated "Lets build in some flex" and allowed the field to
have more than one in a list. However, in all the years that this existed
it was never expanded to use more than just one. Meanwhile, the code
ambiguously would sometimes give this as as single value and other times
return it as a list of one item.
This rule does not try to reproduce that imbiguity and avoids the list
entirely, just giving one single value. The fact that the destination
varible in the processed_crash is plural rather than singular is
unfortunate."""
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.topmost_filenames = None
try:
crashing_thread = (
processed_crash.json_dump['crash_info']['crashing_thread']
)
stack_frames = (
processed_crash.json_dump['threads'][crashing_thread]['frames']
)
except KeyError, x:
# guess we don't have frames or crashing_thread or json_dump
# we have to give up
processor_meta.processor_notes.append(
"no 'topmost_file' name because '%s' is missing" % x
)
return True
for a_frame in stack_frames:
source_filename = a_frame.get('file', None)
if source_filename:
processed_crash.topmost_filenames = source_filename
return True
return True
#==============================================================================
class MissingSymbolsRule(Rule):
required_config = Namespace()
required_config.add_option(
'database_class',
doc="the class of the database",
default=
'socorro.external.postgresql.connection_context.ConnectionContext',
from_string_converter=str_to_python_object,
reference_value_from='resource.postgresql',
)
required_config.add_option(
'transaction_executor_class',
default="socorro.database.transaction_executor."
"TransactionExecutorWithInfiniteBackoff",
doc='a class that will manage transactions',
from_string_converter=str_to_python_object,
reference_value_from='resource.postgresql',
)
#--------------------------------------------------------------------------
def __init__(self, config):
super(MissingSymbolsRule, self).__init__(config)
self.database = self.config.database_class(config)
self.transaction = self.config.transaction_executor_class(
config,
self.database,
)
self.sql = (
"INSERT INTO missing_symbols_%s (date_processed, debug_file, debug_id)"
" VALUES (%%s, %%s, %%s)"
)
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
try:
date = processed_crash['date_processed']
# update partition information based on date processed
sql = self.sql % datestring_to_weekly_partition(date)
for module in processed_crash['json_dump']['modules']:
try:
if module['missing_symbols']:
self.transaction(
execute_no_results,
sql,
(
date,
module['debug_file'],
module['debug_id']
)
)
except self.database.ProgrammingError as e:
processor_meta.processor_notes.append(
"WARNING: missing symbols rule failed for"
" %s" % raw_crash.uuid
)
except KeyError:
pass
except KeyError:
return False
return True
#==============================================================================
class BetaVersionRule(Rule):
required_config = Namespace()
required_config.add_option(
'database_class',
doc="the class of the database",
default=
'socorro.external.postgresql.connection_context.ConnectionContext',
from_string_converter=str_to_python_object,
reference_value_from='resource.postgresql',
)
required_config.add_option(
'transaction_executor_class',
default="socorro.database.transaction_executor."
"TransactionExecutorWithInfiniteBackoff",
doc='a class that will manage transactions',
from_string_converter=str_to_python_object,
reference_value_from='resource.postgresql',
)
#--------------------------------------------------------------------------
def __init__(self, config):
super(BetaVersionRule, self).__init__(config)
database = config.database_class(config)
self.transaction = config.transaction_executor_class(
config,
database,
)
self._versions_data_cache = {}
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _get_version_data(self, product, version, release_channel, build_id):
key = '%s:%s:%s:%s' % (product, version, release_channel, build_id)
if key in self._versions_data_cache:
return self._versions_data_cache[key]
sql = """
SELECT
pv.version_string
FROM product_versions pv
LEFT JOIN product_version_builds pvb ON
(pv.product_version_id = pvb.product_version_id)
WHERE pv.product_name = %(product)s
AND pv.release_version = %(version)s
AND pv.build_type ILIKE %(release_channel)s
AND pvb.build_id = %(build_id)s
"""
params = {
'product': product,
'version': version,
'release_channel': release_channel,
'build_id': build_id,
}
results = self.transaction(
execute_query_fetchall,
sql,
params
)
for real_version, in results:
self._versions_data_cache[key] = real_version
return self._versions_data_cache.get(key)
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
try:
# We apply this Rule only if the release channel is beta, because
# beta versions are the only ones sending an "incorrect" version
# number in their data.
return processed_crash['release_channel'].lower() == 'beta'
except KeyError:
# No release_channel.
return False
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
try:
# Sanitize the build id to avoid errors during the SQL query.
try:
build_id = int(processed_crash['build'])
except ValueError:
build_id = None
real_version = self._get_version_data(
processed_crash['product'],
processed_crash['version'],
processed_crash['release_channel'],
build_id,
)
if real_version:
processed_crash['version'] = real_version
else:
# This is a beta version but we do not have data about it. It
# could be because we don't have it yet (if the cron jobs are
# running late for example), so we mark this crash. This way,
# we can reprocess it later to give it the correct version.
processed_crash['version'] += 'b0'
processor_meta.processor_notes.append(
'release channel is beta but no version data was found '
'- added "b0" suffix to version number'
)
except KeyError:
return False
return True
#==============================================================================
class FennecBetaError20150430(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _predicate(self, raw_crash, raw_dumps, processed_crash, proc_meta):
return raw_crash['ProductName'].startswith('Fennec') and \
raw_crash['BuildID'] == '20150427090529' and \
raw_crash['ReleaseChannel'] == 'release'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
raw_crash['ReleaseChannel'] = 'beta'
return True
|
mpl-2.0
|
Big-B702/python-for-android
|
python3-alpha/python3-src/Lib/test/test_reprlib.py
|
56
|
12336
|
"""
Test cases for the repr module
Nick Mathewson
"""
import sys
import os
import shutil
import unittest
from test.support import run_unittest
from reprlib import repr as r # Don't shadow builtin repr
from reprlib import Repr
from reprlib import recursive_repr
def nestedTuple(nesting):
t = ()
for i in range(nesting):
t = (t,)
return t
class ReprTests(unittest.TestCase):
def test_string(self):
eq = self.assertEqual
eq(r("abc"), "'abc'")
eq(r("abcdefghijklmnop"),"'abcdefghijklmnop'")
s = "a"*30+"b"*30
expected = repr(s)[:13] + "..." + repr(s)[-14:]
eq(r(s), expected)
eq(r("\"'"), repr("\"'"))
s = "\""*30+"'"*100
expected = repr(s)[:13] + "..." + repr(s)[-14:]
eq(r(s), expected)
def test_tuple(self):
eq = self.assertEqual
eq(r((1,)), "(1,)")
t3 = (1, 2, 3)
eq(r(t3), "(1, 2, 3)")
r2 = Repr()
r2.maxtuple = 2
expected = repr(t3)[:-2] + "...)"
eq(r2.repr(t3), expected)
def test_container(self):
from array import array
from collections import deque
eq = self.assertEqual
# Tuples give up after 6 elements
eq(r(()), "()")
eq(r((1,)), "(1,)")
eq(r((1, 2, 3)), "(1, 2, 3)")
eq(r((1, 2, 3, 4, 5, 6)), "(1, 2, 3, 4, 5, 6)")
eq(r((1, 2, 3, 4, 5, 6, 7)), "(1, 2, 3, 4, 5, 6, ...)")
# Lists give up after 6 as well
eq(r([]), "[]")
eq(r([1]), "[1]")
eq(r([1, 2, 3]), "[1, 2, 3]")
eq(r([1, 2, 3, 4, 5, 6]), "[1, 2, 3, 4, 5, 6]")
eq(r([1, 2, 3, 4, 5, 6, 7]), "[1, 2, 3, 4, 5, 6, ...]")
# Sets give up after 6 as well
eq(r(set([])), "set([])")
eq(r(set([1])), "set([1])")
eq(r(set([1, 2, 3])), "set([1, 2, 3])")
eq(r(set([1, 2, 3, 4, 5, 6])), "set([1, 2, 3, 4, 5, 6])")
eq(r(set([1, 2, 3, 4, 5, 6, 7])), "set([1, 2, 3, 4, 5, 6, ...])")
# Frozensets give up after 6 as well
eq(r(frozenset([])), "frozenset([])")
eq(r(frozenset([1])), "frozenset([1])")
eq(r(frozenset([1, 2, 3])), "frozenset([1, 2, 3])")
eq(r(frozenset([1, 2, 3, 4, 5, 6])), "frozenset([1, 2, 3, 4, 5, 6])")
eq(r(frozenset([1, 2, 3, 4, 5, 6, 7])), "frozenset([1, 2, 3, 4, 5, 6, ...])")
# collections.deque after 6
eq(r(deque([1, 2, 3, 4, 5, 6, 7])), "deque([1, 2, 3, 4, 5, 6, ...])")
# Dictionaries give up after 4.
eq(r({}), "{}")
d = {'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4}
eq(r(d), "{'alice': 1, 'bob': 2, 'charles': 3, 'dave': 4}")
d['arthur'] = 1
eq(r(d), "{'alice': 1, 'arthur': 1, 'bob': 2, 'charles': 3, ...}")
# array.array after 5.
eq(r(array('i')), "array('i', [])")
eq(r(array('i', [1])), "array('i', [1])")
eq(r(array('i', [1, 2])), "array('i', [1, 2])")
eq(r(array('i', [1, 2, 3])), "array('i', [1, 2, 3])")
eq(r(array('i', [1, 2, 3, 4])), "array('i', [1, 2, 3, 4])")
eq(r(array('i', [1, 2, 3, 4, 5])), "array('i', [1, 2, 3, 4, 5])")
eq(r(array('i', [1, 2, 3, 4, 5, 6])),
"array('i', [1, 2, 3, 4, 5, ...])")
def test_numbers(self):
eq = self.assertEqual
eq(r(123), repr(123))
eq(r(123), repr(123))
eq(r(1.0/3), repr(1.0/3))
n = 10**100
expected = repr(n)[:18] + "..." + repr(n)[-19:]
eq(r(n), expected)
def test_instance(self):
eq = self.assertEqual
i1 = ClassWithRepr("a")
eq(r(i1), repr(i1))
i2 = ClassWithRepr("x"*1000)
expected = repr(i2)[:13] + "..." + repr(i2)[-14:]
eq(r(i2), expected)
i3 = ClassWithFailingRepr()
eq(r(i3), ("<ClassWithFailingRepr instance at %x>"%id(i3)))
s = r(ClassWithFailingRepr)
self.assertTrue(s.startswith("<class "))
self.assertTrue(s.endswith(">"))
self.assertIn(s.find("..."), [12, 13])
def test_lambda(self):
self.assertTrue(repr(lambda x: x).startswith(
"<function <lambda"))
# XXX anonymous functions? see func_repr
def test_builtin_function(self):
eq = self.assertEqual
# Functions
eq(repr(hash), '<built-in function hash>')
# Methods
self.assertTrue(repr(''.split).startswith(
'<built-in method split of str object at 0x'))
def test_range(self):
eq = self.assertEqual
eq(repr(range(1)), 'range(0, 1)')
eq(repr(range(1, 2)), 'range(1, 2)')
eq(repr(range(1, 4, 3)), 'range(1, 4, 3)')
def test_nesting(self):
eq = self.assertEqual
# everything is meant to give up after 6 levels.
eq(r([[[[[[[]]]]]]]), "[[[[[[[]]]]]]]")
eq(r([[[[[[[[]]]]]]]]), "[[[[[[[...]]]]]]]")
eq(r(nestedTuple(6)), "(((((((),),),),),),)")
eq(r(nestedTuple(7)), "(((((((...),),),),),),)")
eq(r({ nestedTuple(5) : nestedTuple(5) }),
"{((((((),),),),),): ((((((),),),),),)}")
eq(r({ nestedTuple(6) : nestedTuple(6) }),
"{((((((...),),),),),): ((((((...),),),),),)}")
eq(r([[[[[[{}]]]]]]), "[[[[[[{}]]]]]]")
eq(r([[[[[[[{}]]]]]]]), "[[[[[[[...]]]]]]]")
def test_cell(self):
# XXX Hmm? How to get at a cell object?
pass
def test_descriptors(self):
eq = self.assertEqual
# method descriptors
eq(repr(dict.items), "<method 'items' of 'dict' objects>")
# XXX member descriptors
# XXX attribute descriptors
# XXX slot descriptors
# static and class methods
class C:
def foo(cls): pass
x = staticmethod(C.foo)
self.assertTrue(repr(x).startswith('<staticmethod object at 0x'))
x = classmethod(C.foo)
self.assertTrue(repr(x).startswith('<classmethod object at 0x'))
def test_unsortable(self):
# Repr.repr() used to call sorted() on sets, frozensets and dicts
# without taking into account that not all objects are comparable
x = set([1j, 2j, 3j])
y = frozenset(x)
z = {1j: 1, 2j: 2}
r(x)
r(y)
r(z)
def touch(path, text=''):
fp = open(path, 'w')
fp.write(text)
fp.close()
class LongReprTest(unittest.TestCase):
def setUp(self):
longname = 'areallylongpackageandmodulenametotestreprtruncation'
self.pkgname = os.path.join(longname)
self.subpkgname = os.path.join(longname, longname)
# Make the package and subpackage
shutil.rmtree(self.pkgname, ignore_errors=True)
os.mkdir(self.pkgname)
touch(os.path.join(self.pkgname, '__init__.py'))
shutil.rmtree(self.subpkgname, ignore_errors=True)
os.mkdir(self.subpkgname)
touch(os.path.join(self.subpkgname, '__init__.py'))
# Remember where we are
self.here = os.getcwd()
sys.path.insert(0, self.here)
def tearDown(self):
actions = []
for dirpath, dirnames, filenames in os.walk(self.pkgname):
for name in dirnames + filenames:
actions.append(os.path.join(dirpath, name))
actions.append(self.pkgname)
actions.sort()
actions.reverse()
for p in actions:
if os.path.isdir(p):
os.rmdir(p)
else:
os.remove(p)
del sys.path[0]
def test_module(self):
eq = self.assertEqual
touch(os.path.join(self.subpkgname, self.pkgname + '.py'))
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation
eq(repr(areallylongpackageandmodulenametotestreprtruncation),
"<module '%s' from '%s'>" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__))
eq(repr(sys), "<module 'sys' (built-in)>")
def test_type(self):
eq = self.assertEqual
touch(os.path.join(self.subpkgname, 'foo.py'), '''\
class foo(object):
pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import foo
eq(repr(foo.foo),
"<class '%s.foo'>" % foo.__name__)
def test_object(self):
# XXX Test the repr of a type with a really long tp_name but with no
# tp_repr. WIBNI we had ::Inline? :)
pass
def test_class(self):
touch(os.path.join(self.subpkgname, 'bar.py'), '''\
class bar:
pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import bar
# Module name may be prefixed with "test.", depending on how run.
self.assertEqual(repr(bar.bar), "<class '%s.bar'>" % bar.__name__)
def test_instance(self):
touch(os.path.join(self.subpkgname, 'baz.py'), '''\
class baz:
pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import baz
ibaz = baz.baz()
self.assertTrue(repr(ibaz).startswith(
"<%s.baz object at 0x" % baz.__name__))
def test_method(self):
eq = self.assertEqual
touch(os.path.join(self.subpkgname, 'qux.py'), '''\
class aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
def amethod(self): pass
''')
from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import qux
# Unbound methods first
self.assertTrue(repr(qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod).startswith(
'<function amethod'))
# Bound method next
iqux = qux.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa()
self.assertTrue(repr(iqux.amethod).startswith(
'<bound method aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod of <%s.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa object at 0x' \
% (qux.__name__,) ))
def test_builtin_function(self):
# XXX test built-in functions and methods with really long names
pass
class ClassWithRepr:
def __init__(self, s):
self.s = s
def __repr__(self):
return "ClassWithRepr(%r)" % self.s
class ClassWithFailingRepr:
def __repr__(self):
raise Exception("This should be caught by Repr.repr_instance")
class MyContainer:
'Helper class for TestRecursiveRepr'
def __init__(self, values):
self.values = list(values)
def append(self, value):
self.values.append(value)
@recursive_repr()
def __repr__(self):
return '<' + ', '.join(map(str, self.values)) + '>'
class MyContainer2(MyContainer):
@recursive_repr('+++')
def __repr__(self):
return '<' + ', '.join(map(str, self.values)) + '>'
class TestRecursiveRepr(unittest.TestCase):
def test_recursive_repr(self):
m = MyContainer(list('abcde'))
m.append(m)
m.append('x')
m.append(m)
self.assertEqual(repr(m), '<a, b, c, d, e, ..., x, ...>')
m = MyContainer2(list('abcde'))
m.append(m)
m.append('x')
m.append(m)
self.assertEqual(repr(m), '<a, b, c, d, e, +++, x, +++>')
def test_main():
run_unittest(ReprTests)
run_unittest(LongReprTest)
run_unittest(TestRecursiveRepr)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
EDUlib/edx-platform
|
lms/djangoapps/discussion/rest_api/tests/test_forms.py
|
5
|
7043
|
"""
Tests for Discussion API forms
"""
import itertools
from unittest import TestCase
import ddt
from django.http import QueryDict
from opaque_keys.edx.locator import CourseLocator
from six.moves.urllib.parse import urlencode
from lms.djangoapps.discussion.rest_api.forms import CommentListGetForm, ThreadListGetForm
from openedx.core.djangoapps.util.test_forms import FormTestMixin
class PaginationTestMixin:
"""A mixin for testing forms with pagination fields"""
def test_missing_page(self):
self.form_data.pop("page")
self.assert_field_value("page", 1)
def test_invalid_page(self):
self.form_data["page"] = "0"
self.assert_error("page", "Ensure this value is greater than or equal to 1.")
def test_missing_page_size(self):
self.form_data.pop("page_size")
self.assert_field_value("page_size", 10)
def test_zero_page_size(self):
self.form_data["page_size"] = "0"
self.assert_error("page_size", "Ensure this value is greater than or equal to 1.")
def test_excessive_page_size(self):
self.form_data["page_size"] = "101"
self.assert_field_value("page_size", 100)
@ddt.ddt
class ThreadListGetFormTest(FormTestMixin, PaginationTestMixin, TestCase):
"""Tests for ThreadListGetForm"""
FORM_CLASS = ThreadListGetForm
def setUp(self):
super().setUp()
self.form_data = QueryDict(
urlencode(
{
"course_id": "Foo/Bar/Baz",
"page": "2",
"page_size": "13",
}
),
mutable=True
)
def test_basic(self):
form = self.get_form(expected_valid=True)
assert form.cleaned_data == {
'course_id': CourseLocator.from_string('Foo/Bar/Baz'),
'page': 2,
'page_size': 13,
'topic_id': set(),
'text_search': '',
'following': None,
'view': '',
'order_by': 'last_activity_at',
'order_direction': 'desc',
'requested_fields': set()
}
def test_topic_id(self):
self.form_data.setlist("topic_id", ["example topic_id", "example 2nd topic_id"])
form = self.get_form(expected_valid=True)
assert form.cleaned_data['topic_id'] == {'example topic_id', 'example 2nd topic_id'}
def test_text_search(self):
self.form_data["text_search"] = "test search string"
form = self.get_form(expected_valid=True)
assert form.cleaned_data['text_search'] == 'test search string'
def test_missing_course_id(self):
self.form_data.pop("course_id")
self.assert_error("course_id", "This field is required.")
def test_invalid_course_id(self):
self.form_data["course_id"] = "invalid course id"
self.assert_error("course_id", "'invalid course id' is not a valid course id")
def test_empty_topic_id(self):
self.form_data.setlist("topic_id", ["", "not empty"])
self.assert_error("topic_id", "This field cannot be empty.")
@ddt.data("True", "true", 1, True)
def test_following_true(self, value):
self.form_data["following"] = value
self.assert_field_value("following", True)
@ddt.data("False", "false", 0, False)
def test_following_false(self, value):
self.form_data["following"] = value
self.assert_error("following", "The value of the 'following' parameter must be true.")
def test_invalid_following(self):
self.form_data["following"] = "invalid-boolean"
self.assert_error("following", "Invalid Boolean Value.")
@ddt.data(*itertools.combinations(["topic_id", "text_search", "following"], 2))
def test_mutually_exclusive(self, params):
self.form_data.update({param: "True" for param in params})
self.assert_error(
"__all__",
"The following query parameters are mutually exclusive: topic_id, text_search, following"
)
def test_invalid_view_choice(self):
self.form_data["view"] = "not_a_valid_choice"
self.assert_error("view", "Select a valid choice. not_a_valid_choice is not one of the available choices.")
def test_invalid_sort_by_choice(self):
self.form_data["order_by"] = "not_a_valid_choice"
self.assert_error(
"order_by",
"Select a valid choice. not_a_valid_choice is not one of the available choices."
)
def test_invalid_sort_direction_choice(self):
self.form_data["order_direction"] = "not_a_valid_choice"
self.assert_error(
"order_direction",
"Select a valid choice. not_a_valid_choice is not one of the available choices."
)
@ddt.data(
("view", "unread"),
("view", "unanswered"),
("order_by", "last_activity_at"),
("order_by", "comment_count"),
("order_by", "vote_count"),
("order_direction", "desc"),
)
@ddt.unpack
def test_valid_choice_fields(self, field, value):
self.form_data[field] = value
self.assert_field_value(field, value)
def test_requested_fields(self):
self.form_data["requested_fields"] = "profile_image"
form = self.get_form(expected_valid=True)
assert form.cleaned_data['requested_fields'] == {'profile_image'}
@ddt.ddt
class CommentListGetFormTest(FormTestMixin, PaginationTestMixin, TestCase):
"""Tests for CommentListGetForm"""
FORM_CLASS = CommentListGetForm
def setUp(self):
super().setUp()
self.form_data = {
"thread_id": "deadbeef",
"endorsed": "False",
"page": "2",
"page_size": "13",
}
def test_basic(self):
form = self.get_form(expected_valid=True)
assert form.cleaned_data == {
'thread_id': 'deadbeef',
'endorsed': False,
'page': 2,
'page_size': 13,
'requested_fields': set()
}
def test_missing_thread_id(self):
self.form_data.pop("thread_id")
self.assert_error("thread_id", "This field is required.")
def test_missing_endorsed(self):
self.form_data.pop("endorsed")
self.assert_field_value("endorsed", None)
@ddt.data("True", "true", True, 1)
def test_endorsed_true(self, value):
self.form_data["endorsed"] = value
self.assert_field_value("endorsed", True)
@ddt.data("False", "false", False, 0)
def test_endorsed_false(self, value):
self.form_data["endorsed"] = value
self.assert_field_value("endorsed", False)
def test_invalid_endorsed(self):
self.form_data["endorsed"] = "invalid-boolean"
self.assert_error("endorsed", "Invalid Boolean Value.")
def test_requested_fields(self):
self.form_data["requested_fields"] = {"profile_image"}
form = self.get_form(expected_valid=True)
assert form.cleaned_data['requested_fields'] == {'profile_image'}
|
agpl-3.0
|
alsrgv/tensorflow
|
tensorflow/contrib/recurrent/python/recurrent_api.py
|
39
|
1204
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent computations library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.recurrent.python.ops.functional_rnn import bidirectional_functional_rnn
from tensorflow.contrib.recurrent.python.ops.functional_rnn import functional_rnn
from tensorflow.contrib.recurrent.python.ops.recurrent import Recurrent
# pylint: enable=unused-import
del absolute_import
del division
del print_function
|
apache-2.0
|
dmlc/mxnet
|
ci/docker/qemu/vmcontrol.py
|
6
|
12402
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
"""Utilities to control a guest VM, used for virtual testing with QEMU"""
__author__ = 'Pedro Larroy'
__version__ = '0.1'
import os
import sys
import subprocess
import argparse
import logging
from subprocess import call, check_call, Popen, DEVNULL, PIPE
import time
import multiprocessing
import shlex
###################################################
#
# Virtual testing with QEMU
#
# We start QEMU instances that have a local port in the host redirected to the ssh port.
#
# The VMs are provisioned after boot, tests are run and then they are stopped
#
QEMU_SSH_PORT=2222
QEMU_RAM=4096
QEMU_RUN="""
qemu-system-arm -M virt -m {ram} \
-kernel vmlinuz \
-initrd initrd.img \
-append 'root=/dev/vda1' \
-drive if=none,file=vda.qcow2,format=qcow2,id=hd \
-device virtio-blk-device,drive=hd \
-netdev user,id=mynet,hostfwd=tcp::{ssh_port}-:22 \
-device virtio-net-device,netdev=mynet \
-display none -nographic
"""
QEMU_RUN_INTERACTIVE="""
qemu-system-arm -M virt -m {ram} \
-kernel vmlinuz \
-initrd initrd.img \
-append 'root=/dev/vda1' \
-drive if=none,file=vda.qcow2,format=qcow2,id=hd \
-device virtio-blk-device,drive=hd \
-netdev user,id=mynet,hostfwd=tcp::{ssh_port}-:22 \
-device virtio-net-device,netdev=mynet \
-nographic
"""
def retry(target_exception, tries=4, delay_s=1, backoff=2):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param target_exception: the exception to check. may be a tuple of
exceptions to check
:type target_exception: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay_s: initial delay between retries in seconds
:type delay_s: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
"""
import time
from functools import wraps
def decorated_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay_s
while mtries > 1:
try:
return f(*args, **kwargs)
except target_exception as e:
logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return decorated_retry
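# Illustrative usage of the retry decorator above (a hypothetical example, not
# part of this script): retry a flaky subprocess call up to 4 times, doubling
# the delay between attempts.
#
#     @retry(subprocess.CalledProcessError, tries=4, delay_s=1, backoff=2)
#     def flaky_copy(src, dst):
#         check_call(['scp', src, dst])
#
# The decorated function is attempted `tries` times in total; the final
# attempt happens outside the loop, so its exception propagates to the caller.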
class VMError(RuntimeError):
pass
class VM:
"""Control of the virtual machine"""
def __init__(self, ssh_port=QEMU_SSH_PORT, ram=QEMU_RAM, interactive=False):
self.log = logging.getLogger(VM.__name__)
self.ssh_port = ssh_port
self.timeout_s = 300
self.qemu_process = None
self._detach = False
self._interactive = interactive
self.ram = ram
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._detach:
self.shutdown()
self.terminate()
def start(self):
sys.stderr.flush()
call(['toilet', '-f', 'smbraille', 'Starting QEMU'])
sys.stdout.flush()
self.log.info("Starting VM, ssh port redirected to localhost:%s (inside docker, not exposed by default)", self.ssh_port)
if self.is_running():
raise VMError("VM is running, shutdown first")
if self._interactive:
self.qemu_process = Popen(shlex.split(QEMU_RUN_INTERACTIVE.format(ssh_port=self.ssh_port, ram=self.ram)))
return
else:
self.log.info("Starting in non-interactive mode. Terminal output is disabled.")
self.qemu_process = Popen(shlex.split(QEMU_RUN.format(ssh_port=self.ssh_port, ram=self.ram)), stdout=DEVNULL, stdin=DEVNULL, stderr=PIPE)
def keep_waiting():
return self.is_running()
logging.info("waiting for ssh to be open in the VM (timeout {}s)".format(self.timeout_s))
ssh_working = wait_ssh_open('127.0.0.1', self.ssh_port, keep_waiting, self.timeout_s)
if not self.is_running():
(_, stderr) = self.qemu_process.communicate()
raise VMError("VM failed to start, retcode: {}, stderr: {}".format( self.retcode(), stderr.decode()))
if not ssh_working:
if self.is_running():
self.log.error("VM running but SSH is not working")
self.terminate()
raise VMError("SSH is not working after {} seconds".format(self.timeout_s))
self.log.info("VM is online and SSH is up")
def is_running(self):
return self.qemu_process and self.qemu_process.poll() is None
def retcode(self):
if self.qemu_process:
return self.qemu_process.poll()
else:
raise RuntimeError('qemu process was not started')
def terminate(self):
if self.qemu_process:
logging.info("send term signal")
self.qemu_process.terminate()
time.sleep(3)
logging.info("send kill signal")
self.qemu_process.kill()
self.qemu_process.wait()
self.qemu_process = None
else:
logging.warn("VM.terminate: QEMU process not running")
def detach(self):
self._detach = True
def shutdown(self):
if self.qemu_process:
logging.info("Shutdown via ssh")
# ssh connection will be closed with an error
call(["ssh", "-o", "StrictHostKeyChecking=no", "-p", str(self.ssh_port), "qemu@localhost",
"sudo", "poweroff"])
ret = self.qemu_process.wait(timeout=90)
self.log.info("VM on port %s has shutdown (exit code %d)", self.ssh_port, ret)
self.qemu_process = None
def wait(self):
if self.qemu_process:
self.qemu_process.wait()
def __del__(self):
        if self.is_running() and not self._detach:
logging.info("VM destructor hit")
self.terminate()
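# Illustrative sketch of driving the VM class above (hypothetical, not part of
# this script): the context manager starts QEMU, waits for SSH to come up, and
# shuts the guest down on exit unless detach() was called. The command run via
# qemu_ssh is only an example.
#
#     with VM(ssh_port=QEMU_SSH_PORT, ram=QEMU_RAM) as vm:
#         qemu_provision(vm.ssh_port)
#         qemu_ssh(vm.ssh_port, 'uname', '-a')
#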
def qemu_ssh(ssh_port=QEMU_SSH_PORT, *args):
check_call(["ssh", "-o", "ServerAliveInterval=5", "-o", "StrictHostKeyChecking=no", "-p{}".format(ssh_port), "qemu@localhost", *args])
def qemu_rsync(ssh_port, local_path, remote_path):
check_call(['rsync', '-e', 'ssh -o StrictHostKeyChecking=no -p{}'.format(ssh_port), '-a', local_path, 'qemu@localhost:{}'.format(remote_path)])
def qemu_rsync_to_host(ssh_port, remote_path, local_path):
check_call(['rsync', '-e', 'ssh -o StrictHostKeyChecking=no -p{}'.format(ssh_port), '-va', 'qemu@localhost:{}'.format(remote_path), local_path])
@retry(subprocess.CalledProcessError)
def qemu_provision(ssh_port=QEMU_SSH_PORT):
import glob
logging.info("Provisioning the VM with artifacts and sources")
artifact = glob.glob('/work/mxnet/build/*.whl')
for x in artifact:
qemu_rsync(ssh_port, x, 'mxnet_dist/')
qemu_rsync(ssh_port, '/work/runtime_functions.py','')
qemu_rsync(ssh_port, '/work/vmcontrol.py','')
qemu_rsync(ssh_port, 'mxnet/tests', 'mxnet')
qemu_rsync(ssh_port, 'mxnet/ci/qemu/test_requirements.txt', 'mxnet/test_requirements.txt')
logging.info("Provisioning completed successfully.")
def wait_ssh_open(server, port, keep_waiting=None, timeout=None):
""" Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
    @param keep_waiting: optional callable; waiting stops when it returns False
    @param timeout: in seconds, if None or 0 wait forever
    @return: True or False; if timeout is None it may return only True or
             throw an unhandled network exception
"""
import socket
import errno
import time
log = logging.getLogger('wait_ssh_open')
sleep_s = 1
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
log.debug("Sleeping for %s second(s)", sleep_s)
time.sleep(sleep_s)
s = socket.socket()
try:
if keep_waiting and not keep_waiting():
log.debug("keep_waiting() is set and evaluates to False")
return False
if timeout:
next_timeout = end - now()
if next_timeout < 0:
log.debug("connect time out")
return False
else:
log.debug("connect timeout %d s", next_timeout)
s.settimeout(next_timeout)
log.debug("connect %s:%d", server, port)
s.connect((server, port))
ret = s.recv(1024).decode()
if ret and ret.startswith('SSH'):
s.close()
log.info("wait_ssh_open: port %s:%s is open and ssh is ready", server, port)
return True
else:
log.debug("Didn't get the SSH banner")
s.close()
except ConnectionError as err:
log.debug("ConnectionError %s", err)
if sleep_s == 0:
sleep_s = 1
else:
sleep_s *= 2
except socket.gaierror as err:
log.debug("gaierror %s",err)
return False
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except TimeoutError as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
raise
def wait_port_open(server, port, timeout=None):
""" Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
@param timeout: in seconds, if None or 0 wait forever
    @return: True or False; if timeout is None it may return only True or
             throw an unhandled network exception
"""
import socket
import errno
import time
sleep_s = 0
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
logging.debug("Sleeping for %s second(s)", sleep_s)
time.sleep(sleep_s)
s = socket.socket()
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
logging.info("connect %s %d", server, port)
s.connect((server, port))
except ConnectionError as err:
logging.debug("ConnectionError %s", err)
if sleep_s == 0:
sleep_s = 1
except socket.gaierror as err:
logging.debug("gaierror %s",err)
return False
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except TimeoutError as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
raise
else:
s.close()
logging.info("wait_port_open: port %s:%s is open", server, port)
return True
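# Illustrative usage of the wait helpers above (hypothetical, not part of this
# script): block until the forwarded SSH port answers, giving up after five
# minutes.
#
#     if not wait_ssh_open('127.0.0.1', QEMU_SSH_PORT, timeout=300):
#         raise VMError("SSH did not come up within 300 seconds")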
|
apache-2.0
|
mlperf/inference_results_v0.7
|
closed/Cisco/code/bert/tensorrt/int8_builder_var_seqlen.py
|
18
|
10254
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import tensorrt as trt
import json
from code.bert.tensorrt.builder_utils import add_gelu, mark
def bert_encoder_layer_int8_var_seqlen(cfg, max_seqlen, weights_dict, network, input_tensor, cu_seqlens, layer, mask):
"""builds one encoder layer, setting the dynamic ranges extracted from the qat checkpoint"""
plg_registry = trt.get_plugin_registry()
qkv_plg_creator = plg_registry.get_plugin_creator("CustomQKVToContextPluginDynamic", "2", "")
pc_skln = plg_registry.get_plugin_creator("CustomSkipLayerNormPluginDynamic", "2", "")
dtype=trt.int8
N = cfg.N
H = cfg.H
prefix = 'l{}_'.format(layer)
dr_input = weights_dict[prefix + 'attention_self_query_input_amax']
assert(dr_input ==weights_dict[prefix + 'attention_self_key_input_amax'] )
assert(dr_input ==weights_dict[prefix + 'attention_self_value_input_amax'] )
input_tensor.set_dynamic_range(-dr_input, dr_input)
##### FC QKV
dr_qkv = max(
weights_dict[prefix + 'attention_self_qv_a_input_quantizer_amax'],
weights_dict[prefix + 'attention_self_qv_b_input_quantizer_amax'],
weights_dict[prefix + 'attention_self_av_b_input_quantizer_amax'],
)
Wqkv = np.zeros((3, cfg.hidden_size, cfg.hidden_size), np.float32)
Bqkv = np.zeros((3, cfg.hidden_size), np.float32)
Wqkv[0,:,:] =weights_dict[prefix + 'attention_self_query_kernel']
Wqkv[1,:,:] =weights_dict[prefix + 'attention_self_key_kernel']
Wqkv[2,:,:] =weights_dict[prefix + 'attention_self_value_kernel']
Bqkv[0,:] =weights_dict[prefix + 'attention_self_query_bias']
Bqkv[1,:] =weights_dict[prefix + 'attention_self_key_bias']
Bqkv[2,:] =weights_dict[prefix + 'attention_self_value_bias']
Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H)).transpose((1,0,2,3,4)))
Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H)).transpose((1,0,2)))
fc_qkv = network.add_convolution(input_tensor, cfg.qkv_size, (1,1), Wqkv, Bqkv)
fc_qkv.name = prefix + 'fc_qkv'
fc_qkv_out = fc_qkv.get_output(0)
fc_qkv_out.name = prefix + 'attention_self_qkv_mult'
fc_qkv_out.set_dynamic_range(-dr_qkv, dr_qkv)
##### QKV2CTX
dr_probs = weights_dict[prefix + 'attention_self_av_a_input_quantizer_amax']
dq_probs = dr_probs / 127.0
pf_type = trt.PluginField("type_id", np.array([int(trt.int8)], np.int32), trt.PluginFieldType.INT32)
pf_hidden_size = trt.PluginField("hidden_size", np.array([cfg.hidden_size], np.int32), trt.PluginFieldType.INT32)
pf_num_heads = trt.PluginField("num_heads", np.array([cfg.N], np.int32), trt.PluginFieldType.INT32)
pf_has_mask = trt.PluginField("has_mask", np.array([1], np.int32), trt.PluginFieldType.INT32)
pf_dq_probs = trt.PluginField("dq_probs", np.array([dq_probs], np.float32), trt.PluginFieldType.FLOAT32)
pf_var_seqlen = trt.PluginField("var_seqlen", np.array([int(1)], np.int32), trt.PluginFieldType.FLOAT32)
pfc = trt.PluginFieldCollection([pf_hidden_size, pf_num_heads, pf_has_mask, pf_type, pf_dq_probs, pf_var_seqlen])
qkv2ctx_plug = qkv_plg_creator.create_plugin("qkv2ctx", pfc)
dr_ctx = weights_dict[prefix+'attention_output_dense_input_amax']
qkv2ctx_layer = network.add_plugin_v2([fc_qkv_out, mask, cu_seqlens, max_seqlen], qkv2ctx_plug)
qkv2ctx_layer.name = prefix + 'qkv_to_ctx'
qkv2ctx_out = qkv2ctx_layer.get_output(0)
qkv2ctx_out.set_dynamic_range(-dr_ctx, dr_ctx)
##### FC AOUT
dr_fc_aout = weights_dict[prefix + 'attention_output_add_local_input_quantizer_amax']
Waout = weights_dict[prefix + 'attention_output_dense_kernel']
Baout = weights_dict[prefix + 'attention_output_dense_bias']
fc_aout = network.add_convolution(qkv2ctx_out, cfg.hidden_size, (1,1), Waout, Baout)
fc_aout.precision=dtype
fc_aout.name = prefix + 'fc_aout'
fc_aout_out = fc_aout.get_output(0)
fc_aout_out.dtype = dtype
fc_aout_out.set_dynamic_range(-dr_fc_aout, dr_fc_aout)
##### Skip-Layernorm 1
dr_skln1 = weights_dict[prefix + 'intermediate_dense_input_amax']
pf_ld = trt.PluginField("ld", np.array([cfg.hidden_size], np.int32), trt.PluginFieldType.INT32)
pf_type = trt.PluginField("type_id", np.array([int(dtype)], np.int32), trt.PluginFieldType.INT32)
pf_beta = trt.PluginField("beta", weights_dict[prefix+'attention_output_layernorm_beta'], trt.PluginFieldType.FLOAT32)
pf_gamma = trt.PluginField("gamma", weights_dict[prefix+'attention_output_layernorm_gamma'], trt.PluginFieldType.FLOAT32)
pf_bias = trt.PluginField("bias", Baout, trt.PluginFieldType.FLOAT32)
fields = [pf_ld, pf_beta, pf_gamma, pf_type]
pfc = trt.PluginFieldCollection(fields)
skipln_plug = pc_skln.create_plugin("skipln", pfc)
fc_aout_out.dtype = dtype
skipln_inputs = [fc_aout_out, input_tensor]
skln1 = network.add_plugin_v2(skipln_inputs, skipln_plug)
skln1.name = prefix+'skln_1'
skln1_out = skln1.get_output(0)
skln1_out.dtype = dtype
skln1_out.set_dynamic_range(-dr_skln1, dr_skln1)
##### FC MID
Wmid = weights_dict[prefix + 'intermediate_dense_kernel']
Bmid = weights_dict[prefix + 'intermediate_dense_bias']
fc_mid = network.add_convolution(skln1_out, cfg.mid_size, (1,1),Wmid, Bmid)
fc_mid.name =prefix+'fc_mid'
fc_mid_out = fc_mid.get_output(0)
##### GELU
dr_gelu = weights_dict[prefix + 'output_dense_input_amax']
gelu_layer = add_gelu(network, fc_mid_out)
gelu_layer.name= prefix + 'gelu'
gelu_out = gelu_layer.get_output(0)
gelu_out.set_dynamic_range(-dr_gelu, dr_gelu)
##### FC OUT
dr_fc_out = weights_dict[prefix + 'output_add_local_input_quantizer_amax']
Wout = weights_dict[prefix + 'output_dense_kernel']
Bout = weights_dict[prefix + 'output_dense_bias']
fc_out = network.add_convolution(gelu_out, cfg.hidden_size, (1,1),Wout, Bout)
fc_out.name =prefix+'fc_out'
fc_out.precision = dtype
fc_out_out = fc_out.get_output(0)
fc_out_out.dtype = dtype
fc_out_out.set_dynamic_range(-dr_fc_out, dr_fc_out)
##### Skip-Layernorm 2
pf_beta = trt.PluginField("beta", weights_dict[prefix+'output_layernorm_beta'], trt.PluginFieldType.FLOAT32)
pf_gamma = trt.PluginField("gamma", weights_dict[prefix+'output_layernorm_gamma'], trt.PluginFieldType.FLOAT32)
pf_bias = trt.PluginField("bias", Bout, trt.PluginFieldType.FLOAT32)
fields = [pf_ld, pf_beta, pf_gamma, pf_type]
pfc = trt.PluginFieldCollection(fields)
skipln_plug = pc_skln.create_plugin("skipln", pfc)
skln1_out.dtype = dtype #It does not build without setting this here, in addition to above. WHY??!?!
skipln_inputs = [fc_out_out, skln1_out]
skln2 = network.add_plugin_v2(skipln_inputs, skipln_plug)
skln2.name = prefix+'skln_2'
skln2_out = skln2.get_output(0)
return skln2_out
def bert_squad_int8_var_seqlen(network, weights_dict, cfg, input_shape, cu_seqlens_shape):
#instantiate all the plugins
plg_registry = trt.get_plugin_registry()
pc_emb = plg_registry.get_plugin_creator("CustomEmbLayerNormPluginDynamic", "2", "")
wbeta = trt.PluginField("bert_embeddings_layernorm_beta", weights_dict["bert_embeddings_layernorm_beta"], trt.PluginFieldType.FLOAT32)
wgamma = trt.PluginField("bert_embeddings_layernorm_gamma", weights_dict["bert_embeddings_layernorm_gamma"], trt.PluginFieldType.FLOAT32)
wwordemb = trt.PluginField("bert_embeddings_word_embeddings", weights_dict["bert_embeddings_word_embeddings"], trt.PluginFieldType.FLOAT32)
wtokemb = trt.PluginField("bert_embeddings_token_type_embeddings", weights_dict["bert_embeddings_token_type_embeddings"], trt.PluginFieldType.FLOAT32)
wposemb = trt.PluginField("bert_embeddings_position_embeddings", weights_dict["bert_embeddings_position_embeddings"], trt.PluginFieldType.FLOAT32)
output_fp16 = trt.PluginField("output_fp16", np.array([int(trt.float16)]).astype(np.int32), trt.PluginFieldType.INT32)
pfc = trt.PluginFieldCollection([wbeta, wgamma, wwordemb, wtokemb, wposemb, output_fp16])
embln_plugin = pc_emb.create_plugin("embeddings", pfc)
dtype = trt.int8
input_ids = network.add_input(name="input_ids", dtype=trt.int32, shape=input_shape)
segment_ids = network.add_input(name="segment_ids", dtype=trt.int32, shape=input_shape)
cu_seqlens = network.add_input(name="cu_seqlens", dtype=trt.int32, shape=cu_seqlens_shape)
#dummy input used to indicate maximum sequence length to plugins
max_seqlen = network.add_input(name="max_seqlen", dtype=trt.int32, shape=(-1,))
inputs = [input_ids, segment_ids, cu_seqlens, max_seqlen]
emb_layer = network.add_plugin_v2(inputs, embln_plugin)
emb_layer.name = 'embln'
embeddings = emb_layer.get_output(0)
mask = emb_layer.get_output(1)
embeddings.dtype = dtype
mask.set_dynamic_range(-1,1)
layer = 0
for layer in range(cfg.L):
embeddings = bert_encoder_layer_int8_var_seqlen(cfg, max_seqlen, weights_dict, network, embeddings, cu_seqlens, layer, mask)
Wsquad = weights_dict['cls_squad_output_weights']
Bsquad = weights_dict['cls_squad_output_bias']
dr_out = weights_dict['bert_encoder_final_input_quantizer_amax']
embeddings.set_dynamic_range(-dr_out, dr_out)
#squad_output = network.add_fully_connected(embeddings, 2, Wsquad, Bsquad)
squad_output = network.add_convolution(embeddings, 2, (1,1), Wsquad, Bsquad)
squad_output.name = 'squad_logits'
logits = squad_output.get_output(0)
# output shape will be sum_s x 2 (x 1 x 1)
mark(network, logits, trt.float16)
|
apache-2.0
|
iDTLabssl/hr
|
hr_unported/hr_public_holidays/__openerp__.py
|
1
|
1365
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2011,2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Public Holidays',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'author': "Michael Telahun Makonnen <[email protected]>,Odoo Community Association (OCA)",
'description': """
Manage Public Holidays
======================
""",
'website': 'http://miketelahun.wordpress.com',
'license': 'AGPL-3',
'depends': [
'hr',
],
'data': [
'security/ir.model.access.csv',
'hr_public_holidays_view.xml',
],
'test': [
],
'installable': True,
}
|
agpl-3.0
|
minhphung171093/GreenERP_V9
|
openerp/addons/payment_buckaroo/controllers/main.py
|
10
|
1210
|
# -*- coding: utf-8 -*-
import json
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class BuckarooController(http.Controller):
_return_url = '/payment/buckaroo/return'
_cancel_url = '/payment/buckaroo/cancel'
_exception_url = '/payment/buckaroo/error'
_reject_url = '/payment/buckaroo/reject'
@http.route([
'/payment/buckaroo/return',
'/payment/buckaroo/cancel',
'/payment/buckaroo/error',
'/payment/buckaroo/reject',
], type='http', auth='none')
def buckaroo_return(self, **post):
""" Buckaroo."""
_logger.info('Buckaroo: entering form_feedback with post data %s', pprint.pformat(post)) # debug
request.registry['payment.transaction'].form_feedback(request.cr, SUPERUSER_ID, post, 'buckaroo', context=request.context)
return_url = post.pop('return_url', '')
if not return_url:
            data = post.pop('ADD_RETURNDATA', '{}').replace("'", "\"")
custom = json.loads(data)
return_url = custom.pop('return_url', '/')
return werkzeug.utils.redirect(return_url)
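# Added note, not part of the original controller: the fallback above expects
# Buckaroo to echo the custom data with single quotes, e.g.
#   ADD_RETURNDATA = "{'return_url': '/shop/payment/validate'}"
# After the replace() the string becomes valid JSON, json.loads() yields a
# dict, and its 'return_url' key (a hypothetical path here) drives the final
# redirect.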
|
gpl-3.0
|
DepthDeluxe/ansible
|
lib/ansible/plugins/action/ce_config.py
|
89
|
4192
|
#
# Copyright 2015 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ce import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, the __backup__ key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
        for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
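# Added note, not part of the original action plugin: _write_backup() above
# stores one backup per host under <role or playbook dir>/backup/, removing
# any previous backups for that host first, with file names of the form
#   backup/<inventory_hostname>_config.2017-01-31@12:00:00
# (the date portion follows the strftime pattern used above; the sample
# timestamp is illustrative).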
|
gpl-3.0
|
Bitl/RBXLegacy-src
|
Cut/RBXLegacyDiscordBot/lib/pip/commands/freeze.py
|
342
|
2835
|
from __future__ import absolute_import
import sys
import pip
from pip.compat import stdlib_pkgs
from pip.basecommand import Command
from pip.operations.freeze import freeze
from pip.wheel import WheelCache
DEV_PKGS = ('pip', 'setuptools', 'distribute', 'wheel')
class FreezeCommand(Command):
"""
Output installed packages in requirements format.
    Packages are listed in a case-insensitive sorted order.
"""
name = 'freeze'
usage = """
%prog [options]"""
summary = 'Output installed packages in requirements format.'
log_streams = ("ext://sys.stderr", "ext://sys.stderr")
def __init__(self, *args, **kw):
super(FreezeCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help="Use the order in the given requirements file and its "
"comments when generating output. This option can be "
"used multiple times.")
self.cmd_opts.add_option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='URL',
help='URL for finding packages, which will be added to the '
'output.')
self.cmd_opts.add_option(
'-l', '--local',
dest='local',
action='store_true',
default=False,
help='If in a virtualenv that has global access, do not output '
'globally-installed packages.')
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
self.cmd_opts.add_option(
'--all',
dest='freeze_all',
action='store_true',
help='Do not skip these packages in the output:'
' %s' % ', '.join(DEV_PKGS))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
format_control = pip.index.FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
skip = set(stdlib_pkgs)
if not options.freeze_all:
skip.update(DEV_PKGS)
freeze_kwargs = dict(
requirement=options.requirements,
find_links=options.find_links,
local_only=options.local,
user_only=options.user,
skip_regex=options.skip_requirements_regex,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
skip=skip)
for line in freeze(**freeze_kwargs):
sys.stdout.write(line + '\n')
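# Added usage sketch, not part of pip itself: the options registered above map
# to command lines such as
#   pip freeze
#   pip freeze --all                # also list pip/setuptools/distribute/wheel
#   pip freeze -r requirements.txt  # keep the order and comments of that file
#   pip freeze --user               # only packages installed in user-site
# (requirements.txt is an illustrative file name.)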
|
gpl-3.0
|
coberger/DIRAC
|
Interfaces/scripts/dirac-admin-sync-users-from-file.py
|
10
|
1861
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-sync-users-from-file
# Author : Adrian Casajus
########################################################################
"""
Sync users in Configuration with the cfg contents.
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.CFG import CFG
Script.registerSwitch( "t", "test", "Only test. Don't commit changes" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... UserCfg' % Script.scriptName,
'Arguments:',
' UserCfg: Cfg FileName with Users as sections containing DN, Groups, and other properties as options' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getExtraCLICFGFiles()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
testOnly = False
errorList = []
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ( "t", "test" ):
testOnly = True
try:
usersCFG = CFG().loadFromFile( args[0] )
except Exception, e:
errorList.append( "file open", "Can't parse file %s: %s" % ( args[0], str( e ) ) )
errorCode = 1
else:
if not diracAdmin.csSyncUsersWithCFG( usersCFG ):
errorList.append( ( "modify users", "Cannot sync with %s" % args[0] ) )
exitCode = 255
if not exitCode and not testOnly:
result = diracAdmin.csCommitChanges()
if not result[ 'OK' ]:
errorList.append( ( "commit", result[ 'Message' ] ) )
exitCode = 255
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
|
gpl-3.0
|
WSDC-NITWarangal/django
|
tests/view_tests/tests/py3_test_debug.py
|
335
|
1849
|
"""
Since this file contains Python 3 specific syntax, it's named without a test_
prefix so the test runner won't try to import it. Instead, the test class is
imported in test_debug.py, but only on Python 3.
This filename is also in setup.cfg flake8 exclude since the Python 2 syntax
error (raise ... from ...) can't be silenced using NOQA.
"""
import sys
from django.test import RequestFactory, TestCase
from django.views.debug import ExceptionReporter
class Py3ExceptionReporterTests(TestCase):
rf = RequestFactory()
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError('Top level')
except AttributeError as explicit:
try:
raise ValueError('Second exception') from explicit
except ValueError:
raise IndexError('Final exception')
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
self.assertEqual(2, html.count(explicit_exc.format("Top level")))
self.assertEqual(2, html.count(implicit_exc.format("Second exception")))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format("Top level"), text)
self.assertIn(implicit_exc.format("Second exception"), text)
|
bsd-3-clause
|
Gitden/pdf-plugin
|
scripts/glyphdump.py
|
126
|
1435
|
#!/usr/bin/python
import sys
agl = []
agltab = []
aglmap = {}
print "/*"
f = open("glyphlist.txt", "r")
for line in f.readlines():
if line[0] == '#':
print line.strip()
continue
line = line[:-1]
name, list = line.split(';')
list = map(lambda x: int(x, 16), list.split(' '))
agl.append((name, list))
for name, ucslist in agl:
num = len(ucslist)
ucs = ucslist[0]
agltab.append((name, ucs))
if ucs not in aglmap:
aglmap[ucs] = []
aglmap[ucs].append(name)
print "*/"
print
def dumplist(list):
n = 0;
for item in list:
n += len(item) + 1
if n > 78:
sys.stdout.write("\n")
n = len(item) + 1
sys.stdout.write(item)
sys.stdout.write(",")
sys.stdout.write("\n")
agltab.sort()
namelist = []
codelist = []
for name, ucs in agltab:
namelist.append("\"%s\"" % name)
codelist.append("%d" % ucs)
keys = aglmap.keys()
keys.sort()
dupoffsets = []
dupnames = []
for ucs in keys:
list = aglmap[ucs]
ofs = len(dupnames)
if len(list) > 1:
dupoffsets.append("%d,%d" % (ucs, ofs))
for name in list:
dupnames.append("\"%s\"" % name)
dupnames.append("0")
print "static const char *agl_name_list[] = {"
dumplist(namelist)
print "};"
print
print "static const unsigned short agl_code_list[] = {"
dumplist(codelist)
print "};"
print
print "static const unsigned short agl_dup_offsets[] = {"
dumplist(dupoffsets)
print "};"
print
print "static const char *agl_dup_names[] = {"
dumplist(dupnames)
print "};"
|
agpl-3.0
|
adamhooper/quebec-municipal-elections-2013
|
data/gen-csvs/make_montreal_txt.py
|
1
|
5005
|
#!/usr/bin/env python3
import csv
import os.path
import json
from xml.etree import ElementTree
XML_FILE = os.path.join(os.path.dirname(__file__), '..', 'raw', 'media.xml')
OUT_FILE = os.path.join(os.path.dirname(__file__), 'montreal-data.txt')
# Returns a dict with { id, name, party, nVotes }
def parseCandidateNode(node):
candidate = { 'id': node.attrib['id'] }
firstName = None
lastName = None
for childNode in node:
if childNode.tag == 'prenom':
firstName = childNode.text
elif childNode.tag == 'nom':
lastName = childNode.text
elif childNode.tag == 'parti':
candidate['party'] = childNode.text
elif childNode.tag == 'nb_voix_obtenues':
candidate['nVotes'] = int(childNode.text)
candidate['name'] = "%s %s" % (firstName, lastName)
return candidate
# Returns a "Post" JSON-like object.
def parsePostNode(postNode):
if postNode.tag == 'sommaire':
postId = '0,00'
else:
postId = postNode.attrib['id']
post = { 'id': postId, 'district': None, 'borough': None }
post['candidates'] = candidates = []
boroughId = None
districtId = None
for childNode in postNode:
if childNode.tag == 'type':
post['type'] = childNode.text
elif childNode.tag == 'arrondissement' and childNode.text:
post['borough'] = childNode.text
elif childNode.tag == 'district' and childNode.text:
post['district'] = childNode.text
elif childNode.tag == 'nb_electeurs_inscrits':
post['nVoters'] = int(childNode.text)
elif childNode.tag == 'nb_total_voix_recueillies':
post['nVotes'] = int(childNode.text)
elif childNode.tag == 'nb_bulletins_rejetes':
post['nBallotsRejected'] = int(childNode.text)
elif childNode.tag == 'nb_bureaux_total':
post['nStations'] = int(childNode.text)
elif childNode.tag == 'nb_bureaux_depouilles':
post['nStationsReported'] = int(childNode.text)
elif childNode.tag == 'candidat':
candidate = parseCandidateNode(childNode)
candidates.append(candidate)
post['candidates'].sort(key=lambda c: -c['nVotes'])
return post
# Returns a list of posts from the XML file
# posts: [
# 'id': id,
# 'type': type,
# 'district': String or null,
# 'borough': String or null,
# 'nVoters': Number,
# 'nVotes': Number,
# 'nBallotsRejected': Number,
# 'nStations': Number,
# 'nStationsReported': Number,
# candidates: List of {
# 'id': id,
# 'name': full name,
# 'party': String,
# 'nVotes': Number
# }
# ]
# }
def readData():
print('Reading from %s...' % (XML_FILE,))
posts = []
tree = ElementTree.parse(XML_FILE)
root = tree.getroot()
for node in root:
if node.tag == 'resultats_postes':
for postNode in node:
post = parsePostNode(postNode)
posts.append(post)
elif node.tag == 'resultats_maire':
# The mayor post does not appear elsewhere. (The borough one does.)
for postNode in node:
if postNode.tag == 'sommaire':
post = parsePostNode(postNode)
post['type'] = 'M'
posts.append(post)
posts.sort(key=lambda p: p['id'])
return posts
def main():
posts = readData()
lastBorough = None
lastDistrict = None
with open(OUT_FILE, 'w') as outFile:
outFile.write('MONTREAL - MAYOR\n\n')
for post in posts:
if post['borough'] != lastBorough:
lastBorough = post['borough']
lastDistrict = post['district']
outFile.write('\nBOROUGH: %s\n\n' % lastBorough)
outFile.write('\nDISTRICT: %s\n\n' % lastDistrict)
elif post['district'] != lastDistrict:
lastDistrict = post['district']
outFile.write('\nDISTRICT: %s (still borough %s)\n\n' % (lastDistrict, post['borough']))
postType = {
'M': 'Mayor',
'CA': 'Borough councillor',
'CV': 'City councillor',
'C': 'Councillor',
'MC': 'Borough mayor'
}[post['type']]
outFile.write('\nPOST: %s\n\n' % postType)
for rank, candidate in enumerate(post['candidates']):
party = candidate['party']
if party == 'Indépendant':
party = 'Independent'
if len(post['candidates']) == 1:
state = 'Elected without opposition'
elif rank == 0:
state = 'Elected'
else:
state = 'Not elected'
outFile.write('{0}, {1}\t{2:,d}\n'.format(candidate['name'], party, candidate['nVotes']))
if __name__ == '__main__':
main()
|
unlicense
|
rajsadho/django
|
tests/utils_tests/test_timezone.py
|
149
|
7857
|
import copy
import datetime
import pickle
import unittest
from django.test import override_settings
from django.utils import timezone
try:
import pytz
except ImportError:
pytz = None
requires_pytz = unittest.skipIf(pytz is None, "this test requires pytz")
if pytz is not None:
CET = pytz.timezone("Europe/Paris")
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
class TimezoneTests(unittest.TestCase):
def test_localtime(self):
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
local_tz = timezone.LocalTimezone()
local_now = timezone.localtime(now, local_tz)
self.assertEqual(local_now.tzinfo, local_tz)
def test_localtime_naive(self):
with self.assertRaises(ValueError):
timezone.localtime(datetime.datetime.now())
def test_localtime_out_of_range(self):
local_tz = timezone.LocalTimezone()
long_ago = datetime.datetime(1900, 1, 1, tzinfo=timezone.utc)
try:
timezone.localtime(long_ago, local_tz)
except (OverflowError, ValueError) as exc:
self.assertIn("install pytz", exc.args[0])
else:
raise unittest.SkipTest("Failed to trigger an OverflowError or ValueError")
def test_now(self):
with override_settings(USE_TZ=True):
self.assertTrue(timezone.is_aware(timezone.now()))
with override_settings(USE_TZ=False):
self.assertTrue(timezone.is_naive(timezone.now()))
def test_override(self):
default = timezone.get_default_timezone()
try:
timezone.activate(ICT)
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_override_decorator(self):
default = timezone.get_default_timezone()
@timezone.override(EAT)
def func_tz_eat():
self.assertIs(EAT, timezone.get_current_timezone())
@timezone.override(None)
def func_tz_none():
self.assertIs(default, timezone.get_current_timezone())
try:
timezone.activate(ICT)
func_tz_eat()
self.assertIs(ICT, timezone.get_current_timezone())
func_tz_none()
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
func_tz_eat()
self.assertIs(default, timezone.get_current_timezone())
func_tz_none()
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_copy(self):
self.assertIsInstance(copy.copy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.copy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_deepcopy(self):
self.assertIsInstance(copy.deepcopy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.deepcopy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_pickling_unpickling(self):
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.UTC())), timezone.UTC)
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.LocalTimezone())), timezone.LocalTimezone)
def test_is_aware(self):
self.assertTrue(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertFalse(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_is_naive(self):
self.assertFalse(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertTrue(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_make_aware(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
with self.assertRaises(ValueError):
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT)
def test_make_naive(self):
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
with self.assertRaises(ValueError):
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT)
@requires_pytz
def test_make_aware2(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 12, 20, 30), CET),
CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)))
with self.assertRaises(ValueError):
timezone.make_aware(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET)
@requires_pytz
def test_make_aware_pytz(self):
self.assertEqual(
timezone.make_naive(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET),
datetime.datetime(2011, 9, 1, 12, 20, 30))
self.assertEqual(
timezone.make_naive(
pytz.timezone("Asia/Bangkok").localize(datetime.datetime(2011, 9, 1, 17, 20, 30)), CET
),
datetime.datetime(2011, 9, 1, 12, 20, 30))
with self.assertRaises(ValueError):
timezone.make_naive(datetime.datetime(2011, 9, 1, 12, 20, 30), CET)
@requires_pytz
def test_make_aware_pytz_ambiguous(self):
# 2:30 happens twice, once before DST ends and once after
ambiguous = datetime.datetime(2015, 10, 25, 2, 30)
with self.assertRaises(pytz.AmbiguousTimeError):
timezone.make_aware(ambiguous, timezone=CET)
std = timezone.make_aware(ambiguous, timezone=CET, is_dst=False)
dst = timezone.make_aware(ambiguous, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
@requires_pytz
def test_make_aware_pytz_non_existent(self):
# 2:30 never happened due to DST
non_existent = datetime.datetime(2015, 3, 29, 2, 30)
with self.assertRaises(pytz.NonExistentTimeError):
timezone.make_aware(non_existent, timezone=CET)
std = timezone.make_aware(non_existent, timezone=CET, is_dst=False)
dst = timezone.make_aware(non_existent, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
# round trip to UTC then back to CET
std = timezone.localtime(timezone.localtime(std, timezone.UTC()), CET)
dst = timezone.localtime(timezone.localtime(dst, timezone.UTC()), CET)
self.assertEqual((std.hour, std.minute), (3, 30))
self.assertEqual((dst.hour, dst.minute), (1, 30))
|
bsd-3-clause
|
paulrouget/servo
|
tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/pastebin.py
|
34
|
3631
|
""" submit failure or test session information to a pastebin service. """
from __future__ import absolute_import, division, print_function
import pytest
import six
import sys
import tempfile
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group._addoption(
"--pastebin",
metavar="mode",
action="store",
dest="pastebin",
default=None,
choices=["failed", "all"],
help="send failed|all info to bpaste.net pastebin service.",
)
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
if config.option.pastebin == "all":
tr = config.pluginmanager.getplugin("terminalreporter")
# if no terminal reporter plugin is present, nothing we can do here;
# this can happen when this function executes in a slave node
# when using pytest-xdist, for example
if tr is not None:
# pastebin file will be utf-8 encoded binary file
config._pastebinfile = tempfile.TemporaryFile("w+b")
oldwrite = tr._tw.write
def tee_write(s, **kwargs):
oldwrite(s, **kwargs)
if isinstance(s, six.text_type):
s = s.encode("utf-8")
config._pastebinfile.write(s)
tr._tw.write = tee_write
def pytest_unconfigure(config):
if hasattr(config, "_pastebinfile"):
# get terminal contents and delete file
config._pastebinfile.seek(0)
sessionlog = config._pastebinfile.read()
config._pastebinfile.close()
del config._pastebinfile
# undo our patching in the terminal reporter
tr = config.pluginmanager.getplugin("terminalreporter")
del tr._tw.__dict__["write"]
# write summary
tr.write_sep("=", "Sending information to Paste Service")
pastebinurl = create_new_paste(sessionlog)
tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents):
"""
Creates a new paste using bpaste.net service.
:contents: paste contents as utf-8 encoded bytes
:returns: url to the pasted contents
"""
import re
if sys.version_info < (3, 0):
from urllib import urlopen, urlencode
else:
from urllib.request import urlopen
from urllib.parse import urlencode
params = {
"code": contents,
"lexer": "python3" if sys.version_info[0] == 3 else "python",
"expiry": "1week",
}
url = "https://bpaste.net"
response = urlopen(url, data=urlencode(params).encode("ascii")).read()
m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8"))
if m:
return "%s/show/%s" % (url, m.group(1))
else:
return "bad response: " + response
def pytest_terminal_summary(terminalreporter):
import _pytest.config
if terminalreporter.config.option.pastebin != "failed":
return
tr = terminalreporter
if "failed" in tr.stats:
terminalreporter.write_sep("=", "Sending information to Paste Service")
for rep in terminalreporter.stats.get("failed"):
try:
msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
except AttributeError:
msg = tr._getfailureheadline(rep)
tw = _pytest.config.create_terminal_writer(
terminalreporter.config, stringio=True
)
rep.toterminal(tw)
s = tw.stringio.getvalue()
assert len(s)
pastebinurl = create_new_paste(s)
tr.write_line("%s --> %s" % (msg, pastebinurl))
|
mpl-2.0
|
netscaler/neutron
|
neutron/extensions/l3.py
|
7
|
9764
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira Networks, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Dan Wendlandt, Nicira, Inc
#
from abc import abstractmethod
from oslo.config import cfg
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import exceptions as qexception
from neutron import manager
from neutron.plugins.common import constants
from neutron import quota
# L3 Exceptions
class RouterNotFound(qexception.NotFound):
message = _("Router %(router_id)s could not be found")
class RouterInUse(qexception.InUse):
message = _("Router %(router_id)s still has active ports")
class RouterInterfaceNotFound(qexception.NotFound):
message = _("Router %(router_id)s does not have "
"an interface with id %(port_id)s")
class RouterInterfaceNotFoundForSubnet(qexception.NotFound):
message = _("Router %(router_id)s has no interface "
"on subnet %(subnet_id)s")
class RouterInterfaceInUseByFloatingIP(qexception.InUse):
message = _("Router interface for subnet %(subnet_id)s on router "
"%(router_id)s cannot be deleted, as it is required "
"by one or more floating IPs.")
class FloatingIPNotFound(qexception.NotFound):
message = _("Floating IP %(floatingip_id)s could not be found")
class ExternalGatewayForFloatingIPNotFound(qexception.NotFound):
message = _("External network %(external_network_id)s is not reachable "
"from subnet %(subnet_id)s. Therefore, cannot associate "
"Port %(port_id)s with a Floating IP.")
class FloatingIPPortAlreadyAssociated(qexception.InUse):
message = _("Cannot associate floating IP %(floating_ip_address)s "
"(%(fip_id)s) with port %(port_id)s "
"using fixed IP %(fixed_ip)s, as that fixed IP already "
"has a floating IP on external network %(net_id)s.")
class L3PortInUse(qexception.InUse):
message = _("Port %(port_id)s has owner %(device_owner)s and therefore"
" cannot be deleted directly via the port API.")
class RouterExternalGatewayInUseByFloatingIp(qexception.InUse):
message = _("Gateway cannot be updated for router %(router_id)s, since a "
"gateway to external network %(net_id)s is required by one or "
"more floating IPs.")
ROUTERS = 'routers'
EXTERNAL_GW_INFO = 'external_gateway_info'
RESOURCE_ATTRIBUTE_MAP = {
ROUTERS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'enforce_policy': True}
},
'floatingips': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'floating_ip_address': {'allow_post': False, 'allow_put': False,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'floating_network_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'router_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'port_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'fixed_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_address_or_none': None},
'is_visible': True, 'default': None},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True}
},
}
l3_quota_opts = [
cfg.IntOpt('quota_router',
default=10,
help=_('Number of routers allowed per tenant, -1 for '
'unlimited')),
cfg.IntOpt('quota_floatingip',
default=50,
help=_('Number of floating IPs allowed per tenant, '
'-1 for unlimited')),
]
cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS')
class L3(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron L3 Router"
@classmethod
def get_alias(cls):
return "router"
@classmethod
def get_description(cls):
return ("Router abstraction for basic L3 forwarding"
" between L2 Neutron networks and access to external"
" networks via a NAT gateway.")
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/neutron/router/api/v1.0"
@classmethod
def get_updated(cls):
return "2012-07-20T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
attr.PLURALS.update(dict(my_plurals))
exts = []
plugin = manager.NeutronManager.get_service_plugins()[
constants.L3_ROUTER_NAT]
for resource_name in ['router', 'floatingip']:
collection_name = resource_name + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
member_actions = {}
if resource_name == 'router':
member_actions = {'add_router_interface': 'PUT',
'remove_router_interface': 'PUT'}
quota.QUOTAS.register_resource_by_name(resource_name)
controller = base.create_resource(
collection_name, resource_name, plugin, params,
member_actions=member_actions,
allow_pagination=cfg.CONF.allow_pagination,
allow_sorting=cfg.CONF.allow_sorting)
ex = extensions.ResourceExtension(collection_name,
controller,
member_actions=member_actions,
attr_map=params)
exts.append(ex)
return exts
def update_attributes_map(self, attributes):
super(L3, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class RouterPluginBase(object):
@abstractmethod
def create_router(self, context, router):
pass
@abstractmethod
def update_router(self, context, id, router):
pass
@abstractmethod
def get_router(self, context, id, fields=None):
pass
@abstractmethod
def delete_router(self, context, id):
pass
@abstractmethod
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
pass
@abstractmethod
def add_router_interface(self, context, router_id, interface_info):
pass
@abstractmethod
def remove_router_interface(self, context, router_id, interface_info):
pass
@abstractmethod
def create_floatingip(self, context, floatingip):
pass
@abstractmethod
def update_floatingip(self, context, id, floatingip):
pass
@abstractmethod
def get_floatingip(self, context, id, fields=None):
pass
@abstractmethod
def delete_floatingip(self, context, id):
pass
@abstractmethod
def get_floatingips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
def get_routers_count(self, context, filters=None):
raise NotImplementedError()
def get_floatingips_count(self, context, filters=None):
raise NotImplementedError()
|
apache-2.0
|
sergiopasra/numina
|
numina/tests/plugins.py
|
3
|
4762
|
#
# Copyright 2014-2019 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
from __future__ import print_function
import os
import tarfile
import warnings
import sys
import pytest
if "pytest_benchmark" in sys.modules:
HAS_BENCHMARCK = True
else:
from .nobenchmark import benchmark
HAS_BENCHMARCK = False
import numina.util.context as ctx
from .drpmocker import DRPMocker
from .testcache import download_cache
from .pytest_resultcmp import ResultCompPlugin
@pytest.fixture
def numinatmpdir(tmpdir):
"""Return a temporary directory for recipe testing"""
tmpdir.mkdir('_work')
tmpdir.mkdir('_data')
return tmpdir
@pytest.fixture
def numinatpldir(tmpdir, request):
"""Return a temporary dataset for recipe testing.
Return a temporary directory path object
for numina, where a dataset has been downloaded
from a remote location, based on
the module variable BASE_URL and the test function name
"""
# Name of the dataset based on the function name
tarname = request.function.__name__[5:]
    # Base url to download
base = getattr(request.module, 'BASE_URL')
url = base + tarname + '.tar.gz'
downloaded = download_cache(url)
tmpdir.chdir()
# Uncompress
with tarfile.open(downloaded.name, mode="r:gz") as tar:
tar.extractall()
os.remove(downloaded.name)
os.chdir('tpl')
return tmpdir
@pytest.fixture
def drpmocker(monkeypatch):
"""A fixture that mocks the loading of DRPs"""
return DRPMocker(monkeypatch)
@pytest.fixture(scope='module')
def datamanager_remote(tmp_path_factory, request):
"""Return a DataManager object create from a remote dataset"""
from numina.user.helpers import create_datamanager
req_base_default = "https://guaix.fis.ucm.es/data/"
req_base = getattr(request.module, 'TEST_SET_HOST', req_base_default)
req_tarname = getattr(request.module, 'TEST_SET_FILE')
req_datadir = getattr(request.module, 'TEST_SET_DATADIR', 'data')
req_control = getattr(request.module, 'TEST_SET_CONTROL', "control_v2.yaml")
basedir = tmp_path_factory.mktemp('manager')
datadir = basedir / req_datadir # pathlib syntax
reqfile = basedir / req_control
if req_tarname is None:
raise ValueError('Undefined TEST_SET_FILE')
url = req_base + req_tarname
# Download everything
with ctx.working_directory(basedir):
downloaded = download_cache(url)
# Uncompress
with tarfile.open(downloaded.name, mode="r:gz") as tar:
tar.extractall()
os.remove(downloaded.name)
# Insert OBS in the control file....
dm = create_datamanager(reqfile, basedir, datadir)
# This is not really needed...
# If everything is in the file already
# with working_directory(basedir):
# obsresults = ['obs_ids.yaml']
# sessions, loaded_obs = load_observations(obsresults, is_session=False)
# dm.backend.add_obs(loaded_obs)
return dm
def pytest_report_header(config):
if not HAS_BENCHMARCK:
return "pytest-benchmark not installed"
return ""
def pytest_addoption(parser):
parser.addoption('--resultcmp', action='store_true',
help="enable comparison of recipe results to reference results stored")
parser.addoption('--resultcmp-generate-path',
help="directory to generate reference files in, relative to location where py.test is run", action='store')
parser.addoption('--resultcmp-reference-path',
help="directory containing reference files, relative to location where py.test is run", action='store')
def pytest_configure(config):
config.getini('markers').append(
'result_compare: Apply to tests that provide recipe results to compare with a reference')
if config.getoption("--resultcmp", default=False) or config.getoption("--resultcmp-generate-path", default=None) is not None:
reference_dir = config.getoption("--resultcmp-reference-path")
generate_dir = config.getoption("--resultcmp-generate-path")
if reference_dir is not None and generate_dir is not None:
warnings.warn("Ignoring --resultcmp-reference-path since --resultcmp-generate-path is set")
if reference_dir is not None:
reference_dir = os.path.abspath(reference_dir)
if generate_dir is not None:
reference_dir = os.path.abspath(generate_dir)
# default_format = config.getoption("--resultcmp-default-format") or 'text'
config.pluginmanager.register(ResultCompPlugin(
config, reference_dir=reference_dir, generate_dir=generate_dir
))
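# Added usage sketch, not part of numina: the options registered above are
# combined on the pytest command line, for example
#   pytest --resultcmp --resultcmp-reference-path=reference/
#   pytest --resultcmp-generate-path=reference/   # (re)generate reference files
# Both paths are relative to where pytest is run, as the help strings state,
# and "reference/" here is an illustrative directory name.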
|
gpl-3.0
|
babyliynfg/cross
|
tools/project-creator/Python2.6.6/Lib/encodings/iso8859_11.py
|
93
|
12898
|
""" Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-11',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
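# Added example, not part of the generated codec: with the tables above,
# byte 0xA1 round-trips to THAI CHARACTER KO KAI (U+0E01), e.g.
#   codecs.charmap_decode(b'\xa1', 'strict', decoding_table)[0] == u'\u0e01'
#   codecs.charmap_encode(u'\u0e01', 'strict', encoding_table)[0] == b'\xa1'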
|
mit
|
srivassumit/servo
|
tests/wpt/css-tests/tools/pytest/doc/en/example/multipython.py
|
171
|
1735
|
"""
module containing a parametrized tests testing cross-python
serialization via the pickle module.
"""
import py
import pytest
import _pytest._code
pythonlist = ['python2.6', 'python2.7', 'python3.3']
@pytest.fixture(params=pythonlist)
def python1(request, tmpdir):
picklefile = tmpdir.join("data.pickle")
return Python(request.param, picklefile)
@pytest.fixture(params=pythonlist)
def python2(request, python1):
return Python(request.param, python1.picklefile)
class Python:
def __init__(self, version, picklefile):
self.pythonpath = py.path.local.sysfind(version)
if not self.pythonpath:
pytest.skip("%r not found" %(version,))
self.picklefile = picklefile
def dumps(self, obj):
dumpfile = self.picklefile.dirpath("dump.py")
dumpfile.write(_pytest._code.Source("""
import pickle
f = open(%r, 'wb')
s = pickle.dump(%r, f, protocol=2)
f.close()
""" % (str(self.picklefile), obj)))
py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
def load_and_is_true(self, expression):
loadfile = self.picklefile.dirpath("load.py")
loadfile.write(_pytest._code.Source("""
import pickle
f = open(%r, 'rb')
obj = pickle.load(f)
f.close()
res = eval(%r)
if not res:
raise SystemExit(1)
""" % (str(self.picklefile), expression)))
print (loadfile)
py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))
@pytest.mark.parametrize("obj", [42, {}, {1:3},])
def test_basic_objects(python1, python2, obj):
python1.dumps(obj)
python2.load_and_is_true("obj == %s" % obj)
|
mpl-2.0
|
40223222/40223222
|
static/Brython3.1.1-20150328-091302/Lib/xml/sax/expatreader.py
|
870
|
14659
|
"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref, _weakref
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
"""Locator for use with the ExpatParser class.
This uses a weak reference to the parser object to avoid creating
a circular reference between the parser and the content handler.
"""
def __init__(self, parser):
self._ref = _mkproxy(parser)
def getColumnNumber(self):
parser = self._ref
if parser._parser is None:
return None
return parser._parser.ErrorColumnNumber
def getLineNumber(self):
parser = self._ref
if parser._parser is None:
return 1
return parser._parser.ErrorLineNumber
def getPublicId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getPublicId()
def getSystemId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
def __init__(self, namespaceHandling=0, bufsize=2**16-20):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
self._external_ges = 1
self._interning = None
# XMLReader methods
def parse(self, source):
"Parse an XML document from a URL or an InputSource."
source = saxutils.prepare_input_source(source)
self._source = source
self.reset()
self._cont_handler.setDocumentLocator(ExpatLocator(self))
xmlreader.IncrementalParser.parse(self, source)
def prepareParser(self, source):
if source.getSystemId() is not None:
self._parser.SetBase(source.getSystemId())
# Redefined setContentHandler to allow changing handlers during parsing
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
elif name == feature_string_interning:
return self._interning is not None
elif name in (feature_validation, feature_external_pes,
feature_namespace_prefixes):
return 0
elif name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException(
"expat does not support validation")
elif name == feature_external_pes:
if state:
raise SAXNotSupportedException(
"expat does not read external parameter entities")
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException(
"expat does not report namespace prefixes")
else:
raise SAXNotRecognizedException(
"Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
elif name == property_interning_dict:
return self._interning
elif name == property_xml_string:
if self._parser:
if hasattr(self._parser, "GetInputContext"):
return self._parser.GetInputContext()
else:
raise SAXNotRecognizedException(
"This version of expat does not support getting"
" the XML string")
else:
raise SAXNotSupportedException(
"XML string cannot be returned when not parsing")
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" %
name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" %
name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error as e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def close(self):
if self._entity_stack:
# If we are completing an external entity, do nothing here
return
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
bs = self._source.getByteStream()
if bs is not None:
bs.close()
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(),
intern = self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
# This pyexpat does not support SkippedEntity
pass
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
# no namespace
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
# default namespace
pair = tuple(pair)
newattrs = {}
qnames = {}
for (aname, value) in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
# no namespace
qname = aname
apair = (None, aname)
elif length == 3:
qname = "%s:%s" % (parts[2], parts[1])
apair = parts[0], parts[1]
else:
# default namespace
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
if not self._external_ges:
return 1
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
def skipped_entity_handler(self, name, is_pe):
if is_pe:
            # The SAX spec requires skipped PEs to be reported with a '%'
name = '%'+name
self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
return ExpatParser(*args, **kwargs)
# ---
if __name__ == "__main__":
import xml.sax.saxutils
p = create_parser()
p.setContentHandler(xml.sax.saxutils.XMLGenerator())
p.setErrorHandler(xml.sax.ErrorHandler())
p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml")
|
gpl-3.0
|
RNAer/qiita
|
qiita_pet/handlers/study_handlers/listing_handlers.py
|
1
|
11653
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from json import dumps
from future.utils import viewitems
from collections import defaultdict
from tornado.web import authenticated, HTTPError
from tornado.gen import coroutine, Task
from pyparsing import ParseException
from qiita_db.user import User
from qiita_db.study import Study, StudyPerson
from qiita_db.search import QiitaStudySearch
from qiita_db.metadata_template import SampleTemplate
from qiita_db.logger import LogEntry
from qiita_db.exceptions import QiitaDBIncompatibleDatatypeError
from qiita_db.util import get_table_cols
from qiita_db.data import ProcessedData
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.util import study_person_linkifier, pubmed_linkifier
def _get_shared_links_for_study(study):
shared = []
for person in study.shared_with:
person = User(person)
name = person.info['name']
email = person.email
        # Name is optional, so default to the email if the name is nonexistent
if name:
shared.append(study_person_linkifier(
(email, name)))
else:
shared.append(study_person_linkifier(
(email, email)))
return ", ".join(shared)
def _build_single_study_info(study, info, study_proc, proc_samples):
"""Clean up and add to the study info for HTML purposes
Parameters
----------
study : Study object
The study to build information for
info : dict
Information from Study.get_info
study_proc : dict of dict of lists
Dictionary keyed on study_id that lists all processed data associated
with that study. This list of processed data ids is keyed by data type
proc_samples : dict of lists
Dictionary keyed on proc_data_id that lists all samples associated with
that processed data.
Returns
-------
dict
        The passed-in info plus extra information for the study,
        slightly HTML formatted
"""
PI = StudyPerson(info['principal_investigator_id'])
status = study.status
if info['pmid'] is not None:
info['pmid'] = ", ".join([pubmed_linkifier([p])
for p in info['pmid']])
else:
info['pmid'] = ""
if info["number_samples_collected"] is None:
info["number_samples_collected"] = 0
info["shared"] = _get_shared_links_for_study(study)
info["num_raw_data"] = len(study.raw_data())
info["status"] = status
info["study_id"] = study.id
info["pi"] = study_person_linkifier((PI.email, PI.name))
del info["principal_investigator_id"]
del info["email"]
# Build the proc data info list for the child row in datatable
info["proc_data_info"] = []
for data_type, proc_datas in viewitems(study_proc[study.id]):
info["proc_data_info"].extend([
_build_single_proc_data_info(pd_id, data_type, proc_samples[pd_id])
for pd_id in proc_datas])
return info
def _build_single_proc_data_info(proc_data_id, data_type, samples):
"""Build the proc data info list for the child row in datatable
Parameters
----------
    proc_data_id : int
        The ID of the processed data attached to the study
    data_type : str
        Data type of the processed data
    samples : list of str
        The samples available in the processed data
Returns
-------
dict
The information for the processed data, in the form {info: value, ...}
"""
proc_data = ProcessedData(proc_data_id)
proc_info = proc_data.processing_info
proc_info['pid'] = proc_data_id
proc_info['data_type'] = data_type
proc_info['samples'] = sorted(samples)
proc_info['processed_date'] = str(proc_info['processed_date'])
return proc_info
def _build_study_info(user, study_proc=None, proc_samples=None):
"""Builds list of dicts for studies table, with all HTML formatted
Parameters
----------
user : User object
logged in user
    study_proc : dict of dict of lists, optional
Dictionary keyed on study_id that lists all processed data associated
with that study. Required if proc_samples given.
proc_samples : dict of lists, optional
Dictionary keyed on proc_data_id that lists all samples associated with
that processed data. Required if study_proc given.
Returns
-------
    infolist: list of dict of lists and dicts
        study and processed data info for JSON serialization for datatables
        Each dict in the list is a single study, and contains its
        HTML-formatted information
Notes
-----
Both study_proc and proc_samples must be passed, or neither passed.
"""
build_samples = False
# Logic check to make sure both needed parts passed
if study_proc is not None and proc_samples is None:
raise IncompetentQiitaDeveloperError(
'Must pass proc_samples when study_proc given')
elif proc_samples is not None and study_proc is None:
raise IncompetentQiitaDeveloperError(
'Must pass study_proc when proc_samples given')
elif study_proc is None:
build_samples = True
# get list of studies for table
study_set = user.user_studies.union(
Study.get_by_status('public')).union(user.shared_studies)
if study_proc is not None:
study_set = study_set.intersection(study_proc)
if not study_set:
# No studies left so no need to continue
return []
# get info for the studies
cols = ['study_id', 'email', 'principal_investigator_id',
'pmid', 'study_title', 'metadata_complete',
'number_samples_collected', 'study_abstract']
study_info = Study.get_info(study_set, cols)
infolist = []
for info in study_info:
# Convert DictCursor to proper dict
info = dict(info)
study = Study(info['study_id'])
# Build the processed data info for the study if none passed
if build_samples:
proc_data_list = study.processed_data()
proc_samples = {}
study_proc = {study.id: defaultdict(list)}
for pid in proc_data_list:
proc_data = ProcessedData(pid)
study_proc[study.id][proc_data.data_type()].append(pid)
proc_samples[pid] = proc_data.samples
study_info = _build_single_study_info(study, info, study_proc,
proc_samples)
infolist.append(study_info)
return infolist
def _check_owner(user, study):
"""make sure user is the owner of the study requested"""
if not user.id == study.owner:
raise HTTPError(403, "User %s does not own study %d" %
(user.id, study.id))
class ListStudiesHandler(BaseHandler):
@authenticated
@coroutine
def get(self, message="", msg_level=None):
all_emails_except_current = yield Task(self._get_all_emails)
all_emails_except_current.remove(self.current_user.id)
avail_meta = SampleTemplate.metadata_headers() +\
get_table_cols("study")
self.render('list_studies.html',
availmeta=avail_meta,
all_emails_except_current=all_emails_except_current,
message=message,
msg_level=msg_level)
def _get_all_emails(self, callback):
callback(list(User.iter()))
class StudyApprovalList(BaseHandler):
@authenticated
def get(self):
user = self.current_user
if user.level != 'admin':
raise HTTPError(403, 'User %s is not admin' % self.current_user)
result_generator = viewitems(
ProcessedData.get_by_status_grouped_by_study('awaiting_approval'))
study_generator = ((Study(sid), pds) for sid, pds in result_generator)
parsed_studies = [(s.id, s.title, s.owner, pds)
for s, pds in study_generator]
self.render('admin_approval.html',
study_info=parsed_studies)
class ShareStudyAJAX(BaseHandler):
def _get_shared_for_study(self, study, callback):
shared_links = _get_shared_links_for_study(study)
users = study.shared_with
callback((users, shared_links))
def _share(self, study, user, callback):
user = User(user)
callback(study.share(user))
def _unshare(self, study, user, callback):
user = User(user)
callback(study.unshare(user))
@authenticated
@coroutine
def get(self):
study_id = int(self.get_argument('study_id'))
study = Study(study_id)
_check_owner(self.current_user, study)
selected = self.get_argument('selected', None)
deselected = self.get_argument('deselected', None)
if selected is not None:
yield Task(self._share, study, selected)
if deselected is not None:
yield Task(self._unshare, study, deselected)
users, links = yield Task(self._get_shared_for_study, study)
self.write(dumps({'users': users, 'links': links}))
class SearchStudiesAJAX(BaseHandler):
@authenticated
def get(self, ignore):
user = self.get_argument('user')
query = self.get_argument('query')
echo = int(self.get_argument('sEcho'))
if user != self.current_user.id:
raise HTTPError(403, 'Unauthorized search!')
if query:
# Search for samples matching the query
search = QiitaStudySearch()
try:
search(query, self.current_user)
study_proc, proc_samples, _ = search.filter_by_processed_data()
except ParseException:
self.clear()
self.set_status(400)
self.write('Malformed search query. Please read "search help" '
'and try again.')
return
except QiitaDBIncompatibleDatatypeError as e:
self.clear()
self.set_status(400)
searchmsg = ''.join(e)
self.write(searchmsg)
return
except Exception as e:
# catch any other error as generic server error
self.clear()
self.set_status(500)
self.write("Server error during search. Please try again "
"later")
LogEntry.create('Runtime', str(e),
info={'User': self.current_user.id,
'query': query})
return
else:
study_proc = proc_samples = None
info = _build_study_info(self.current_user, study_proc=study_proc,
proc_samples=proc_samples)
# build the table json
results = {
"sEcho": echo,
"iTotalRecords": len(info),
"iTotalDisplayRecords": len(info),
"aaData": info
}
# return the json in compact form to save transmit size
self.write(dumps(results, separators=(',', ':')))
|
bsd-3-clause
|
maxwell-demon/grpc
|
src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
|
2
|
18353
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
import abc
import contextlib
import threading
import unittest
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
def __init__(self, upstream):
self._upstream = upstream
self._condition = threading.Condition()
self._paused = False
@contextlib.contextmanager
def pause(self):
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
def __iter__(self):
return self
def next(self):
with self._condition:
while self._paused:
self._condition.wait()
return next(self._upstream)
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._called = False
self._passed_future = None
self._passed_other_stuff = None
def __call__(self, *args, **kwargs):
with self._condition:
self._called = True
if args:
self._passed_future = args[0]
if 1 < len(args) or kwargs:
self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
self._condition.notify_all()
def future(self):
with self._condition:
while True:
if self._passed_other_stuff is not None:
raise ValueError(
              'Test callback passed unexpected values: %s' %
              (self._passed_other_stuff,))
elif self._called:
return self._passed_future
else:
self._condition.wait()
class TestCase(test_coverage.Coverage, unittest.TestCase):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
__metaclass__ = abc.ABCMeta
NAME = 'FutureInvocationAsynchronousEventServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
self._digest = _digest.digest(
_stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.event_method_implementations, None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
self._digest_pool.shutdown(wait=True)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
response = response_future.result()
test_messages.verify(request, response, self)
self.assertIs(callback.future(), response_future)
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
callback = _Callback()
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_future = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
future_passed_to_callback = callback.future()
response = future_passed_to_callback.result()
test_messages.verify(requests, response, self)
self.assertIs(future_passed_to_callback, response_future)
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_iterator = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
test_messages.verify(first_request, first_response, self)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
second_response = second_response_future.result()
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
second_response = second_response_future.result()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
@unittest.skip('TODO(nathaniel): implement.')
def testWaitingForSomeButNotAllParallelInvocations(self):
raise NotImplementedError()
def testCancelledUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testCancelledStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(
group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
# Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
# Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it, its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
|
bsd-3-clause
|
storborg/livetest
|
ez_setup.py
|
358
|
9716
|
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
mit
|
javier3407/Plugin.Video.ElJavi.tv
|
resources/lib/chardet/sjisprober.py
|
1182
|
3734
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
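# Minimal usage sketch (assuming `data` is a byte buffer to examine):
#   prober = SJISProber()
#   prober.feed(data)
#   name, confidence = prober.get_charset_name(), prober.get_confidence()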
|
gpl-3.0
|
minhlongdo/scipy
|
scipy/ndimage/tests/test_regression.py
|
123
|
1429
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
import scipy.ndimage as ndimage
def test_byte_order_median():
"""Regression test for #413: median_filter does not handle bytes orders."""
a = np.arange(9, dtype='<f4').reshape(3, 3)
ref = ndimage.filters.median_filter(a,(3, 3))
b = np.arange(9, dtype='>f4').reshape(3, 3)
t = ndimage.filters.median_filter(b, (3, 3))
assert_array_almost_equal(ref, t)
def test_zoom_output_shape():
"""Ticket #643"""
x = np.arange(12).reshape((3,4))
ndimage.zoom(x, 2, output=np.zeros((6,8)))
def test_ticket_742():
def SE(img, thresh=.7, size=4):
mask = img > thresh
rank = len(mask.shape)
la, co = ndimage.label(mask,
ndimage.generate_binary_structure(rank, rank))
slices = ndimage.find_objects(la)
if np.dtype(np.intp) != np.dtype('i'):
shape = (3,1240,1240)
a = np.random.rand(np.product(shape)).reshape(shape)
# shouldn't crash
SE(a)
def test_gh_issue_3025():
"""Github issue #3025 - improper merging of labels"""
d = np.zeros((60,320))
d[:,:257] = 1
d[:,260:] = 1
d[36,257] = 1
d[35,258] = 1
d[35,259] = 1
assert ndimage.label(d, np.ones((3,3)))[1] == 1
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
HyperBaton/ansible
|
lib/ansible/modules/network/check_point/cp_mgmt_administrator_facts.py
|
20
|
3990
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_administrator_facts
short_description: Get administrator objects facts on Check Point over Web Services API
description:
- Get administrator objects facts on Check Point devices.
- All operations are performed over Web Services API.
  - This module handles both operations, get a specific object and get several objects.
For getting a specific object use the parameter 'name'.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
This parameter is relevant only for getting a specific object.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
limit:
description:
- No more than that many results will be returned.
        This parameter is relevant only for getting several objects.
type: int
offset:
description:
- Skip that many results before beginning to return them.
        This parameter is relevant only for getting several objects.
type: int
order:
description:
- Sorts results by the given field. By default the results are sorted in the ascending order by name.
        This parameter is relevant only for getting several objects.
type: list
suboptions:
ASC:
description:
- Sorts results by the given field in ascending order.
type: str
choices: ['name']
DESC:
description:
- Sorts results by the given field in descending order.
type: str
choices: ['name']
extends_documentation_fragment: checkpoint_facts
"""
EXAMPLES = """
- name: show-administrator
cp_mgmt_administrator_facts:
name: admin
- name: show-administrators
cp_mgmt_administrator_facts:
details_level: standard
limit: 50
offset: 0
"""
RETURN = """
ansible_facts:
description: The checkpoint object facts.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_facts, api_call_facts
def main():
argument_spec = dict(
name=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
limit=dict(type='int'),
offset=dict(type='int'),
order=dict(type='list', options=dict(
ASC=dict(type='str', choices=['name']),
DESC=dict(type='str', choices=['name'])
))
)
argument_spec.update(checkpoint_argument_spec_for_facts)
module = AnsibleModule(argument_spec=argument_spec)
api_call_object = "administrator"
api_call_object_plural_version = "administrators"
result = api_call_facts(module, api_call_object, api_call_object_plural_version)
module.exit_json(ansible_facts=result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
javiermarcon/gui2py
|
gui/controls/button.py
|
14
|
3397
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py's Button control (uses wx.Button and wx.BitmapButton)"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2013- Mariano Reingart" # where applicable
# Initial implementation was based on PythonCard's Button component,
# but redesigned and overhauled a lot (specs renamed, events refactorized, etc.)
import wx
from ..event import FormEvent
from ..component import Control, Spec, EventSpec, InitSpec, StyleSpec
from .image import Image
from .. import images
class Button(Image):
"A simple push-button with a label (or image)"
_wx_class = wx.Button
_image = images.button
def __init__(self, parent=None, **kwargs):
if 'filename' in kwargs and kwargs['filename']:
self._wx_class = wx_BitmapButton
kwargs['label'] = ''
##if 'border' in kwargs and kwargs['border'] == 'default':
## kwargs['border'] = 'none'
## kwargs['auto_redraw'] = True # Windows specific ?!
# TODO: refactor for Disabled, Focus, Hover, Selected bitmap support
        # Use the common image constructor (TODO: ImageMixin!)
Image.__init__(self, parent, **kwargs)
def _getDefault(self):
#return self == self._parent.GetDefaultItem()
# KEA 2002-03-26
# for some reason wxDialog doesn't have a
# GetDefaultItem and SetDefaultItem
return self._default
def _setDefault(self, aBoolean):
self._default = aBoolean
if aBoolean:
self.wx_obj.SetDefault()
default = Spec(_getDefault, _setDefault, default=False, type="boolean")
label = InitSpec(lambda self: self.wx_obj.GetLabel(),
lambda self, label: self.wx_obj.SetLabel(label),
optional=False, default='Button', type="string",
doc="text to show as caption")
auto_redraw = StyleSpec(wx.BU_AUTODRAW, default=False,
doc="drawn automatically using bitmap only, providing a 3D-look border")
exact_fit = StyleSpec(wx.BU_EXACTFIT, default=False,
doc="small as possible instead of standard size (which is the default)")
onclick = EventSpec('click', binding=wx.EVT_BUTTON, kind=FormEvent)
class wx_BitmapButton(wx.BitmapButton):
def __init__(self, *args, **kwargs):
# remove label as for bitmap button, it is a image (bitmap)
if 'label' in kwargs:
del kwargs['label']
wx.BitmapButton.__init__(self, *args, **kwargs)
# WORKAROUND: 2.8 has no SetBitmap:
if wx.VERSION < (2, 9):
def SetBitmap(self, bitmap):
self.SetBitmapLabel(bitmap)
if __name__ == "__main__":
# basic test until unit_test
app = wx.App(redirect=False)
frame = wx.Frame(None)
b = Button(frame, name="btnTest", label="click me!", default=True)
assert b.get_parent() is frame
assert b.name == "btnTest"
assert b.default == True
assert b.label == "click me!"
from pprint import pprint
# assign some event handlers:
b.onclick = "print event.timestamp; event.prevent_default()"
b.onblur = b.onfocus = lambda event: pprint(event.name)
# remove an event handler:
b.onblur = None
frame.Show()
app.MainLoop()
|
lgpl-3.0
|
gqwest-erp/server
|
openerp/addons/base/ir/ir_fields.py
|
13
|
18163
|
# -*- coding: utf-8 -*-
import datetime
import functools
import operator
import itertools
import time
import psycopg2
import pytz
from openerp.osv import orm
from openerp.tools.translate import _
from openerp.tools.misc import DEFAULT_SERVER_DATE_FORMAT,\
DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html_sanitize
REFERENCING_FIELDS = set([None, 'id', '.id'])
def only_ref_fields(record):
return dict((k, v) for k, v in record.iteritems()
if k in REFERENCING_FIELDS)
def exclude_ref_fields(record):
return dict((k, v) for k, v in record.iteritems()
if k not in REFERENCING_FIELDS)
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
class ConversionNotFound(ValueError): pass
class ColumnWrapper(object):
def __init__(self, column, cr, uid, pool, fromtype, context=None):
self._converter = None
self._column = column
if column._obj:
self._pool = pool
self._converter_args = {
'cr': cr,
'uid': uid,
'model': pool[column._obj],
'fromtype': fromtype,
'context': context
}
@property
def converter(self):
if not self._converter:
self._converter = self._pool['ir.fields.converter'].for_model(
**self._converter_args)
return self._converter
def __getattr__(self, item):
return getattr(self._column, item)
class ir_fields_converter(orm.Model):
_name = 'ir.fields.converter'
def for_model(self, cr, uid, model, fromtype=str, context=None):
""" Returns a converter object for the model. A converter is a
callable taking a record-ish (a dictionary representing an openerp
record with values of typetag ``fromtype``) and returning a converted
records matching what :meth:`openerp.osv.orm.Model.write` expects.
:param model: :class:`openerp.osv.orm.Model` for the conversion base
:returns: a converter callable
:rtype: (record: dict, logger: (field, error) -> None) -> dict
"""
columns = dict(
(k, ColumnWrapper(v.column, cr, uid, self.pool, fromtype, context))
for k, v in model._all_columns.iteritems())
converters = dict(
(k, self.to_field(cr, uid, model, column, fromtype, context))
for k, column in columns.iteritems())
def fn(record, log):
converted = {}
for field, value in record.iteritems():
if field in (None, 'id', '.id'): continue
if not value:
converted[field] = False
continue
try:
converted[field], ws = converters[field](value)
for w in ws:
if isinstance(w, basestring):
# wrap warning string in an ImportWarning for
# uniform handling
w = ImportWarning(w)
log(field, w)
except ValueError, e:
log(field, e)
return converted
return fn
def to_field(self, cr, uid, model, column, fromtype=str, context=None):
""" Fetches a converter for the provided column object, from the
specified type.
A converter is simply a callable taking a value of type ``fromtype``
(or a composite of ``fromtype``, e.g. list or dict) and returning a
value acceptable for a write() on the column ``column``.
By default, tries to get a method on itself with a name matching the
pattern ``_$fromtype_to_$column._type`` and returns it.
Converter callables can either return a value and a list of warnings
to their caller or raise ``ValueError``, which will be interpreted as a
validation & conversion failure.
ValueError can have either one or two parameters. The first parameter
is mandatory, **must** be a unicode string and will be used as the
user-visible message for the error (it should be translatable and
translated). It can contain a ``field`` named format placeholder so the
caller can inject the field's translated, user-facing name (@string).
The second parameter is optional and, if provided, must be a mapping.
This mapping will be merged into the error dictionary returned to the
client.
If a converter can perform its function but has to make assumptions
about the data, it can send a warning to the user through adding an
instance of :class:`~openerp.osv.orm.ImportWarning` to the second value
it returns. The handling of a warning at the upper levels is the same
as ``ValueError`` above.
:param column: column object to generate a value for
:type column: :class:`fields._column`
:param type fromtype: type to convert to something fitting for ``column``
:param context: openerp request context
:return: a function (fromtype -> column.write_type), if a converter is found
:rtype: Callable | None
"""
# FIXME: return None
converter = getattr(
self, '_%s_to_%s' % (fromtype.__name__, column._type), None)
if not converter: return None
return functools.partial(
converter, cr, uid, model, column, context=context)
def _str_to_boolean(self, cr, uid, model, column, value, context=None):
# all translatables used for booleans
true, yes, false, no = _(u"true"), _(u"yes"), _(u"false"), _(u"no")
# potentially broken casefolding? What about locales?
trues = set(word.lower() for word in itertools.chain(
[u'1', u"true", u"yes"], # don't use potentially translated values
self._get_translations(cr, uid, ['code'], u"true", context=context),
self._get_translations(cr, uid, ['code'], u"yes", context=context),
))
if value.lower() in trues: return True, []
# potentially broken casefolding? What about locales?
falses = set(word.lower() for word in itertools.chain(
[u'', u"0", u"false", u"no"],
self._get_translations(cr, uid, ['code'], u"false", context=context),
self._get_translations(cr, uid, ['code'], u"no", context=context),
))
if value.lower() in falses: return False, []
return True, [orm.ImportWarning(
_(u"Unknown value '%s' for boolean field '%%(field)s', assuming '%s'")
% (value, yes), {
'moreinfo': _(u"Use '1' for yes and '0' for no")
})]
def _str_to_integer(self, cr, uid, model, column, value, context=None):
try:
return int(value), []
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be an integer for field '%%(field)s'")
% value)
def _str_to_float(self, cr, uid, model, column, value, context=None):
try:
return float(value), []
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be a number for field '%%(field)s'")
% value)
def _str_id(self, cr, uid, model, column, value, context=None):
return value, []
_str_to_reference = _str_to_char = _str_to_text = _str_to_binary = _str_to_html = _str_id
def _str_to_date(self, cr, uid, model, column, value, context=None):
try:
time.strptime(value, DEFAULT_SERVER_DATE_FORMAT)
return value, []
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be a valid date for field '%%(field)s'") % value, {
'moreinfo': _(u"Use the format '%s'") % u"2012-12-31"
})
def _input_tz(self, cr, uid, context):
# if there's a tz in context, try to use that
if context.get('tz'):
try:
return pytz.timezone(context['tz'])
except pytz.UnknownTimeZoneError:
pass
# if the current user has a tz set, try to use that
user = self.pool['res.users'].read(
cr, uid, [uid], ['tz'], context=context)[0]
if user['tz']:
try:
return pytz.timezone(user['tz'])
except pytz.UnknownTimeZoneError:
pass
# fallback if no tz in context or on user: UTC
return pytz.UTC
def _str_to_datetime(self, cr, uid, model, column, value, context=None):
if context is None: context = {}
try:
parsed_value = datetime.datetime.strptime(
value, DEFAULT_SERVER_DATETIME_FORMAT)
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be a valid datetime for field '%%(field)s'") % value, {
'moreinfo': _(u"Use the format '%s'") % u"2012-12-31 23:59:59"
})
input_tz = self._input_tz(cr, uid, context)# Apply input tz to the parsed naive datetime
dt = input_tz.localize(parsed_value, is_dst=False)
# And convert to UTC before reformatting for writing
return dt.astimezone(pytz.UTC).strftime(DEFAULT_SERVER_DATETIME_FORMAT), []
def _get_translations(self, cr, uid, types, src, context):
types = tuple(types)
# Cache translations so they don't have to be reloaded from scratch on
# every row of the file
tnx_cache = cr.cache.setdefault(self._name, {})
if tnx_cache.setdefault(types, {}) and src in tnx_cache[types]:
return tnx_cache[types][src]
Translations = self.pool['ir.translation']
tnx_ids = Translations.search(
cr, uid, [('type', 'in', types), ('src', '=', src)], context=context)
tnx = Translations.read(cr, uid, tnx_ids, ['value'], context=context)
result = tnx_cache[types][src] = map(operator.itemgetter('value'), tnx)
return result
def _str_to_selection(self, cr, uid, model, column, value, context=None):
selection = column.selection
if not isinstance(selection, (tuple, list)):
# FIXME: Don't pass context to avoid translations?
# Or just copy context & remove lang?
selection = selection(model, cr, uid, context=None)
for item, label in selection:
labels = self._get_translations(
cr, uid, ('selection', 'model', 'code'), label, context=context)
labels.append(label)
if value == unicode(item) or value in labels:
return item, []
raise ValueError(
_(u"Value '%s' not found in selection field '%%(field)s'") % (
value), {
'moreinfo': [label or unicode(item) for item, label in selection
if label or item]
})
def db_id_for(self, cr, uid, model, column, subfield, value, context=None):
""" Finds a database id for the reference ``value`` in the referencing
subfield ``subfield`` of the provided column of the provided model.
:param model: model to which the column belongs
:param column: relational column for which references are provided
:param subfield: a relational subfield allowing building of refs to
existing records: ``None`` for a name_get/name_search,
``id`` for an external id and ``.id`` for a database
id
:param value: value of the reference to match to an actual record
:param context: OpenERP request context
:return: a triple of the matched database identifier (if any), the
translated user-readable name for the field, and the list of
warnings
:rtype: (ID|None, unicode, list)
"""
if context is None: context = {}
id = None
warnings = []
action = {'type': 'ir.actions.act_window', 'target': 'new',
'view_mode': 'tree,form', 'view_type': 'form',
'views': [(False, 'tree'), (False, 'form')],
'help': _(u"See all possible values")}
if subfield is None:
action['res_model'] = column._obj
elif subfield in ('id', '.id'):
action['res_model'] = 'ir.model.data'
action['domain'] = [('model', '=', column._obj)]
RelatedModel = self.pool[column._obj]
if subfield == '.id':
field_type = _(u"database id")
try: tentative_id = int(value)
except ValueError: tentative_id = value
try:
if RelatedModel.search(cr, uid, [('id', '=', tentative_id)],
context=context):
id = tentative_id
except psycopg2.DataError:
# type error
raise ValueError(
_(u"Invalid database id '%s' for the field '%%(field)s'") % value,
{'moreinfo': action})
elif subfield == 'id':
field_type = _(u"external id")
if '.' in value:
module, xid = value.split('.', 1)
else:
module, xid = context.get('_import_current_module', ''), value
ModelData = self.pool['ir.model.data']
try:
_model, id = ModelData.get_object_reference(
cr, uid, module, xid)
except ValueError: pass # leave id is None
elif subfield is None:
field_type = _(u"name")
ids = RelatedModel.name_search(
cr, uid, name=value, operator='=', context=context)
if ids:
if len(ids) > 1:
warnings.append(orm.ImportWarning(
_(u"Found multiple matches for field '%%(field)s' (%d matches)")
% (len(ids))))
id, _name = ids[0]
else:
raise Exception(_(u"Unknown sub-field '%s'") % subfield)
if id is None:
raise ValueError(
_(u"No matching record found for %(field_type)s '%(value)s' in field '%%(field)s'")
% {'field_type': field_type, 'value': value},
{'moreinfo': action})
return id, field_type, warnings
def _referencing_subfield(self, record):
""" Checks the record for the subfields allowing referencing (an
existing record in an other table), errors out if it finds potential
conflicts (multiple referencing subfields) or non-referencing subfields
returns the name of the correct subfield.
:param record:
:return: the record subfield to use for referencing and a list of warnings
:rtype: str, list
"""
# Can import by name_get, external id or database id
fieldset = set(record.iterkeys())
if fieldset - REFERENCING_FIELDS:
raise ValueError(
_(u"Can not create Many-To-One records indirectly, import the field separately"))
if len(fieldset) > 1:
raise ValueError(
_(u"Ambiguous specification for field '%(field)s', only provide one of name, external id or database id"))
# only one field left possible, unpack
[subfield] = fieldset
return subfield, []
def _str_to_many2one(self, cr, uid, model, column, values, context=None):
# Should only be one record, unpack
[record] = values
subfield, w1 = self._referencing_subfield(record)
reference = record[subfield]
id, subfield_type, w2 = self.db_id_for(
cr, uid, model, column, subfield, reference, context=context)
return id, w1 + w2
def _str_to_many2many(self, cr, uid, model, column, value, context=None):
[record] = value
subfield, warnings = self._referencing_subfield(record)
ids = []
for reference in record[subfield].split(','):
id, subfield_type, ws = self.db_id_for(
cr, uid, model, column, subfield, reference, context=context)
ids.append(id)
warnings.extend(ws)
return [REPLACE_WITH(ids)], warnings
def _str_to_one2many(self, cr, uid, model, column, records, context=None):
commands = []
warnings = []
if len(records) == 1 and exclude_ref_fields(records[0]) == {}:
# only one row with only ref field, field=ref1,ref2,ref3 as in
# m2o/m2m
record = records[0]
subfield, ws = self._referencing_subfield(record)
warnings.extend(ws)
# transform [{subfield:ref1,ref2,ref3}] into
# [{subfield:ref1},{subfield:ref2},{subfield:ref3}]
records = ({subfield:item} for item in record[subfield].split(','))
def log(_, e):
if not isinstance(e, Warning):
raise e
warnings.append(e)
for record in records:
id = None
refs = only_ref_fields(record)
# there are ref fields in the record
if refs:
subfield, w1 = self._referencing_subfield(refs)
warnings.extend(w1)
reference = record[subfield]
id, subfield_type, w2 = self.db_id_for(
cr, uid, model, column, subfield, reference, context=context)
warnings.extend(w2)
writable = column.converter(exclude_ref_fields(record), log)
if id:
commands.append(LINK_TO(id))
commands.append(UPDATE(id, writable))
else:
commands.append(CREATE(writable))
return commands, warnings
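# --- Hedged illustration, not part of the original module ---
# A minimal sketch of a custom converter following the
# ``_$fromtype_to_$column._type`` naming convention described above. The
# 'phone' semantics, the digit check and the length threshold are assumptions
# made purely for illustration; a real converter would be defined as a method
# of this class so that getattr-based resolution can find it.
def _str_to_phone_sketch(self, cr, uid, model, column, value, context=None):
    digits = value.replace(u' ', u'')
    if not digits.isdigit():
        # one- or two-argument ValueError: message plus an optional info mapping
        raise ValueError(
            _(u"'%s' does not look like a phone number for field '%%(field)s'") % value,
            {'moreinfo': _(u"Use digits only, e.g. '0123456789'")})
    if len(digits) < 8:
        # the value is usable, but warn the user about the assumption made
        return digits, [orm.ImportWarning(
            _(u"'%s' looks unusually short for field '%%(field)s'") % value)]
    return digits, []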
|
agpl-3.0
|
jblomer/ramcloud
|
scripts/cluster.py
|
1
|
35327
|
#!/usr/bin/env python
# Copyright (c) 2010-2016 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Runs a RAMCloud.
Used to exercise a RAMCloud cluster (e.g., for performance measurements)
by running a collection of servers and clients.
"""
from __future__ import division, print_function
from common import *
import config
import itertools
import log
import os
import random
import pprint
import re
import subprocess
import sys
import time
from optparse import OptionParser
# Locations of various RAMCloud executables.
coordinator_binary = '%s/coordinator' % config.hooks.get_remote_obj_path()
server_binary = '%s/server' % config.hooks.get_remote_obj_path()
ensure_servers_bin = '%s/apps/ensureServers' % config.hooks.get_remote_obj_path()
# valgrind
valgrind_command = ''
# Info used to construct service locators for each of the transports
# supported by RAMCloud. In some cases the locator for the coordinator
# needs to be different from that for the servers.
server_locator_templates = {
'tcp': 'tcp:host=%(host)s,port=%(port)d',
'tcp-1g': 'tcp:host=%(host1g)s,port=%(port)d',
'basic+udp': 'basic+udp:host=%(host)s,port=%(port)d',
'basic+udp-1g': 'basic+udp:host=%(host1g)s,port=%(port)d',
'unreliable+udp': 'unreliable+udp:host=%(host)s,port=%(port)d',
'infrc': 'infrc:host=%(host)s,port=%(port)d',
'basic+infud': 'basic+infud:host=%(host1g)s',
'unreliable+infud': 'unreliable+infud:host=%(host1g)s',
'unreliable+infeth': 'unreliable+infeth:mac=00:11:22:33:44:%(id)02x',
'basic+dpdk': 'basic+dpdk:',
}
coord_locator_templates = {
'tcp': 'tcp:host=%(host)s,port=%(port)d',
'tcp-1g': 'tcp:host=%(host1g)s,port=%(port)d',
'basic+udp': 'basic+udp:host=%(host)s,port=%(port)d',
'basic+udp-1g': 'basic+udp:host=%(host1g)s,port=%(port)d',
'unreliable+udp': 'unreliable+udp:host=%(host)s,port=%(port)d',
'infrc': 'infrc:host=%(host)s,port=%(port)d',
# Coordinator uses udp even when rest of cluster uses infud
# or infeth.
'basic+infud': 'basic+udp:host=%(host)s,port=%(port)d',
'basic+dpdk': 'basic+udp:host=%(host)s,port=%(port)d',
}
def server_locator(transport, host, port=server_port):
"""Generate a service locator for a master/backup process.
@param transport: A transport name (e.g. infrc, basic+infud, tcp, ...)
@type transport: C{str}
@param host: A 3-tuple of (hostname, ip, id).
@type host: C{(str, str, int)}
@param port: Port which should be part of the locator (if any).
Allows multiple services to be started on the same host.
@type port: C{int}
@return: A service locator.
@rtype: C{str}
"""
locator = (server_locator_templates[transport] %
{'host': host[1],
'host1g': host[0],
'port': port,
'id': host[2]})
return locator
def coord_locator(transport, host):
"""Generate a service locator for a coordinator process.
@param transport: A transport name (e.g. infrc, basic+infud, tcp, ...)
@type transport: C{str}
@param host: A 3-tuple of (hostname, ip, id).
@type host: C{(str, str, int)}
@return: A service locator.
@rtype: C{str}
"""
locator = (coord_locator_templates[transport] %
{'host': host[1],
'host1g': host[0],
'port': coordinator_port,
'id': host[2]})
return locator
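# Hedged usage sketch, not part of the original script: the host tuple below
# is made up, and the function is never called; it only illustrates the kind
# of strings the helpers above produce from the locator templates.
def _locator_examples():
    host = ('rc01', '192.168.1.101', 1)          # hypothetical (hostname, ip, id)
    server = server_locator('tcp', host, 8100)   # 'tcp:host=192.168.1.101,port=8100'
    coord = coord_locator('tcp', host)           # 'tcp:host=192.168.1.101,port=<coordinator_port>'
    return server, coord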
class Cluster(object):
"""Helper context manager for scripting and coordinating RAMCloud on a
cluster. Useful for configuring and running experiments. See run() for
a simpler interface useful for running single-shot experiments where the
cluster configuration is (mostly) static throughout the experiment.
=== Configuration/Defaults ===
The fields below control various aspects of the run as well as some
of the defaults that are used when creating processes in the cluster.
Most users of this class will want to override some of these after
an instance of Cluster is created but before any operations are
performed with it.
@ivar log_level: Log level to use for spawned servers. (default: NOTICE)
@ivar verbose: If True then print progress of starting clients/servers.
(default: False)
@ivar transport: Transport name to use for servers
(see server_locator_templates) (default: basic+infud).
@ivar replicas: Replication factor to use for each log segment. (default: 3)
@ivar disk: Server args for specifying the storage device to use for
backups (default: default_disk1 taken from {,local}config.py).
@ivar disjunct: Disjunct (not collocate) entities on each server.
=== Other Stuff ===
@ivar coordinator: None until start_coordinator() is run, then a
Sandbox.Process corresponding to the coordinator process.
@ivar servers: List of Sandbox.Process corresponding to each of the
server processes started with start_server().
@ivar masters_started: Number of masters started via start_server(). Note that
ensure_servers() uses this, so keep it consistent if you
kill processes in the cluster.
@ivar backups_started: Number of backups started via start_server(). Note that
ensure_servers() uses this, so keep it consistent if you
kill processes in the cluster.
@ivar log_subdir: Specific directory where all the processes created by
this cluster will log.
@ivar sandbox: Nested context manager that cleans up processes when the
context of this cluster is exited.
"""
def __init__(self, log_dir='logs', log_exists=False,
cluster_name_exists=False):
"""
@param log_dir: Top-level directory in which to write log files.
A separate subdirectory will be created in this
directory for the log files from this run. This can
only be overridden by passing it to __init__() since
that method creates the subdirectory.
(default: logs)
@param log_exists:
Indicates whether the log directory already exists.
This will be true for cluster objects that are
created after starting the clusterperf test.
(default: False)
@param cluster_name_exists:
Indicates whether a cluster name already exists as
part of this test. Backups that are started/restarted
using the same cluster name will read data from the
replicas.
(default: False)
"""
self.log_level = 'NOTICE'
self.verbose = False
self.transport = 'basic+infud'
self.replicas = 3
self.disk = default_disk1
self.disjunct = False
if cluster_name_exists: # do nothing if it exists
self.cluster_name = None
if self.verbose:
print ('Cluster name exists')
else:
self.cluster_name = 'cluster_' + ''.join([chr(random.choice(
range(ord('a'), ord('z'))))
for c in range(20)])
if self.verbose:
print ('Cluster name is %s' % (self.cluster_name))
self.coordinator = None
self.next_server_id = 1
self.next_client_id = 1
self.masters_started = 0
self.backups_started = 0
self.coordinator_host= getHosts()[0]
self.coordinator_locator = coord_locator(self.transport,
self.coordinator_host)
self.log_subdir = log.createDir(log_dir, log_exists)
# Create a perfcounters directory under the log directory.
os.mkdir(self.log_subdir + '/perfcounters')
if not log_exists:
self.sandbox = Sandbox()
else:
self.sandbox = Sandbox(cleanup=False)
# create the shm directory to store shared files
try:
os.mkdir('%s/logs/shm' % os.getcwd())
except:
pass
f = open('%s/logs/shm/README' % os.getcwd(), 'w+')
f.write('This directory contains files that correspond to '
'different server processes that were started during '
'the last run of clusterperf. Filename is\n'
'"<hostname>_<pid>". Each of these files stores '
'the service locator of the respective server which is '
'used to give information to the client.\nThe existence '
'of this file at the end of a clusterperf run means '
'that processes were not cleaned up properly the last'
' time. So one can use these pids during manual clean up')
if not cluster_name_exists:
# store the name of the cluster by creating an empty file with
# the appropriate file name in shm so that new backups when
# created using a different cluster object can use it to read
# data from their disks
f = open('%s/logs/shm/%s' % (os.getcwd(), self.cluster_name),
'w+')
def start_coordinator(self, host, args=''):
"""Start a coordinator on a node.
@param host: (hostname, ip, id) tuple describing the node on which
to start the RAMCloud coordinator.
@param args: Additional command-line args to pass to the coordinator.
(default: '')
@return: Sandbox.Process representing the coordinator process.
"""
if self.coordinator:
raise Exception('Coordinator already started')
self.coordinator_host = host
self.coordinator_locator = coord_locator(self.transport,
self.coordinator_host)
if not self.enable_logcabin:
command = (
'%s %s -C %s -l %s --logFile %s/coordinator.%s.log %s' %
(valgrind_command,
coordinator_binary, self.coordinator_locator,
self.log_level, self.log_subdir,
self.coordinator_host[0], args))
self.coordinator = self.sandbox.rsh(self.coordinator_host[0],
command, bg=True, stderr=subprocess.STDOUT)
else:
# currently hardcoding logcabin server because ankita's logcabin
# scripts are not on git.
command = (
'%s %s -C %s -z logcabin21:61023 -l %s '
'--logFile %s/coordinator.%s.log %s' %
(valgrind_command,
coordinator_binary, self.coordinator_locator,
self.log_level, self.log_subdir,
self.coordinator_host[0], args))
self.coordinator = self.sandbox.rsh(self.coordinator_host[0],
command, bg=True, stderr=subprocess.STDOUT)
# just wait for coordinator to start
time.sleep(1)
# invoke the script that restarts the coordinator if it dies
restart_command = ('%s/restart_coordinator %s/coordinator.%s.log'
' %s %s logcabin21:61023' %
(local_scripts_path, self.log_subdir,
self.coordinator_host[0],
obj_path, self.coordinator_locator))
restarted_coord = self.sandbox.rsh(self.coordinator_host[0],
restart_command, kill_on_exit=True, bg=True,
stderr=subprocess.STDOUT)
self.ensure_servers(0, 0)
if self.verbose:
print('Coordinator started on %s at %s' %
(self.coordinator_host[0], self.coordinator_locator))
print('Coordinator command line arguments %s' %
(command))
return self.coordinator
def start_server(self,
host,
args='',
master=True,
backup=True,
disk=None,
port=server_port,
kill_on_exit=True
):
"""Start a server on a node.
@param host: (hostname, ip, id) tuple describing the node on which
to start the RAMCloud server.
@param args: Additional command-line args to pass to the server.
(default: '')
@param master: If True then the started server provides a master
service. (default: True)
@param backup: If True then the started server provides a backup
service. (default: True)
@param disk: If backup is True then the started server passes these
additional arguments to select the storage type and
location. (default: self.disk)
@param port: The port the server should listen on.
(default: see server_locator())
@param kill_on_exit:
If False, this server process is not reaped at the end
of the clusterperf test.
(default: True)
@return: Sandbox.Process representing the server process.
"""
log_prefix = '%s/server%d.%s' % (
self.log_subdir, self.next_server_id, host[0])
command = ('%s %s -C %s -L %s -r %d -l %s --clusterName __unnamed__ '
'--logFile %s.log --preferredIndex %d %s' %
(valgrind_command,
server_binary, self.coordinator_locator,
server_locator(self.transport, host, port),
self.replicas,
self.log_level,
log_prefix,
self.next_server_id,
args))
self.next_server_id += 1
if master and backup:
pass
elif master:
command += ' --masterOnly'
elif backup:
command += ' --backupOnly'
else:
raise Exception('Cannot start a server that is neither a master '
'nor backup')
if backup:
if not disk:
disk = self.disk
command += ' %s' % disk
self.backups_started += 1
if master:
self.masters_started += 1
# Adding redirection for stdout and stderr.
stdout = open(log_prefix + '.out', 'w')
stderr = open(log_prefix + '.err', 'w')
if not kill_on_exit:
server = self.sandbox.rsh(host[0], command, is_server=True,
locator=server_locator(self.transport,
host, port),
kill_on_exit=False, bg=True,
stdout=stdout,
stderr=stderr)
else:
server = self.sandbox.rsh(host[0], command, is_server=True,
locator=server_locator(self.transport,
host, port),
bg=True,
stdout=stdout,
stderr=stderr)
if self.verbose:
print('Server started on %s at %s: %s' %
(host[0],
server_locator(self.transport, host, port), command))
return server
def kill_server(self, locator):
"""Kill a running server.
@param locator: service locator for the server that needs to be
killed.
"""
path = '%s/logs/shm' % os.getcwd()
files = sorted([f for f in os.listdir(path)
if os.path.isfile( os.path.join(path, f) )])
for file in files:
f = open('%s/logs/shm/%s' % (os.getcwd(), file),'r')
service_locator = f.read()
if (locator in service_locator):
to_kill = '1'
mhost = file
subprocess.Popen(['ssh', mhost.split('_')[0],
'%s/killserver' % config.hooks.get_remote_scripts_path(),
to_kill, os.getcwd(), mhost])
f.close()
try:
os.remove('%s/logs/shm/%s' % (os.getcwd(), file))
except:
pass
else:
f.close()
def ensure_servers(self, numMasters=None, numBackups=None, timeout=30):
"""Poll the coordinator and block until the specified number of
masters and backups have enlisted. Useful for ensuring that the
cluster is in the expected state before experiments begin.
If the expected state isn't achieved within the timeout the call
will throw an exception.
@param numMasters: Number of masters that must be part of the
cluster before this call returns successfully.
If unspecified then wait until all the masters
started with start_servers() have enlisted.
@param numBackups: Number of backups that must be part of the
cluster before this call returns successfully.
If unspecified then wait until all the backups
started with start_servers() have enlisted.
"""
if not numMasters:
numMasters = self.masters_started
if not numBackups:
numBackups = self.backups_started
self.sandbox.checkFailures()
try:
ensureCommand = ('%s -C %s -m %d -b %d -l 1 --wait %d '
'--logFile %s/ensureServers.log' %
(ensure_servers_bin, self.coordinator_locator,
numMasters, numBackups, timeout,
self.log_subdir))
if self.verbose:
print("ensureServers command: %s" % ensureCommand)
self.sandbox.rsh(self.coordinator_host[0], ensureCommand)
except:
# prefer exceptions from dead processes to timeout error
self.sandbox.checkFailures()
raise
def start_clients(self, hosts, client):
"""Start a client binary on a set of nodes.
@param hosts: List of (hostname, ip, id) tuples describing the
nodes on which to start the client binary.
Each binary is launched with a --numClients and
--clientIndex argument.
@param client: Path to the client binary to run along with any
args to pass to each client.
@return: Sandbox.Process representing the client process.
"""
num_clients = len(hosts)
args = client.split(' ')
client_bin = args[0]
client_args = ' '.join(args[1:])
clients = []
for i, client_host in enumerate(hosts):
command = ('%s %s -C %s --numClients %d --clientIndex %d '
'--logFile %s/client%d.%s.log %s' %
(valgrind_command,
client_bin, self.coordinator_locator, num_clients,
i, self.log_subdir, self.next_client_id,
client_host[0], client_args))
self.next_client_id += 1
clients.append(self.sandbox.rsh(client_host[0], command, bg=True))
if self.verbose:
print('Client %d started on %s: %s' % (i, client_host[0],
command))
return clients
def wait(self, processes, timeout=30):
"""Wait for a set of processes to exit.
@param processes: List of Sandbox.Process instances as returned by
start_coordinator, start_server, and start_clients
whose exit should be waited on.
@param timeout: Seconds to wait for exit before giving up and throwing
an exception. (default: 30)
"""
start = time.time()
for i, p in enumerate(processes):
while p.proc.returncode is None:
self.sandbox.checkFailures()
time.sleep(.1)
if time.time() - start > timeout:
raise Exception('timeout exceeded %s' % self.log_subdir)
if self.verbose:
print('%s finished' % p.sonce)
def remove_empty_files(self):
"""Remove blank files and empty directories within the log directory.
"""
root = self.log_subdir
for item in os.listdir(root):
path = os.path.join(root, item)
if os.path.isfile(path):
if os.path.getsize(path) == 0:
os.remove(path)
elif os.path.isdir(path):
try:
os.rmdir(path)
except:
pass
def shutdown(self):
"""Kill all remaining processes started as part of this cluster and
wait for their exit. Usually called implicitly if 'with' keyword is
used with the cluster."""
self.__exit__(None, None, None)
def __enter__(self):
self.sandbox.__enter__()
config.hooks.cluster_enter(self)
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
config.hooks.cluster_exit()
self.sandbox.__exit__(exc_type, exc_value, exc_tb)
self.remove_empty_files()
return False # rethrow exception, if any
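# Hedged sketch, not part of the original script: the minimal pattern the class
# docstring describes -- configure a Cluster, start a coordinator and a couple
# of servers, then let the context manager clean everything up. The host
# choices and settings are assumptions; run() below is the real entry point.
def _example_cluster_session():
    hosts = getHosts()
    with Cluster() as cluster:
        cluster.transport = 'tcp'
        cluster.replicas = 1
        cluster.enable_logcabin = False   # start_coordinator() expects this attribute
        cluster.start_coordinator(hosts[-1])
        for host in hosts[:2]:
            cluster.start_server(host)
        cluster.ensure_servers()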
def run(
num_servers=4, # Number of hosts on which to start
# servers (not including coordinator).
backup_disks_per_server=2, # Number of backup disks to use on each
# server host (0, 1, or 2).
replicas=3, # Replication factor to use for each
# log segment.
disk1=default_disk1, # Server arguments specifying the
# backing device when one backup disk
# is used on each server.
disk2=default_disk2, # Server arguments specifying the
# backing devices when two backup disks
# are used on each server
# (if backup_disks_per_server= 2).
timeout=20, # How many seconds to wait for the
# clients to complete.
coordinator_args='', # Additional arguments for the
# coordinator.
master_args='', # Additional arguments for each server
# that runs a master
backup_args='', # Additional arguments for each server
# that runs a backup.
log_level='NOTICE', # Log level to use for all servers.
log_dir='logs', # Top-level directory in which to write
# log files. A separate subdirectory
# will be created in this directory
# for the log files from this run.
client=None, # Command-line to invoke for each client
# additional arguments will be prepended
# with configuration information such as
# -C.
num_clients=0, # Number of client processes to run.
# They will all run on separate
# machines, if possible, but if there
# aren't enough available machines then
# multiple clients will run on some
# machines.
client_hosts=None, # An explicit list of hosts (in
# host, ip, id triples) on which clients
# should be run. If this is set and
# share_hosts is set then share_hosts is
# ignored.
share_hosts=False, # True means clients can be run on
# machines running servers, if needed.
transport='basic+infud', # Name of transport to use for servers.
verbose=False, # Print information about progress in
# starting clients and servers
debug=False, # If True, pause after starting all
# to allow for debugging setup such as
# attaching gdb.
old_master_host=None, # Pass a (hostname, ip, id) tuple to
# construct a large master on that host
# before the others are started. Useful
# for creating the old master for
# recoveries.
old_master_args='', # Additional arguments to run on the
# old master (e.g. total RAM).
enable_logcabin=False, # If True, run the coordinator with logcabin.
valgrind=False, # If True, run all processes under valgrind.
valgrind_args='', # Additional arguments for valgrind
disjunct=False, # Disjunct entities on a server
coordinator_host=None
):
"""
Start a coordinator and servers, as indicated by the arguments. If a
client is specified, then start one or more client processes and wait for
them to complete. Otherwise leave the cluster running.
@return: string indicating the path to the log files for this run.
"""
# client_hosts = [('rc52', '192.168.1.152', 52)]
if client:
if num_clients == 0:
num_clients = 1
if verbose:
print('num_servers=(%d), available hosts=(%d) defined in config.py'
% (num_servers, len(getHosts())))
print ('disjunct=', disjunct)
# When disjunct=True, disjuncts Coordinator and Clients on Server nodes.
if disjunct:
if num_servers + num_clients + 1 > len(getHosts()):
raise Exception('num_servers (%d)+num_clients (%d)+1(coord) exceeds the available hosts (%d)'
% (num_servers, num_clients, len(getHosts())))
else:
if num_servers > len(getHosts()):
raise Exception('num_servers (%d) exceeds the available hosts (%d)'
% (num_servers, len(getHosts())))
if not share_hosts and not client_hosts:
if (len(getHosts()) - num_servers) < 1:
raise Exception('Asked for %d servers without sharing hosts with %d '
'clients, but only %d hosts were available'
% (num_servers, num_clients, len(getHosts())))
masters_started = 0
backups_started = 0
global valgrind_command
if valgrind:
valgrind_command = ('valgrind %s' % valgrind_args)
with Cluster(log_dir) as cluster:
cluster.log_level = log_level
cluster.verbose = verbose
cluster.transport = transport
cluster.replicas = replicas
cluster.timeout = timeout
cluster.disk = disk1
cluster.enable_logcabin = enable_logcabin
cluster.disjunct = disjunct
cluster.hosts = getHosts()
if not coordinator_host:
coordinator_host = cluster.hosts[len(cluster.hosts)-1]
coordinator = cluster.start_coordinator(coordinator_host,
coordinator_args)
if disjunct:
cluster.hosts.pop(0)
if old_master_host:
oldMaster = cluster.start_server(old_master_host,
old_master_args,
backup=False)
oldMaster.ignoreFailures = True
masters_started += 1
cluster.ensure_servers(timeout=60)
for host in cluster.hosts[:num_servers]:
backup = False
args = master_args
disk_args = None
if backup_disks_per_server > 0:
backup = True
args += ' %s' % backup_args
backups_started += 1
disk_args = disk1 if backup_disks_per_server == 1 else disk2
cluster.start_server(host, args, backup=backup, disk=disk_args)
masters_started += 1
if disjunct:
cluster.hosts = cluster.hosts[num_servers:]
if masters_started > 0 or backups_started > 0:
cluster.ensure_servers()
if verbose:
print('All servers running')
if not client:
print('Servers started.')
raw_input('Type <Enter> to shutdown servers: ')
elif debug:
print('Servers started; pausing for debug setup.')
raw_input('Type <Enter> to continue: ')
if client:
# Note: even if it's OK to share hosts between clients and servers,
# don't do it unless necessary.
if not client_hosts:
if disjunct:
host_list = cluster.hosts[:]
else:
host_list = cluster.hosts[num_servers:]
if share_hosts:
host_list.extend(cluster.hosts[:num_servers])
client_hosts = [host_list[i % len(host_list)]
for i in range(num_clients)]
assert(len(client_hosts) == num_clients)
clients = cluster.start_clients(client_hosts, client)
cluster.wait(clients, timeout)
return cluster.log_subdir
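# Hedged sketch, not part of the original script: a typical single-shot
# invocation of run() as it might appear in a driver script. The client binary
# name and its arguments are placeholders; any clusterperf-style client built
# under the remote obj path could be substituted.
def _example_run():
    return run(num_servers=4,
               backup_disks_per_server=1,
               replicas=3,
               num_clients=2,
               client='%s/apps/exampleClient --count 10' % config.hooks.get_remote_obj_path(),
               timeout=120,
               verbose=True)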
if __name__ == '__main__':
parser = OptionParser(description=
'Start RAMCloud servers and run a client application.',
conflict_handler='resolve')
parser.add_option('--backupArgs', metavar='ARGS', default='',
dest='backup_args',
help='Additional command-line arguments to pass to '
'each backup')
parser.add_option('-b', '--backupDisks', type=int, default=1,
metavar='N', dest='backup_disks_per_server',
help='Number of backup disks to run on each server host '
'(0, 1, or 2)')
parser.add_option('--client', metavar='ARGS',
help='Command line to invoke the client application '
'(additional arguments will be inserted at the beginning '
'of the argument list)')
parser.add_option('-n', '--clients', type=int, default=1,
metavar='N', dest='num_clients',
help='Number of instances of the client application '
'to run')
parser.add_option('--coordinatorArgs', metavar='ARGS', default='',
dest='coordinator_args',
help='Additional command-line arguments to pass to the '
'cluster coordinator')
parser.add_option('--debug', action='store_true', default=False,
help='Pause after starting servers but before running '
'clients to enable debugging setup')
parser.add_option('--disk1', default=default_disk1,
help='Server arguments to specify disk for first backup')
parser.add_option('--disk2', default=default_disk2,
help='Server arguments to specify disk for second backup')
parser.add_option('-l', '--logLevel', default='NOTICE',
choices=['DEBUG', 'NOTICE', 'WARNING', 'ERROR', 'SILENT'],
metavar='L', dest='log_level',
help='Controls degree of logging in servers')
parser.add_option('-d', '--logDir', default='logs',
metavar='DIR',
dest='log_dir',
help='Top level directory for log files; the files for '
'each invocation will go in a subdirectory.')
parser.add_option('--masterArgs', metavar='ARGS', default='',
dest='master_args',
help='Additional command-line arguments to pass to '
'each master')
parser.add_option('-r', '--replicas', type=int, default=3,
metavar='N',
help='Number of disk backup copies for each segment')
parser.add_option('-s', '--servers', type=int, default=4,
metavar='N', dest='num_servers',
help='Number of hosts on which to run servers')
parser.add_option('--shareHosts', action='store_true', default=False,
dest='share_hosts',
help='Allow clients to run on machines running servers '
'(by default clients run on different machines than '
'the servers, though multiple clients may run on a '
'single machine)')
parser.add_option('-t', '--timeout', type=int, default=20,
metavar='SECS',
help="Abort if the client application doesn't finish within "
'SECS seconds')
parser.add_option('-T', '--transport', default='basic+infud',
help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Print progress messages')
parser.add_option('--valgrind', action='store_true', default=False,
help='Run all the processes under valgrind')
parser.add_option('--valgrindArgs', metavar='ARGS', default='',
dest='valgrind_args',
help='Arguments to pass to valgrind')
parser.add_option('--disjunct', action='store_true', default=False,
help='Disjunct entities (disable collocation) on each server')
(options, args) = parser.parse_args()
status = 0
try:
run(**vars(options))
finally:
logInfo = log.scan("logs/latest", ["WARNING", "ERROR"])
if len(logInfo) > 0:
print(logInfo, file=sys.stderr)
status = 1
quit(status)
|
isc
|
andcor02/mbed-os
|
targets/TARGET_NORDIC/TARGET_NRF5/porting_tools/sdk_update.py
|
40
|
10384
|
#!python3
import os, shutil, json, pprint, sys, string, json, argparse
from collections import OrderedDict
from shutil import copyfile, copytree
def rename_sdk_old_dirs(path, dry_run=False):
# Assume that all old SDK directories are simply named "sdk".
sdk_dir_name = "sdk"
for root, dirs, files in os.walk(path):
for name in dirs:
if name == sdk_dir_name:
full_path = root + "\\" + name
new_full_path = root + "\\_old_" + name
print("rename " + full_path + " ---> " + new_full_path)
if not dry_run:
os.rename(full_path, new_full_path)
os.mkdir(full_path)
def rename_dirs(sdk_dirs_in_mbed, new_name, dry_run=False):
for dir_path in sdk_dirs_in_mbed:
xdir_path = os.path.join('.',dir_path)
new_dir_path = os.path.join(os.path.dirname(xdir_path), new_name)
print("rename " + xdir_path + " ---> " + new_dir_path)
if not dry_run:
os.rename(xdir_path, new_dir_path)
def get_file_pathes_couples(path_sdk_componets, skip_dirs = [], skip_files = [], verbose = False):
mbed_list = []
cutted_roots = []
cutted_files = []
path_sdk_componets = path_sdk_componets + '\\'
for root, dirs, files in os.walk(path_sdk_componets):
procced = True
cutted_root = root[len(path_sdk_componets):]
for skip_d in skip_dirs:
if cutted_root.startswith(skip_d):
cutted_roots.append(cutted_root)
procced = False
if procced:
for file_name in files:
procced = True
for skip_f in skip_files:
if skip_f in file_name:
cutted_files.append(file_name)
procced = False
if procced:
if file_name.endswith((".c", ".h")):
#cutted_path = cutted_root + "\\" + file_name
cutted_path = os.path.join(cutted_root, file_name)
#full_path = root + "\\" + file_name
full_path = os.path.join(root, file_name)
item = {"full_path": full_path, "id": cutted_path, "cutted_root": cutted_root}
#mbed_list.append([full_path, cutted_path])
mbed_list.append(item)
if verbose:
print("\r\nskipped directories: {0:#d}".format(len(cutted_roots)))
for xitem in cutted_roots:
print(xitem)
print("\r\nskipped files: {0:#d}".format(len(cutted_files)))
for kitem in cutted_files:
print(kitem)
return mbed_list
def apply_replacement_id(mbed_list, replacemet_couples):
for item in mbed_list:
splited = os.path.split(item["id"])
result = splited[1].find(replacemet_couples["old"])
if result != -1:
new_tail = replacemet_couples["new"] + splited[1][len(replacemet_couples["old"]):]
item["id"] = os.path.join(splited[0],new_tail)
#print('bingo!')
#print(item)
return mbed_list
def get_copying_automatic_list(list_mbed, list_sdk, mbed_port_path = '', verbose = False):
copy_list = [] #list of copy items
orphan_list = []
licz = 0
for pathes_mbed in list_mbed:
empty = True
for pathes_sdk in list_sdk:
if pathes_mbed["id"] == pathes_sdk["id"]:
dest_path = pathes_mbed["full_path"]
dest_path = dest_path[ (len(mbed_port_path)):]
item = {"id" : pathes_mbed["id"], "src_path": pathes_sdk["full_path"], "dest_path": dest_path, "old_path": pathes_mbed["full_path"]}
copy_list.append(item)
empty = False;
if empty:
orphan_list.append(pathes_mbed["full_path"])
print("\r\nfitted files: {0:#d}".format(len(copy_list)))
if verbose:
for item in copy_list:
str_verbose = "{0} --> {1}"
print(str_verbose.format(item["id"], item["dest_path"]))
print("\r\norphaned files: {0:#d}".format(len(orphan_list)))
if verbose:
for xitem in orphan_list:
print(xitem)
return copy_list
def is_in_copying_list(copy_list, file_id):
# Note: despite its name, this returns True when file_id is NOT yet in
# copy_list (i.e. the file still needs to be added) and False otherwise.
for pathes_copy in copy_list:
if pathes_copy["id"] == file_id:
return False
return True
def upgrade_copying_list(copy_list, pathes_sdk, dest_mbed_dir_path, print_list):
splited = os.path.split(pathes_sdk["id"])
dest_path = os.path.join(dest_mbed_dir_path, splited[1])
item = {"id" : pathes_sdk["id"], "src_path": pathes_sdk["full_path"], "dest_path": dest_path} #, "old_path": pathes_mbed["full_path"]}
copy_list.append(item)
print_list.append(item)
def upgrade_copying_list_by_dirs(copy_list, list_sdk, force_copy_dirs_list, port_relative_dir = '',verbose = False):
print_list = []
for pathes_sdk in list_sdk:
if is_in_copying_list(copy_list, pathes_sdk["id"]):
make_hard_copy = False
for hard_copy_dir in force_copy_dirs_list:
if pathes_sdk["cutted_root"].startswith(hard_copy_dir["sdk_dir"]):
make_hard_copy = True
post_path = os.path.relpath(pathes_sdk["cutted_root"], hard_copy_dir["sdk_dir"])
if post_path == '.':
corect_hard_copy_dir = hard_copy_dir["mbed_dir"]
else: # destination is a nested directory
corect_hard_copy_dir = os.path.join(hard_copy_dir["mbed_dir"], post_path)
corect_hard_copy_dir = os.path.join(port_relative_dir, corect_hard_copy_dir)
upgrade_copying_list(copy_list, pathes_sdk, corect_hard_copy_dir, print_list)
break
print("\r\nforced copy of files by directories: {0:#d}".format(len(print_list)))
if verbose:
for item in print_list:
str_verbose = "{0} --> {1}"
print(str_verbose.format(item["id"], item["dest_path"]))
def upgrade_copying_list_by_files(copy_list, list_sdk, force_copy_files_list, port_relative_dir ='',verbose = False):
print_list = []
for pathes_sdk in list_sdk:
if is_in_copying_list(copy_list, pathes_sdk["id"]):
make_hard_copy = False
for hard_copy_file in force_copy_files_list:
if pathes_sdk["id"] == hard_copy_file["sdk_file"]:
make_hard_copy = True
corect_hard_copy_dir = os.path.join(port_relative_dir, hard_copy_file["mbed_dir"])
upgrade_copying_list(copy_list, pathes_sdk, corect_hard_copy_dir, print_list)
break
print("\r\nforced copy of files by files: {0:#d}".format(len(print_list)))
if verbose:
for item in print_list:
str_verbose = "{0} --> {1}"
print(str_verbose.format(item["id"], item["dest_path"]))
def copy_one_file(src, dest, verbose=False, dry_run=False):
dirs_to_created = os.path.dirname(dest)
if not os.path.exists(dirs_to_created):
if not dry_run:
os.makedirs(dirs_to_created)
if verbose:
print('makedirs: {0}'.format(dirs_to_created))
if not dry_run:
shutil.copyfile(src, dest)
if verbose:
print('copy: {0} --> {1}'.format(src, dest))
if __name__ == '__main__':
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('-r', '--run', help='run', action='store_true')
argument_parser.add_argument('-v', '--verbose', help='Verbose mode', action='store_true')
#argument_parser.add_argument('-r', '--rename_only', help='rename only', action='store_true')
parser_args = vars(argument_parser.parse_args())
verbose = False
if parser_args['verbose'] or not parser_args['run']:
verbose = True
with open('update_desc.json') as data_file:
update_desc = json.load(data_file)
#if not parser_args ['rename_only']:
ignore_file_list = update_desc['ignore_file_list']
ignore_dirs_list = update_desc['ignore_dirs_list']
id_replacements = update_desc['id_replacements']
force_copy_files_list = update_desc['force_copy_files_list']
force_copy_dirs_list = update_desc['force_copy_dirs_list']
sdk_dirs_in_mbed = update_desc['sdk_dirs_in_mbed']
sdk_component_path = update_desc['sdk_component_path']
port_relative_dir = update_desc['port_relative_dir_in_mbed']
list_sdk = get_file_pathes_couples(sdk_component_path,
ignore_dirs_list,
ignore_file_list,
verbose)
list_mbed = []
for directory in sdk_dirs_in_mbed:
list_mbed.extend(get_file_pathes_couples(directory))
list_mbed = apply_replacement_id(list_mbed, id_replacements)
mbed_port_path = ''
copy_list = get_copying_automatic_list(list_mbed, list_sdk, mbed_port_path, verbose)
upgrade_copying_list_by_dirs(copy_list, list_sdk, force_copy_dirs_list, port_relative_dir, verbose)
upgrade_copying_list_by_files(copy_list, list_sdk, force_copy_files_list, port_relative_dir, verbose)
rename_dirs(sdk_dirs_in_mbed, '_old_sdk', not parser_args['run'])
for copy_item in copy_list:
src = os.path.join('.',copy_item["src_path"])
dest = os.path.join('.',copy_item["dest_path"])
copy_one_file(src, dest, verbose, not parser_args['run'])
with open('sdk_update_result.json', 'w') as fp:
json.dump(copy_list, fp)
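# Hedged sketch, not part of the original script: the shape of the
# 'update_desc.json' file this tool expects, reconstructed from the keys read
# above. Every concrete path and name below is a placeholder.
def _example_update_desc():
    return {
        "sdk_component_path": "..\\nRF5_SDK\\components",
        "sdk_dirs_in_mbed": ["sdk"],
        "port_relative_dir_in_mbed": "TARGET_MCU_NRF51822",
        "ignore_dirs_list": ["examples", "external"],
        "ignore_file_list": ["_example.c"],
        "id_replacements": {"old": "_nrf51.c", "new": "_nrf52.c"},
        "force_copy_dirs_list": [
            {"sdk_dir": "device", "mbed_dir": "sdk\\device"}
        ],
        "force_copy_files_list": [
            {"sdk_file": "libraries\\util\\app_util.h", "mbed_dir": "sdk\\libraries\\util"}
        ]
    }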
|
apache-2.0
|
alzeih/ava
|
ava_core/abstract/decorators.py
|
5
|
2393
|
from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
from django.shortcuts import resolve_url
def access_check(test_func, view_func=None, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that should pass a test before allowing access.
:param test_func: A callable that returns True if access is allowed, or
False if the user should be sent to the login page.
:param view_func: A callable that returns an HTTP response. This should only
be set if used in a 'urls.py' file. If the decorator is used
on a view function, this parameter should be set to None.
:param login_url: The URL of the login page. If set to None, the default
login page is used.
:param redirect_field_name: The name of the URL field that should contain
the page to display after login.
"""
def decorator(decorated_view_func):
@wraps(decorated_view_func, assigned=available_attrs(decorated_view_func))
def _wrapped_view(request, *args, **kwargs):
# If the access test is passed, display the page.
if test_func(request):
return decorated_view_func(request, *args, **kwargs)
# Get the path to the current page and the login page.
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
# Redirect to the login page.
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
if view_func:
return decorator(view_func)
else:
return decorator
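# Hedged usage sketch, not part of the original module. The view and the
# staff-only test below are illustrative assumptions; any callable taking the
# request and returning a boolean works as test_func.
from django.http import HttpResponse

@access_check(lambda request: request.user.is_staff)
def staff_dashboard(request):
    return HttpResponse("staff only")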
|
gpl-3.0
|
crepererum/invenio
|
invenio/utils/pagination.py
|
18
|
1931
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio pagination helper class.
"""
from math import ceil
class Pagination(object):
"""Helps with rendering pagination list."""
def __init__(self, page, per_page, total_count):
self.page = page
self.per_page = per_page
self.total_count = total_count
@property
def pages(self):
"""Returns number of pages."""
return int(ceil(self.total_count / float(self.per_page)))
@property
def has_prev(self):
"""Returns true if it has previous page."""
return self.page > 1
@property
def has_next(self):
"""Returns true if it has next page."""
return self.page < self.pages
def iter_pages(self, left_edge=1, left_current=1,
right_current=3, right_edge=1):
last = 0
for num in xrange(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
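# A small usage sketch, not part of the original module; the numbers are
# illustrative only. None entries mark gaps that templates typically render
# as an ellipsis.
def _pagination_example():
    p = Pagination(page=5, per_page=10, total_count=200)
    # p.pages == 20, p.has_prev is True, p.has_next is True
    return list(p.iter_pages())  # [1, None, 4, 5, 6, 7, None, 20]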
|
gpl-2.0
|
job/exscript
|
src/Exscript/protocols/drivers/ace.py
|
4
|
1940
|
# Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
A driver for Cisco Application Control Engine (ACE)
"""
import re
from Exscript.protocols.drivers.driver import Driver
_user_re = [re.compile(r'user ?name: ?$', re.I)]
_password_re = [re.compile(r'(?:[\r\n]Password: ?|last resort password:)$')]
_tacacs_re = re.compile(r'[\r\n]s\/key[\S ]+\r?%s' % _password_re[0].pattern)
_prompt_re = [re.compile(r'[\r\n][\-\w+\.:/]+(?:\([^\)]+\))?[>#] ?$')]
_error_re = [re.compile(r'%Error'),
re.compile(r'invalid input', re.I),
re.compile(r'(?:incomplete|ambiguous) command', re.I),
re.compile(r'connection timed out', re.I),
re.compile(r'[^\r\n]+ not found', re.I)]
class ACEDriver(Driver):
def __init__(self):
Driver.__init__(self, 'ace')
self.user_re = _user_re
self.password_re = _password_re
self.prompt_re = _prompt_re
self.error_re = _error_re
def check_head_for_os(self, string):
if 'Cisco Application Control Software' in string:
return 90
def init_terminal(self, conn):
conn.execute('term len 0')
def auto_authorize(self, conn, account, flush, bailout):
conn.send('enable\r')
conn.app_authorize(account, flush, bailout)
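# Hedged self-check sketch, not part of the original driver: the sample prompts
# are made up, but show the kind of device output _prompt_re is meant to match.
def _prompt_examples():
    assert _prompt_re[0].search('\r\nswitch1/Admin# ')
    assert _prompt_re[0].search('\r\nswitch1/Admin(config)# ')
    assert not _prompt_re[0].search('switch1/Admin# more output follows')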
|
gpl-2.0
|
keithroe/vtkoptix
|
ThirdParty/Twisted/twisted/internet/test/_win32ifaces.py
|
41
|
3844
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows implementation of local network interface enumeration.
"""
from socket import socket, AF_INET6, SOCK_STREAM
from ctypes import (
WinDLL, byref, create_string_buffer, c_int, c_void_p,
POINTER, Structure, cast, string_at)
WS2_32 = WinDLL('ws2_32')
SOCKET = c_int
DWORD = c_int
LPVOID = c_void_p
LPSOCKADDR = c_void_p
LPWSAPROTOCOL_INFO = c_void_p
LPTSTR = c_void_p
LPDWORD = c_void_p
LPWSAOVERLAPPED = c_void_p
LPWSAOVERLAPPED_COMPLETION_ROUTINE = c_void_p
# http://msdn.microsoft.com/en-us/library/ms741621(v=VS.85).aspx
# int WSAIoctl(
# __in SOCKET s,
# __in DWORD dwIoControlCode,
# __in LPVOID lpvInBuffer,
# __in DWORD cbInBuffer,
# __out LPVOID lpvOutBuffer,
# __in DWORD cbOutBuffer,
# __out LPDWORD lpcbBytesReturned,
# __in LPWSAOVERLAPPED lpOverlapped,
# __in LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
# );
WSAIoctl = WS2_32.WSAIoctl
WSAIoctl.argtypes = [
SOCKET, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD,
LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE]
WSAIoctl.restype = c_int
# http://msdn.microsoft.com/en-us/library/ms741516(VS.85).aspx
# INT WSAAPI WSAAddressToString(
# __in LPSOCKADDR lpsaAddress,
# __in DWORD dwAddressLength,
# __in_opt LPWSAPROTOCOL_INFO lpProtocolInfo,
# __inout LPTSTR lpszAddressString,
# __inout LPDWORD lpdwAddressStringLength
# );
WSAAddressToString = WS2_32.WSAAddressToStringA
WSAAddressToString.argtypes = [
LPSOCKADDR, DWORD, LPWSAPROTOCOL_INFO, LPTSTR, LPDWORD]
WSAAddressToString.restype = c_int
SIO_ADDRESS_LIST_QUERY = 0x48000016
WSAEFAULT = 10014
class SOCKET_ADDRESS(Structure):
_fields_ = [('lpSockaddr', c_void_p),
('iSockaddrLength', c_int)]
def make_SAL(ln):
class SOCKET_ADDRESS_LIST(Structure):
_fields_ = [('iAddressCount', c_int),
('Address', SOCKET_ADDRESS * ln)]
return SOCKET_ADDRESS_LIST
def win32GetLinkLocalIPv6Addresses():
"""
Return a list of strings in colon-hex format representing all the link local
IPv6 addresses available on the system, as reported by
I{WSAIoctl}/C{SIO_ADDRESS_LIST_QUERY}.
"""
s = socket(AF_INET6, SOCK_STREAM)
size = 4096
retBytes = c_int()
for i in range(2):
buf = create_string_buffer(size)
ret = WSAIoctl(
s.fileno(),
SIO_ADDRESS_LIST_QUERY, 0, 0, buf, size, byref(retBytes), 0, 0)
# WSAIoctl might fail with WSAEFAULT, which means there was not enough
# space in the buffer we gave it. There's no way to check the errno
# until Python 2.6, so we don't even try. :/ Maybe if retBytes is still
# 0 another error happened, though.
if ret and retBytes.value:
size = retBytes.value
else:
break
# If it failed, then we'll just have to give up. Still no way to see why.
if ret:
raise RuntimeError("WSAIoctl failure")
addrList = cast(buf, POINTER(make_SAL(0)))
addrCount = addrList[0].iAddressCount
addrList = cast(buf, POINTER(make_SAL(addrCount)))
addressStringBufLength = 1024
addressStringBuf = create_string_buffer(addressStringBufLength)
retList = []
for i in range(addrList[0].iAddressCount):
retBytes.value = addressStringBufLength
addr = addrList[0].Address[i]
ret = WSAAddressToString(
addr.lpSockaddr, addr.iSockaddrLength, 0, addressStringBuf,
byref(retBytes))
if ret:
raise RuntimeError("WSAAddressToString failure")
retList.append(string_at(addressStringBuf))
return [addr for addr in retList if '%' in addr]
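# Hedged usage sketch, not part of the original module; only meaningful on
# Windows, where the ws2_32 DLL can be loaded.
def _print_link_local_addresses():
    for addr in win32GetLinkLocalIPv6Addresses():
        print(addr)  # e.g. 'fe80::1%1' -- made-up sample value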
|
bsd-3-clause
|
Shanec132006/Lab4
|
lib/werkzeug/testsuite/wsgi.py
|
99
|
14961
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wsgi
~~~~~~~~~~~~~~~~~~~~~~~
Tests the WSGI utilities.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from os import path
from contextlib import closing
from werkzeug.testsuite import WerkzeugTestCase, get_temporary_directory
from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
from werkzeug._compat import StringIO, BytesIO, NativeStringIO, to_native
class WSGIUtilsTestCase(WerkzeugTestCase):
def test_shareddatamiddleware_get_file_loader(self):
app = wsgi.SharedDataMiddleware(None, {})
assert callable(app.get_file_loader('foo'))
def test_shared_data_middleware(self):
def null_application(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
yield b'NOT FOUND'
test_dir = get_temporary_directory()
with open(path.join(test_dir, to_native(u'äöü', 'utf-8')), 'w') as test_file:
test_file.write(u'FOUND')
app = wsgi.SharedDataMiddleware(null_application, {
'/': path.join(path.dirname(__file__), 'res'),
'/sources': path.join(path.dirname(__file__), 'res'),
'/pkg': ('werkzeug.debug', 'shared'),
'/foo': test_dir
})
for p in '/test.txt', '/sources/test.txt', '/foo/äöü':
app_iter, status, headers = run_wsgi_app(app, create_environ(p))
self.assert_equal(status, '200 OK')
with closing(app_iter) as app_iter:
data = b''.join(app_iter).strip()
self.assert_equal(data, b'FOUND')
app_iter, status, headers = run_wsgi_app(
app, create_environ('/pkg/debugger.js'))
with closing(app_iter) as app_iter:
contents = b''.join(app_iter)
self.assert_in(b'$(function() {', contents)
app_iter, status, headers = run_wsgi_app(
app, create_environ('/missing'))
self.assert_equal(status, '404 NOT FOUND')
self.assert_equal(b''.join(app_iter).strip(), b'NOT FOUND')
def test_get_host(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.org')
self.assert_equal(
wsgi.get_host(create_environ('/', 'http://example.org')),
'example.org')
def test_get_host_multiple_forwarded(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.com, example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.com')
self.assert_equal(
wsgi.get_host(create_environ('/', 'http://example.com')),
'example.com')
def test_get_host_validation(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env, trusted_hosts=['.example.org']),
'example.org')
self.assert_raises(BadRequest, wsgi.get_host, env,
trusted_hosts=['example.com'])
def test_responder(self):
def foo(environ, start_response):
return BaseResponse(b'Test')
client = Client(wsgi.responder(foo), BaseResponse)
response = client.get('/')
self.assert_equal(response.status_code, 200)
self.assert_equal(response.data, b'Test')
def test_pop_path_info(self):
original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}
# regular path info popping
def assert_tuple(script_name, path_info):
self.assert_equal(env.get('SCRIPT_NAME'), script_name)
self.assert_equal(env.get('PATH_INFO'), path_info)
env = original_env.copy()
pop = lambda: wsgi.pop_path_info(env)
assert_tuple('/foo', '/a/b///c')
self.assert_equal(pop(), 'a')
assert_tuple('/foo/a', '/b///c')
self.assert_equal(pop(), 'b')
assert_tuple('/foo/a/b', '///c')
self.assert_equal(pop(), 'c')
assert_tuple('/foo/a/b///c', '')
self.assert_is_none(pop())
def test_peek_path_info(self):
env = {
'SCRIPT_NAME': '/foo',
'PATH_INFO': '/aaa/b///c'
}
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')
self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')
def test_path_info_and_script_name_fetching(self):
env = create_environ(u'/\N{SNOWMAN}', u'http://example.com/\N{COMET}/')
self.assert_equal(wsgi.get_path_info(env), u'/\N{SNOWMAN}')
self.assert_equal(wsgi.get_path_info(env, charset=None), u'/\N{SNOWMAN}'.encode('utf-8'))
self.assert_equal(wsgi.get_script_name(env), u'/\N{COMET}')
self.assert_equal(wsgi.get_script_name(env, charset=None), u'/\N{COMET}'.encode('utf-8'))
def test_query_string_fetching(self):
env = create_environ(u'/?\N{SNOWMAN}=\N{COMET}')
qs = wsgi.get_query_string(env)
self.assert_strict_equal(qs, '%E2%98%83=%E2%98%84')
def test_limited_stream(self):
class RaisingLimitedStream(wsgi.LimitedStream):
def on_exhausted(self):
raise BadRequest('input stream exhausted')
io = BytesIO(b'123456')
stream = RaisingLimitedStream(io, 3)
self.assert_strict_equal(stream.read(), b'123')
self.assert_raises(BadRequest, stream.read)
io = BytesIO(b'123456')
stream = RaisingLimitedStream(io, 3)
self.assert_strict_equal(stream.tell(), 0)
self.assert_strict_equal(stream.read(1), b'1')
self.assert_strict_equal(stream.tell(), 1)
self.assert_strict_equal(stream.read(1), b'2')
self.assert_strict_equal(stream.tell(), 2)
self.assert_strict_equal(stream.read(1), b'3')
self.assert_strict_equal(stream.tell(), 3)
self.assert_raises(BadRequest, stream.read)
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readline(), b'123456\n')
self.assert_strict_equal(stream.readline(), b'ab')
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(), [b'123456\n', b'ab'])
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(2), [b'12'])
self.assert_strict_equal(stream.readlines(2), [b'34'])
self.assert_strict_equal(stream.readlines(), [b'56\n', b'ab'])
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readline(100), b'123456\n')
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(100), [b'123456\n', b'ab'])
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_strict_equal(stream.read(1), b'1')
self.assert_strict_equal(stream.read(1), b'2')
self.assert_strict_equal(stream.read(), b'3')
self.assert_strict_equal(stream.read(), b'')
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_strict_equal(stream.read(-1), b'123')
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 0)
self.assert_strict_equal(stream.read(-1), b'')
io = StringIO(u'123456')
stream = wsgi.LimitedStream(io, 0)
self.assert_strict_equal(stream.read(-1), u'')
io = StringIO(u'123\n456\n')
stream = wsgi.LimitedStream(io, 8)
self.assert_strict_equal(list(stream), [u'123\n', u'456\n'])
def test_limited_stream_disconnection(self):
io = BytesIO(b'A bit of content')
# disconnect detection when running out of bytes
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
# disconnect detection because the file was closed
io = BytesIO(b'x' * 255)
io.close()
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
def test_path_info_extraction(self):
x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app')
self.assert_equal(x, u'/')
x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
env = create_environ(u'/fööbär', u'http://☃.net/x/')
x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/a/hello')
self.assert_is_none(x)
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello',
collapse_http_schemes=False)
self.assert_is_none(x)
def test_get_host_fallback(self):
self.assert_equal(wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '80'
}), 'foobar.example.com')
self.assert_equal(wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '81'
}), 'foobar.example.com:81')
def test_get_current_url_unicode(self):
env = create_environ()
env['QUERY_STRING'] = 'foo=bar&baz=blah&meh=\xcf'
rv = wsgi.get_current_url(env)
self.assert_strict_equal(rv,
u'http://localhost/?foo=bar&baz=blah&meh=\ufffd')
def test_multi_part_line_breaks(self):
data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=16))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
data = 'abc\r\nThis line is broken by the buffer length.' \
'\r\nFoo bar baz'
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=24))
self.assert_equal(lines, ['abc\r\n', 'This line is broken by the '
'buffer length.\r\n', 'Foo bar baz'])
def test_multi_part_line_breaks_bytes(self):
data = b'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = BytesIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=16))
self.assert_equal(lines, [b'abcdef\r\n', b'ghijkl\r\n',
b'mnopqrstuvwxyz\r\n', b'ABCDEFGHIJK'])
data = b'abc\r\nThis line is broken by the buffer length.' \
b'\r\nFoo bar baz'
test_stream = BytesIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=24))
self.assert_equal(lines, [b'abc\r\n', b'This line is broken by the '
b'buffer length.\r\n', b'Foo bar baz'])
def test_multi_part_line_breaks_problematic(self):
data = 'abc\rdef\r\nghi'
for x in range(1, 10):
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=4))
self.assert_equal(lines, ['abc\r', 'def\r\n', 'ghi'])
def test_iter_functions_support_iterators(self):
data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
lines = list(wsgi.make_line_iter(data))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
def test_make_chunk_iter(self):
data = [u'abcdefXghi', u'jklXmnopqrstuvwxyzX', u'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
u'ABCDEFGHIJK'])
data = u'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = StringIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
buffer_size=4))
self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
u'ABCDEFGHIJK'])
def test_make_chunk_iter_bytes(self):
data = [b'abcdefXghi', b'jklXmnopqrstuvwxyzX', b'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
b'ABCDEFGHIJK'])
data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = BytesIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
buffer_size=4))
self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
b'ABCDEFGHIJK'])
def test_lines_longer_buffer_size(self):
data = '1234567890\n1234567890\n'
for bufsize in range(1, 15):
lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
buffer_size=4))
self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
return suite
|
apache-2.0
|
aldian/tensorflow
|
tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py
|
20
|
4462
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.gan.python.features.random_tensor_pool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl import tensor_pool
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TensorPoolTest(test.TestCase):
def test_pool_unknown_input_shape(self):
"""Checks that `input_value` can have unknown shape."""
input_value = array_ops.placeholder(
dtype=dtypes.int32, shape=[None, None, 3])
output_value = tensor_pool(input_value, pool_size=10)
self.assertEqual(output_value.shape.as_list(), [None, None, 3])
with self.test_session(use_gpu=True) as session:
for i in range(10):
session.run(output_value, {input_value: [[[i] * 3]]})
session.run(output_value, {input_value: [[[i] * 3] * 2]})
session.run(output_value, {input_value: [[[i] * 3] * 5] * 2})
def test_pool_sequence(self):
"""Checks that values are pooled and returned maximally twice."""
input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])
output_value = tensor_pool(input_value, pool_size=10)
self.assertEqual(output_value.shape.as_list(), [])
with self.test_session(use_gpu=True) as session:
outs = []
for i in range(50):
out = session.run(output_value, {input_value: i})
outs.append(out)
self.assertLessEqual(out, i)
_, counts = np.unique(outs, return_counts=True)
# Check that each value is returned at most twice.
self.assertTrue((counts <= 2).all())
def test_never_pool(self):
"""Checks that setting `pooling_probability` to zero works."""
input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])
output_value = tensor_pool(
input_value, pool_size=10, pooling_probability=0.0)
self.assertEqual(output_value.shape.as_list(), [])
with self.test_session(use_gpu=True) as session:
for i in range(50):
out = session.run(output_value, {input_value: i})
self.assertEqual(out, i)
def test_pooling_probability(self):
"""Checks that `pooling_probability` works."""
input_value = array_ops.placeholder(dtype=dtypes.int32, shape=[])
pool_size = 10
pooling_probability = 0.2
output_value = tensor_pool(
input_value,
pool_size=pool_size,
pooling_probability=pooling_probability)
self.assertEqual(output_value.shape.as_list(), [])
with self.test_session(use_gpu=True) as session:
not_pooled = 0
total = 1000
for i in range(total):
out = session.run(output_value, {input_value: i})
if out == i:
not_pooled += 1
self.assertAllClose(
(not_pooled - pool_size) / (total - pool_size),
1 - pooling_probability,
atol=0.03)
def test_input_values_tuple(self):
"""Checks that `input_values` can be a tuple."""
input_values = (array_ops.placeholder(dtype=dtypes.int32, shape=[]),
array_ops.placeholder(dtype=dtypes.int32, shape=[]))
output_values = tensor_pool(input_values, pool_size=3)
self.assertEqual(len(output_values), len(input_values))
for output_value in output_values:
self.assertEqual(output_value.shape.as_list(), [])
with self.test_session(use_gpu=True) as session:
for i in range(10):
outs = session.run(output_values, {
input_values[0]: i,
input_values[1]: i + 1
})
self.assertEqual(len(outs), len(input_values))
self.assertEqual(outs[1] - outs[0], 1)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
nnakamot/ncclient
|
ncclient/operations/third_party/h3c/rpc.py
|
10
|
2286
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# by yangxufeng.zhao
from ncclient.xml_ import *
from ncclient.operations import util
from ncclient.operations.rpc import RPC
class GetBulk(RPC):
"The *get-bulk* RPC."
def request(self, filter=None):
"""Retrieve running configuration and device state information.
*filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
:seealso: :ref:`filter_params`
"""
node = new_ele("get-bulk")
if filter is not None:
node.append(util.build_filter(filter))
return self._request(node)
class GetBulkConfig(RPC):
"""The *get-bulk-config* RPC."""
def request(self, source, filter=None):
"""Retrieve all or part of a specified configuration.
*source* name of the configuration datastore being queried
*filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
:seealso: :ref:`filter_params`"""
node = new_ele("get-bulk-config")
node.append(util.datastore_or_url("source", source, self._assert))
if filter is not None:
node.append(util.build_filter(filter))
return self._request(node)
class CLI(RPC):
def request(self, command=None):
"""command text
view: Execution user view exec
Configuration system view exec
"""
node = new_ele("CLI")
node.append(validated_element(command))
# sub_ele(node, view).text = command
return self._request(node)
class Action(RPC):
def request(self, action=None):
node = new_ele("action")
node.append(validated_element(action))
return self._request(node)
class Save(RPC):
def request(self, file=None):
node = new_ele('save')
sub_ele(node, 'file').text = file
return self._request(node)
class Load(RPC):
def request(self, file=None):
node = new_ele('load')
sub_ele(node, 'file').text = file
return self._request(node)
class Rollback(RPC):
def request(self, file=None):
node = new_ele('rollback')
sub_ele(node, 'file').text = file
return self._request(node)
|
apache-2.0
|
AICP/external_chromium_org
|
tools/deep_memory_profiler/lib/bucket.py
|
59
|
5674
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from lib.symbol import FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS, TYPEINFO_SYMBOLS
LOGGER = logging.getLogger('dmprof')
# Indexes in dumped heap profile dumps.
VIRTUAL, COMMITTED, ALLOC_COUNT, FREE_COUNT, _, BUCKET_ID = range(6)
class Bucket(object):
"""Represents a bucket, which is a unit of memory block classification."""
def __init__(self, stacktrace, allocator_type, typeinfo, typeinfo_name):
self._stacktrace = stacktrace
self._allocator_type = allocator_type
self._typeinfo = typeinfo
self._typeinfo_name = typeinfo_name
self._symbolized_stackfunction = stacktrace
self._symbolized_joined_stackfunction = ''
self._symbolized_stacksourcefile = stacktrace
self._symbolized_joined_stacksourcefile = ''
self._symbolized_typeinfo = typeinfo_name
self.component_cache = ''
def __str__(self):
result = []
result.append(self._allocator_type)
if self._symbolized_typeinfo == 'no typeinfo':
result.append('tno_typeinfo')
else:
result.append('t' + self._symbolized_typeinfo)
result.append('n' + self._typeinfo_name)
result.extend(['%s(@%s)' % (function, sourcefile)
for function, sourcefile
in zip(self._symbolized_stackfunction,
self._symbolized_stacksourcefile)])
return ' '.join(result)
def symbolize(self, symbol_mapping_cache):
"""Makes a symbolized stacktrace and typeinfo with |symbol_mapping_cache|.
Args:
symbol_mapping_cache: A SymbolMappingCache object.
"""
# TODO(dmikurube): Fill explicitly with numbers if symbol not found.
self._symbolized_stackfunction = [
symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address)
for address in self._stacktrace]
self._symbolized_joined_stackfunction = ' '.join(
self._symbolized_stackfunction)
self._symbolized_stacksourcefile = [
symbol_mapping_cache.lookup(SOURCEFILE_SYMBOLS, address)
for address in self._stacktrace]
self._symbolized_joined_stacksourcefile = ' '.join(
self._symbolized_stacksourcefile)
if not self._typeinfo:
self._symbolized_typeinfo = 'no typeinfo'
else:
self._symbolized_typeinfo = symbol_mapping_cache.lookup(
TYPEINFO_SYMBOLS, self._typeinfo)
if not self._symbolized_typeinfo:
self._symbolized_typeinfo = 'no typeinfo'
def clear_component_cache(self):
self.component_cache = ''
@property
def stacktrace(self):
return self._stacktrace
@property
def allocator_type(self):
return self._allocator_type
@property
def typeinfo(self):
return self._typeinfo
@property
def typeinfo_name(self):
return self._typeinfo_name
@property
def symbolized_stackfunction(self):
return self._symbolized_stackfunction
@property
def symbolized_joined_stackfunction(self):
return self._symbolized_joined_stackfunction
@property
def symbolized_stacksourcefile(self):
return self._symbolized_stacksourcefile
@property
def symbolized_joined_stacksourcefile(self):
return self._symbolized_joined_stacksourcefile
@property
def symbolized_typeinfo(self):
return self._symbolized_typeinfo
class BucketSet(object):
"""Represents a set of bucket."""
def __init__(self):
self._buckets = {}
self._code_addresses = set()
self._typeinfo_addresses = set()
def load(self, prefix):
"""Loads all related bucket files.
Args:
prefix: A prefix string for bucket file names.
"""
LOGGER.info('Loading bucket files.')
n = 0
skipped = 0
while True:
path = '%s.%04d.buckets' % (prefix, n)
if not os.path.exists(path) or not os.stat(path).st_size:
if skipped > 10:
break
n += 1
skipped += 1
continue
LOGGER.info(' %s' % path)
with open(path, 'r') as f:
self._load_file(f)
n += 1
skipped = 0
def _load_file(self, bucket_f):
for line in bucket_f:
words = line.split()
typeinfo = None
typeinfo_name = ''
stacktrace_begin = 2
for index, word in enumerate(words):
if index < 2:
continue
if word[0] == 't':
typeinfo = int(word[1:], 16)
self._typeinfo_addresses.add(typeinfo)
elif word[0] == 'n':
typeinfo_name = word[1:]
else:
stacktrace_begin = index
break
stacktrace = [int(address, 16) for address in words[stacktrace_begin:]]
for frame in stacktrace:
self._code_addresses.add(frame)
self._buckets[int(words[0])] = Bucket(
stacktrace, words[1], typeinfo, typeinfo_name)
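# A made-up example of one bucket-file line, to illustrate the parsing above
# (the exact field layout depends on the heap profiler dump; this is only an
# assumed sketch):
#   "123 malloc t7f0000000010 nMyType 7f00000000aa 7f00000000bb"
# would yield bucket id 123, allocator type 'malloc', typeinfo 0x7f0000000010
# named 'MyType', and a two-frame stacktrace [0x7f00000000aa, 0x7f00000000bb].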
def __iter__(self):
for bucket_id, bucket_content in self._buckets.iteritems():
yield bucket_id, bucket_content
def __getitem__(self, bucket_id):
return self._buckets[bucket_id]
def get(self, bucket_id):
return self._buckets.get(bucket_id)
def symbolize(self, symbol_mapping_cache):
for bucket_content in self._buckets.itervalues():
bucket_content.symbolize(symbol_mapping_cache)
def clear_component_cache(self):
for bucket_content in self._buckets.itervalues():
bucket_content.clear_component_cache()
def iter_addresses(self, symbol_type):
if symbol_type in [FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS]:
for function in self._code_addresses:
yield function
else:
for function in self._typeinfo_addresses:
yield function
|
bsd-3-clause
|
D4wN/brickv
|
src/build_data/windows/OpenGL/GL/NV/register_combiners.py
|
4
|
3899
|
'''OpenGL extension NV.register_combiners
This module customises the behaviour of the
OpenGL.raw.GL.NV.register_combiners to provide a more
Python-friendly API
Overview (from the spec)
NVIDIA's next-generation graphics processor and its derivative
designs support an extremely configurable mechanism known as "register
combiners" for computing fragment colors.
The register combiner mechanism is a significant redesign of NVIDIA's
original TNT combiner mechanism as introduced by NVIDIA's RIVA
TNT graphics processor. Familiarity with the TNT combiners will
help the reader appreciate the greatly enhanced register combiners
functionality (see the NV_texture_env_combine4 OpenGL extension
specification for this background). The register combiner mechanism
has the following enhanced functionality:
The numeric range of combiner computations is from [-1,1]
(instead of TNT's [0,1] numeric range),
The set of available combiner inputs is expanded to include the
secondary color, fog color, fog factor, and a second combiner
constant color (TNT's available combiner inputs consist of
only zero, a single combiner constant color, the primary color,
texture 0, texture 1, and, in the case of combiner 1, the result
of combiner 0).
Each combiner variable input can be independently scaled and
biased into several possible numeric ranges (TNT can only
complement combiner inputs).
Each combiner stage computes three distinct outputs (instead of
TNT's single combiner output).
The output operations include support for computing dot products
(TNT has no support for computing dot products).
After each output operation, there is a configurable scale and bias
applied (TNT's combiner operations build in a scale and/or bias
into some of its combiner operations).
Each input variable for each combiner stage is fetched from any
entry in a combiner register set. Moreover, the outputs of each
combiner stage are written into the register set of the subsequent
combiner stage (TNT could only use the result from combiner 0 as
a possible input to combiner 1; TNT lacks the notion of an
input/output register set).
The register combiner mechanism supports at least two general
combiner stages and then a special final combiner stage appropriate
for applying a color sum and fog computation (TNT provides two
simpler combiner stages, and TNT's color sum and fog stages are
hard-wired and not subsumed by the combiner mechanism as in register
combiners).
The register combiners fit into the OpenGL pipeline as a rasterization
processing stage operating in parallel to the traditional OpenGL
texture environment, color sum, AND fog application. Enabling this
extension bypasses OpenGL's existing texture environment, color
sum, and fog application processing and instead uses the register
combiners. The combiner and texture environment state is orthogonal
so modifying combiner state does not change the traditional OpenGL
texture environment state and the texture environment state is
ignored when combiners are enabled.
OpenGL application developers can use the register combiner mechanism
for very sophisticated shading techniques. For example, an
approximation of Blinn's bump mapping technique can be achieved with
the combiner mechanism. Additionally, multi-pass shading models
that require several passes with unextended OpenGL 1.2 functionality
can be implemented in several fewer passes with register combiners.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/register_combiners.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.register_combiners import *
### END AUTOGENERATED SECTION
|
gpl-2.0
|
leonardr/botfriend
|
bots.sample/junkmail/__init__.py
|
1
|
1402
|
import json
import random
import requests
from olipy.ia import Text
from botfriend.bot import BasicBot
from botfriend.model import Post
class JunkMailBot(BasicBot):
COLLECTION = "tednelsonjunkmail"
def update_state(self):
cutoff = self.model.last_state_update_time
old_state = self.model.json_state or []
query = Text.recent("collection:%s" % self.COLLECTION, cutoff=cutoff)
new_items = [x.identifier for x in query]
all_items = set(old_state + new_items)
return list(all_items)
def new_post(self):
# Choose a random identifier from the current state.
identifier = random.choice(self.model.json_state)
if not identifier:
return None
text = Text(identifier)
title = text.metadata['title']
page_num = random.randint(0, text.pages-1)
reader_url = text.reader_url(page_num)
image_url = text.image_url(page_num)
# Create the post.
text = "%s\n\n%s" % (title, reader_url)
post, is_new = Post.from_content(
self.model, text, reuse_existing=False
)
# Attach the image.
if not image_url:
return None
response = requests.get(image_url)
media_type = response.headers['Content-Type']
post.attach(media_type, content=response.content)
return post
Bot = JunkMailBot
|
mit
|
jindongh/boto
|
tests/unit/s3/test_website.py
|
114
|
9219
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import unittest
import xml.dom.minidom
import xml.sax
from boto.s3.website import WebsiteConfiguration
from boto.s3.website import RedirectLocation
from boto.s3.website import RoutingRules
from boto.s3.website import Condition
from boto.s3.website import RoutingRules
from boto.s3.website import RoutingRule
from boto.s3.website import Redirect
from boto import handler
def pretty_print_xml(text):
text = ''.join(t.strip() for t in text.splitlines())
x = xml.dom.minidom.parseString(text)
return x.toprettyxml()
class TestS3WebsiteConfiguration(unittest.TestCase):
maxDiff = None
def setUp(self):
pass
def tearDown(self):
pass
def test_suffix_only(self):
config = WebsiteConfiguration(suffix='index.html')
xml = config.to_xml()
self.assertIn(
'<IndexDocument><Suffix>index.html</Suffix></IndexDocument>', xml)
def test_suffix_and_error(self):
config = WebsiteConfiguration(suffix='index.html',
error_key='error.html')
xml = config.to_xml()
self.assertIn(
'<ErrorDocument><Key>error.html</Key></ErrorDocument>', xml)
def test_redirect_all_request_to_with_just_host(self):
location = RedirectLocation(hostname='example.com')
config = WebsiteConfiguration(redirect_all_requests_to=location)
xml = config.to_xml()
self.assertIn(
('<RedirectAllRequestsTo><HostName>'
'example.com</HostName></RedirectAllRequestsTo>'), xml)
def test_redirect_all_requests_with_protocol(self):
location = RedirectLocation(hostname='example.com', protocol='https')
config = WebsiteConfiguration(redirect_all_requests_to=location)
xml = config.to_xml()
self.assertIn(
('<RedirectAllRequestsTo><HostName>'
'example.com</HostName><Protocol>https</Protocol>'
'</RedirectAllRequestsTo>'), xml)
def test_routing_rules_key_prefix(self):
x = pretty_print_xml
# This rule redirects requests for docs/* to documentation/*
rules = RoutingRules()
condition = Condition(key_prefix='docs/')
redirect = Redirect(replace_key_prefix='documents/')
rules.add_rule(RoutingRule(condition, redirect))
config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
xml = config.to_xml()
expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
<IndexDocument>
<Suffix>index.html</Suffix>
</IndexDocument>
<RoutingRules>
<RoutingRule>
<Condition>
<KeyPrefixEquals>docs/</KeyPrefixEquals>
</Condition>
<Redirect>
<ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
</Redirect>
</RoutingRule>
</RoutingRules>
</WebsiteConfiguration>
"""
self.assertEqual(x(expected_xml), x(xml))
def test_routing_rules_to_host_on_404(self):
x = pretty_print_xml
# Another example from the docs:
# Redirect requests to a specific host in the event of a 404.
# Also, the redirect inserts a report-404/ prefix. For example,
# if you request a page ExamplePage.html and it results
# in a 404, the request is routed to a page report-404/ExamplePage.html
rules = RoutingRules()
condition = Condition(http_error_code=404)
redirect = Redirect(hostname='example.com',
replace_key_prefix='report-404/')
rules.add_rule(RoutingRule(condition, redirect))
config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
xml = config.to_xml()
expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
<IndexDocument>
<Suffix>index.html</Suffix>
</IndexDocument>
<RoutingRules>
<RoutingRule>
<Condition>
<HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
</Condition>
<Redirect>
<HostName>example.com</HostName>
<ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
</Redirect>
</RoutingRule>
</RoutingRules>
</WebsiteConfiguration>
"""
self.assertEqual(x(expected_xml), x(xml))
def test_key_prefix(self):
x = pretty_print_xml
rules = RoutingRules()
condition = Condition(key_prefix="images/")
redirect = Redirect(replace_key='folderdeleted.html')
rules.add_rule(RoutingRule(condition, redirect))
config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
xml = config.to_xml()
expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
<IndexDocument>
<Suffix>index.html</Suffix>
</IndexDocument>
<RoutingRules>
<RoutingRule>
<Condition>
<KeyPrefixEquals>images/</KeyPrefixEquals>
</Condition>
<Redirect>
<ReplaceKeyWith>folderdeleted.html</ReplaceKeyWith>
</Redirect>
</RoutingRule>
</RoutingRules>
</WebsiteConfiguration>
"""
self.assertEqual(x(expected_xml), x(xml))
def test_builders(self):
x = pretty_print_xml
# This is a more declarative way to create rules.
# First the long way.
rules = RoutingRules()
condition = Condition(http_error_code=404)
redirect = Redirect(hostname='example.com',
replace_key_prefix='report-404/')
rules.add_rule(RoutingRule(condition, redirect))
xml = rules.to_xml()
# Then the more concise way.
rules2 = RoutingRules().add_rule(
RoutingRule.when(http_error_code=404).then_redirect(
hostname='example.com', replace_key_prefix='report-404/'))
xml2 = rules2.to_xml()
self.assertEqual(x(xml), x(xml2))
def test_parse_xml(self):
x = pretty_print_xml
xml_in = """<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
<IndexDocument>
<Suffix>index.html</Suffix>
</IndexDocument>
<ErrorDocument>
<Key>error.html</Key>
</ErrorDocument>
<RoutingRules>
<RoutingRule>
<Condition>
<KeyPrefixEquals>docs/</KeyPrefixEquals>
</Condition>
<Redirect>
<Protocol>https</Protocol>
<HostName>www.example.com</HostName>
<ReplaceKeyWith>documents/</ReplaceKeyWith>
<HttpRedirectCode>302</HttpRedirectCode>
</Redirect>
</RoutingRule>
<RoutingRule>
<Condition>
<HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
</Condition>
<Redirect>
<HostName>example.com</HostName>
<ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
</Redirect>
</RoutingRule>
</RoutingRules>
</WebsiteConfiguration>
"""
webconfig = WebsiteConfiguration()
h = handler.XmlHandler(webconfig, None)
xml.sax.parseString(xml_in.encode('utf-8'), h)
xml_out = webconfig.to_xml()
self.assertEqual(x(xml_in), x(xml_out))
|
mit
|
mushyshah/ELEC490G11
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py
|
1812
|
9537
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
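# Illustrative call (hypothetical values): only the listed keys are copied, and
# the copies are deep, so mutating the result does not touch the input dict.
#   _DeepCopySomeKeys({'a': [1, 2], 'b': 3}, ['a', 'missing']) == {'a': [1, 2]}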
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
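# For example (hypothetical target names, following the convention documented
# above):
#   _SuffixName('base/base.gyp:base#target', 'shim') -> 'base/base.gyp:base_shim#target'
#   _ShardName('base/base.gyp:base#target', 1)       -> 'base/base.gyp:base_1#target'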
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
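# A small illustration (assumed input): a target 'a.gyp:lib#target' with
# 'msvs_shard': 2 and sources ['s0.cc', 's1.cc', 's2.cc'] is split into
# 'a.gyp:lib_0#target' with sources ['s0.cc', 's2.cc'] and
# 'a.gyp:lib_1#target' with sources ['s1.cc']; dependencies on the original
# target are rewritten to depend on every shard.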
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
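# For instance (values assumed for illustration): with no explicit
# ProgramDatabaseFile and no 'msvs_large_pdb_path' variable, a shared_library
# target named 'base' and vars['PRODUCT_DIR'] == 'out/Release' falls through
# to 'out/Release/base.dll.pdb'.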
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
|
gpl-2.0
|
timchenxiaoyu/Diamond
|
src/collectors/rabbitmq/test/testrabbitmq.py
|
15
|
5563
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from rabbitmq import RabbitMQCollector
##########################################################################
class TestRabbitMQCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('RabbitMQCollector', {
'host': 'localhost:55672',
'user': 'guest',
'password': 'password',
'queues_ignored': '^ignored',
'cluster': True,
})
self.collector = RabbitMQCollector(config, None)
def test_import(self):
self.assertTrue(RabbitMQCollector)
@patch('rabbitmq.RabbitMQClient')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys(self, publish_mock, client_mock):
client = Mock()
queue_data = [{
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
'name': 'test_queue'
}, {
'name': 'ignored',
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
}]
overview_data = {
'node': 'rabbit@localhost',
'more_keys': {'nested_key': 3},
'key': 4,
'string': 'string',
}
node_health = {
'fd_used': 1,
'fd_total': 2,
'mem_used': 2,
'mem_limit': 4,
'sockets_used': 1,
'sockets_total': 2,
'disk_free_limit': 1,
'disk_free': 1,
'proc_used': 1,
'proc_total': 1,
'partitions': [],
}
client_mock.return_value = client
client.get_queues.return_value = queue_data
client.get_overview.return_value = overview_data
client.get_nodes.return_value = [1, 2, 3]
client.get_node.return_value = node_health
self.collector.collect()
client.get_queues.assert_called_once_with(None)
client.get_nodes.assert_called_once_with()
client.get_node.assert_called_once_with('rabbit@localhost')
metrics = {
'queues.test_queue.more_keys.nested_key': 1,
'queues.test_queue.key': 2,
'more_keys.nested_key': 3,
'key': 4,
'health.fd_used': 1,
'health.fd_total': 2,
'health.mem_used': 2,
'health.mem_limit': 4,
'health.sockets_used': 1,
'health.sockets_total': 2,
'health.disk_free_limit': 1,
'health.disk_free': 1,
'health.proc_used': 1,
'health.proc_total': 1,
'cluster.partitions': 0,
'cluster.nodes': 3
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch('rabbitmq.RabbitMQClient')
@patch.object(Collector, 'publish')
def test_opt_should_replace_dots(self, publish_mock, client_mock):
self.collector.config['replace_dot'] = '_'
client = Mock()
queue_data = [{
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
'name': 'test.queue'
}, {
'name': 'ignored',
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
}]
overview_data = {
'node': 'rabbit@localhost',
'more_keys': {'nested_key': 3},
'key': 4,
'string': 'string',
}
node_health = {
'fd_used': 1,
'fd_total': 2,
'mem_used': 2,
'mem_limit': 4,
'sockets_used': 1,
'sockets_total': 2,
'disk_free_limit': 1,
'disk_free': 1,
'proc_used': 1,
'proc_total': 1,
'partitions': [],
}
client_mock.return_value = client
client.get_queues.return_value = queue_data
client.get_overview.return_value = overview_data
client.get_nodes.return_value = [1, 2, 3]
client.get_node.return_value = node_health
self.collector.collect()
metrics = {
'queues.test_queue.more_keys.nested_key': 1,
'queues.test_queue.key': 2,
'more_keys.nested_key': 3,
'key': 4,
'health.fd_used': 1,
'health.fd_total': 2,
'health.mem_used': 2,
'health.mem_limit': 4,
'health.sockets_used': 1,
'health.sockets_total': 2,
'health.disk_free_limit': 1,
'health.disk_free': 1,
'health.proc_used': 1,
'health.proc_total': 1,
'cluster.partitions': 0,
'cluster.nodes': 3
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
self.collector.config['replace_dot'] = False
##########################################################################
if __name__ == "__main__":
unittest.main()
|
mit
|
xujun10110/AIL-framework
|
bin/Categ.py
|
2
|
3272
|
#!/usr/bin/env python2
# -*-coding:UTF-8 -*
"""
The ZMQ_PubSub_Categ Module
============================
This module consumes the Redis list created by the ZMQ_PubSub_Tokenize_Q
Module.
Each word file created under /files/ represents a category.
This module takes these files and compares them to
the stream of data given by the ZMQ_PubSub_Tokenize_Q Module.
When a word from a paste matches one or more of these word files, the filename
of the paste is published/forwarded to the next modules.
Each category (each file) represents a dynamic channel.
This means that if you create 1000 files under /files/ you'll have 1000 channels
where, every time a word matches a category, the paste containing
this word will be pushed to that specific channel.
.. note:: The channel will have the name of the file created.
Implementing modules can start here: create your own category file,
and then create your own module to treat the specific pastes matching this
category.
.. note:: The ZMQ_Something_Q and ZMQ_Something modules are closely bound; always put
the same Subscriber name in both of them.
Requirements
------------
* A running Redis instance (Redis).
* Category word files created under /files/.
* The ZMQ_PubSub_Tokenize_Q Module running, in order to work properly.
"""
import os
import argparse
import time
import re
from pubsublogger import publisher
from packages import Paste
from Helper import Process
if __name__ == "__main__":
publisher.port = 6380
publisher.channel = "Script"
config_section = 'Categ'
p = Process(config_section)
# SCRIPT PARSER #
parser = argparse.ArgumentParser(
description='This script is a part of the Analysis Information \
Leak framework.')
parser.add_argument(
'-d', type=str, default="../files/",
help='Path to the directory containing the category files.',
action='store')
args = parser.parse_args()
# FUNCTIONS #
publisher.info("Script Categ started")
categories = ['CreditCards', 'Mail', 'Onion', 'Web']
tmp_dict = {}
for filename in categories:
bname = os.path.basename(filename)
tmp_dict[bname] = []
with open(os.path.join(args.d, filename), 'r') as f:
patterns = [r'%s' % re.escape(s.strip()) for s in f]
tmp_dict[bname] = re.compile('|'.join(patterns), re.IGNORECASE)
prec_filename = None
while True:
filename = p.get_from_set()
if filename is not None:
paste = Paste.Paste(filename)
content = paste.get_p_content()
for categ, pattern in tmp_dict.items():
found = re.findall(pattern, content)
if len(found) > 0:
msg = '{} {}'.format(paste.p_path, len(found))
print msg, categ
p.populate_set_out(msg, categ)
publisher.info(
'Categ;{};{};{};Detected {} as {}'.format(
paste.p_source, paste.p_date, paste.p_name,
len(found), categ))
else:
publisher.debug("Script Categ is Idling 10s")
print 'Sleeping'
time.sleep(10)
|
agpl-3.0
|
andiwand/tmobile-scripts
|
pool.py
|
1
|
1566
|
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def parse_resource(s):
s = s.strip()
end = s.rfind(" ")
limit = None
if end > 0:
limit = s[end+1:]
s = s[:end]
limit = limit.replace(".", "")
if s.find(",") > 0:
limit = limit.replace(",", ".")
limit = float(limit)
else:
limit = int(limit)
return s, limit
def parse_number(s):
s = s.strip()
end = s.find(" ")
unit = None
if end > 0:
unit = s[end+1:]
s = s[:end]
s = s.replace(".", "");
if s.find(",") > 0:
s = s.replace(",", ".");
return float(s), unit
return int(s), unit
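# Illustrative inputs (assumed formats with German-style digit grouping, not
# taken from the portal itself):
#   parse_resource("Daten Oesterreich 1.000") -> ("Daten Oesterreich", 1000)
#   parse_number("1.234,56 MB")               -> (1234.56, "MB")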
def fetch(username, password):
result = {}
driver = webdriver.PhantomJS(
service_args=["--ignore-ssl-errors=true"],
service_log_path=os.path.devnull)
driver.implicitly_wait(10)
driver.get("https://rechnung.t-mobile.at/index.cfm")
form_username = driver.find_element_by_name("account")
form_password = driver.find_element_by_name("password")
form_username.send_keys(username)
form_password.send_keys(password)
form_password.send_keys(Keys.RETURN)
link = driver.find_element_by_link_text("Business Complete");
link.click()
table = driver.find_element_by_class_name("main_table")
table_rows = table.find_elements_by_tag_name("tr")
for row in table_rows[1:]:
cell = row.find_elements_by_tag_name("td")
resource = parse_resource(cell[2].text)
used = parse_number(cell[3].text)
free = parse_number(cell[5].text)
result[resource[0]] = {"used": used[0], "limit": used[0] + free[0], "unit": used[1]}
driver.close()
return result
|
gpl-3.0
|
ros-infrastructure/ros_buildfarm
|
scripts/misc/generate_check_agents_job.py
|
1
|
1849
|
#!/usr/bin/env python3
# Copyright 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import add_argument_dry_run
from ros_buildfarm.config import get_index
from ros_buildfarm.jenkins import configure_job
from ros_buildfarm.jenkins import configure_management_view
from ros_buildfarm.jenkins import connect
from ros_buildfarm.templates import expand_template
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Generate the 'check_agents' job on Jenkins")
add_argument_config_url(parser)
add_argument_dry_run(parser)
args = parser.parse_args(argv)
config = get_index(args.config_url)
job_config = get_job_config(config.notify_emails)
jenkins = connect(config.jenkins_url)
configure_management_view(jenkins, dry_run=args.dry_run)
job_name = 'check_agents'
configure_job(jenkins, job_name, job_config, dry_run=args.dry_run)
def get_job_config(notification_emails):
template_name = 'misc/check_agents_job.xml.em'
job_data = {
'notification_emails': notification_emails,
}
job_config = expand_template(template_name, job_data)
return job_config
if __name__ == '__main__':
main()
|
apache-2.0
|
laiqiqi886/kbengine
|
kbe/src/lib/python/Lib/reprlib.py
|
923
|
5110
|
"""Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
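# A minimal usage sketch (not part of the original module): guard a __repr__
# that may be re-entered while building its own output, e.g. a container that
# contains itself.
#   class Node:
#       def __init__(self):
#           self.children = [self]
#       @recursive_repr()
#       def __repr__(self):
#           return 'Node(%r)' % (self.children,)
#   repr(Node())  # -> 'Node([...])' instead of infinite recursion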
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
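# A quick illustration of the default size limits (example values, not from
# the module itself):
#   aRepr.repr(list(range(100)))  # -> '[0, 1, 2, 3, 4, 5, ...]' (maxlist == 6)
#   aRepr.repr('x' * 100)         # -> a quoted string shortened around '...'
#                                 #    so that it respects maxstring == 30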
|
lgpl-3.0
|
bmbouter/python-bugzilla
|
tests/createbug.py
|
5
|
2897
|
#
# Copyright Red Hat, Inc. 2013
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
'''
Unit tests for building createbug dictionaries with bin/bugzilla
'''
import unittest
from bugzilla.bugzilla4 import Bugzilla4
import tests
bz4 = Bugzilla4(cookiefile=None, tokenfile=None)
class CreatebugTest(unittest.TestCase):
maxDiff = None
bz = bz4
def assertDictEqual(self, *args, **kwargs):
# EPEL5 back compat
if hasattr(unittest.TestCase, "assertDictEqual"):
return unittest.TestCase.assertDictEqual(self, *args, **kwargs)
return self.assertEqual(*args, **kwargs)
def clicomm(self, argstr, out):
comm = "bugzilla new --test-return-result " + argstr
if out is None:
self.assertRaises(RuntimeError, tests.clicomm, comm, self.bz)
else:
q = tests.clicomm(comm, self.bz, returnmain=True)
self.assertDictEqual(out, q)
def testBasic(self):
self.clicomm(
"--product foo --component bar --summary baz --version 12",
{'component': 'bar', 'product': 'foo',
'summary': 'baz', 'version': '12'}
)
def testOpSys(self):
self.clicomm(
"--os windowsNT --arch ia64 --comment 'youze a foo' --cc me",
{'description': 'youze a foo', 'op_sys': 'windowsNT',
'platform': 'ia64', 'cc': ["me"]}
)
def testSeverity(self):
self.clicomm(
"--severity HIGH --priority Low --url http://example.com",
{'url': 'http://example.com', 'priority': 'Low',
'severity': 'HIGH'}
)
def testMisc(self):
self.clicomm(
"--alias some-alias",
{"alias": "some-alias"}
)
def testMultiOpts(self):
# Test all opts that can take lists
out = {'blocks': ['3', '4'], 'cc': ['1', '2'],
'depends_on': ['5', 'foo', 'wib'], 'groups': ['bar', '8'],
'keywords': ['TestOnly', 'ZStream']}
self.clicomm(
"--cc 1,2 --blocked 3,4 --dependson 5,foo,wib --groups bar,8 "
"--keywords TestOnly,ZStream",
out
)
self.clicomm(
"--cc 1 --cc 2 --blocked 3 --blocked 4 "
"--dependson 5,foo --dependson wib --groups bar --groups 8 "
"--keywords TestOnly --keywords ZStream",
out
)
def testFieldConversion(self):
vc = self.bz._validate_createbug # pylint: disable=protected-access
out = vc(product="foo", component="bar",
version="12", description="foo", short_desc="bar",
check_args=False)
self.assertDictEqual(out,
{'component': 'bar', 'description': 'foo', 'product': 'foo',
'summary': 'bar', 'version': '12'})
|
gpl-2.0
|
brainstorm/bcbio-nextgen
|
bcbio/install.py
|
1
|
32947
|
"""Handle installation and updates of bcbio-nextgen, third party software and data.
Enables automated installation tool and in-place updates to install additional
data and software.
"""
from __future__ import print_function
import argparse
import collections
import contextlib
import datetime
import dateutil
from distutils.version import LooseVersion
import gzip
import os
import shutil
import subprocess
import sys
import glob
import requests
from six.moves import urllib
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.pipeline import genome
from bcbio.variation import effects
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
REMOTES = {
"requirements": "https://raw.githubusercontent.com/chapmanb/bcbio-nextgen/master/requirements-conda.txt",
"gitrepo": "https://github.com/chapmanb/bcbio-nextgen.git",
"cloudbiolinux": "https://github.com/chapmanb/cloudbiolinux/archive/master.tar.gz",
"genome_resources": "https://raw.github.com/chapmanb/bcbio-nextgen/master/config/genomes/%s-resources.yaml",
"snpeff_dl_url": ("http://downloads.sourceforge.net/project/snpeff/databases/v{snpeff_ver}/"
"snpEff_v{snpeff_ver}_{genome}.zip")}
SUPPORTED_GENOMES = ["GRCh37", "hg19", "hg38", "hg38-noalt", "mm10", "mm9",
"rn6", "rn5", "canFam3", "dm3", "galGal4", "phix",
"pseudomonas_aeruginosa_ucbpp_pa14", "sacCer3", "TAIR10",
"WBcel235", "xenTro3", "GRCz10"]
SUPPORTED_INDEXES = ["bowtie", "bowtie2", "bwa", "novoalign", "rtg", "snap",
"star", "ucsc", "seq", "hisat2"]
DEFAULT_INDEXES = ["rtg"]
Tool = collections.namedtuple("Tool", ["name", "fname"])
def upgrade_bcbio(args):
"""Perform upgrade of bcbio to latest release, or from GitHub development version.
Handles bcbio, third party tools and data.
"""
print("Upgrading bcbio")
args = add_install_defaults(args)
if args.upgrade in ["stable", "system", "deps", "development"]:
if args.upgrade == "development":
anaconda_dir = _update_conda_devel()
print("Upgrading bcbio-nextgen to latest development version")
pip_bin = os.path.join(os.path.dirname(sys.executable), "pip")
_pip_safe_ssl([[pip_bin, "install", "--upgrade", "--no-deps",
"git+%s#egg=bcbio-nextgen" % REMOTES["gitrepo"]]], anaconda_dir)
print("Upgrade of bcbio-nextgen development code complete.")
else:
_update_conda_packages()
print("Upgrade of bcbio-nextgen code complete.")
try:
_set_matplotlib_default_backend()
except OSError:
pass
if args.tooldir:
with bcbio_tmpdir():
print("Upgrading third party tools to latest versions")
_symlink_bcbio(args, script="bcbio_nextgen.py")
_symlink_bcbio(args, script="bcbio_setup_genome.py")
_symlink_bcbio(args, script="bcbio_prepare_samples.py")
_symlink_bcbio(args, script="bcbio_fastq_umi_prep.py")
upgrade_thirdparty_tools(args, REMOTES)
print("Third party tools upgrade complete.")
if args.toolplus:
print("Installing additional tools")
_install_toolplus(args)
if args.install_data:
for default in DEFAULT_INDEXES:
if default not in args.aligners:
args.aligners.append(default)
if len(args.aligners) == 0:
print("Warning: no aligners provided with `--aligners` flag")
if len(args.genomes) == 0:
print("Data not installed, no genomes provided with `--genomes` flag")
else:
with bcbio_tmpdir():
print("Upgrading bcbio-nextgen data files")
upgrade_bcbio_data(args, REMOTES)
print("bcbio-nextgen data upgrade complete.")
if args.isolate and args.tooldir:
print("Isolated tool installation not automatically added to environmental variables")
print(" Add:\n {t}/bin to PATH".format(t=args.tooldir))
save_install_defaults(args)
args.datadir = _get_data_dir()
_install_container_bcbio_system(args.datadir)
print("Upgrade completed successfully.")
return args
def _pip_safe_ssl(cmds, anaconda_dir):
"""Run pip, retrying with conda SSL certificate if global certificate fails.
"""
try:
for cmd in cmds:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
_set_pip_ssl(anaconda_dir)
for cmd in cmds:
subprocess.check_call(cmd)
def _set_pip_ssl(anaconda_dir):
"""Set PIP SSL certificate to installed conda certificate to avoid SSL errors
"""
if anaconda_dir:
cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem")
if os.path.exists(cert_file):
os.environ["PIP_CERT"] = cert_file
def _set_matplotlib_default_backend():
"""
    matplotlib will try to print to a display if one is available, but we don't
    want to run it in interactive mode. Setting the backend to 'Agg' before
    importing still caused issues, so we replace the existing backend with
    'agg' in the default matplotlibrc. This is a hack until we can find a
    better solution.
"""
if _matplotlib_installed():
import matplotlib
matplotlib.use('Agg', force=True)
config = matplotlib.matplotlib_fname()
with file_transaction(config) as tx_out_file:
with open(config) as in_file, open(tx_out_file, "w") as out_file:
for line in in_file:
if line.split(":")[0].strip() == "backend":
out_file.write("backend: agg\n")
else:
out_file.write(line)
def _matplotlib_installed():
try:
import matplotlib
except ImportError:
return False
return True
def _symlink_bcbio(args, script="bcbio_nextgen.py"):
"""Ensure a bcbio-nextgen script symlink in final tool directory.
"""
bcbio_anaconda = os.path.join(os.path.dirname(sys.executable), script)
bindir = os.path.join(args.tooldir, "bin")
if not os.path.exists(bindir):
os.makedirs(bindir)
bcbio_final = os.path.join(bindir, script)
if not os.path.exists(bcbio_final):
if os.path.lexists(bcbio_final):
subprocess.check_call(["rm", "-f", bcbio_final])
subprocess.check_call(["ln", "-s", bcbio_anaconda, bcbio_final])
def _install_container_bcbio_system(datadir):
"""Install limited bcbio_system.yaml file for setting core and memory usage.
    Adds any non-specific programs to the exposed bcbio_system.yaml file, only
    when the upgrade happens inside a docker container.
"""
base_file = os.path.join(datadir, "config", "bcbio_system.yaml")
if not os.path.exists(base_file):
return
expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml")
expose = set(["memory", "cores", "jvm_opts"])
with open(base_file) as in_handle:
config = yaml.load(in_handle)
if os.path.exists(expose_file):
with open(expose_file) as in_handle:
expose_config = yaml.load(in_handle)
else:
expose_config = {"resources": {}}
    for pname, vals in config["resources"].items():
        expose_vals = {}
        for k, v in vals.items():
if k in expose:
expose_vals[k] = v
if len(expose_vals) > 0 and pname not in expose_config["resources"]:
expose_config["resources"][pname] = expose_vals
if expose_file and os.path.exists(os.path.dirname(expose_file)):
with open(expose_file, "w") as out_handle:
yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False)
return expose_file
def _get_conda_bin():
conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda")
if os.path.exists(conda_bin):
return conda_bin
def _default_deploy_args(args):
"""Standard install arguments for CloudBioLinux.
Avoid using sudo and keep an installation isolated if running as the root user.
"""
return {"flavor": "ngs_pipeline_minimal",
"vm_provider": "novm",
"hostname": "localhost",
"fabricrc_overrides": {"edition": "minimal",
"use_sudo": False,
"keep_isolated": args.isolate or os.geteuid() == 0,
"conda_cmd": _get_conda_bin(),
"distribution": args.distribution or "__auto__",
"dist_name": "__auto__"}}
def _update_conda_packages():
"""If installed in an anaconda directory, upgrade conda packages.
"""
conda_bin = _get_conda_bin()
assert conda_bin, ("Could not find anaconda distribution for upgrading bcbio.\n"
"Using python at %s but could not find conda." % (os.path.realpath(sys.executable)))
req_file = "bcbio-update-requirements.txt"
if os.path.exists(req_file):
os.remove(req_file)
subprocess.check_call(["wget", "-O", req_file, "--no-check-certificate", REMOTES["requirements"]])
subprocess.check_call([conda_bin, "install", "--update-deps", "--quiet", "--yes",
"-c", "bioconda", "-c", "conda-forge", "--file", req_file])
if os.path.exists(req_file):
os.remove(req_file)
return os.path.dirname(os.path.dirname(conda_bin))
def _update_conda_devel():
"""Update to the latest development conda package.
"""
conda_bin = _get_conda_bin()
assert conda_bin, "Could not find anaconda distribution for upgrading bcbio"
subprocess.check_call([conda_bin, "install", "--update-deps",
"--quiet", "--yes", "-c", "bioconda", "-c", "conda-forge", "bcbio-nextgen"])
return os.path.dirname(os.path.dirname(conda_bin))
def get_genome_dir(gid, galaxy_dir, data):
"""Return standard location of genome directories.
"""
if galaxy_dir:
refs = genome.get_refs(gid, None, galaxy_dir, data)
seq_file = tz.get_in(["fasta", "base"], refs)
if seq_file and os.path.exists(seq_file):
return os.path.dirname(os.path.dirname(seq_file))
else:
gdirs = glob.glob(os.path.join(_get_data_dir(), "genomes", "*", gid))
if len(gdirs) == 1 and os.path.exists(gdirs[0]):
return gdirs[0]
def _get_data_dir():
base_dir = os.path.realpath(os.path.dirname(os.path.dirname(os.path.realpath(sys.executable))))
if "anaconda" not in os.path.basename(base_dir) and "virtualenv" not in os.path.basename(base_dir):
raise ValueError("Cannot update data for bcbio-nextgen not installed by installer.\n"
"bcbio-nextgen needs to be installed inside an anaconda environment \n"
"located in the same directory as `galaxy` `genomes` and `gemini_data` directories.")
return os.path.dirname(base_dir)
def get_gemini_dir(data=None):
try:
data_dir = _get_data_dir()
return os.path.join(data_dir, "gemini_data")
except ValueError:
if data:
galaxy_dir = dd.get_galaxy_dir(data)
data_dir = os.path.realpath(os.path.dirname(os.path.dirname(galaxy_dir)))
return os.path.join(data_dir, "gemini_data")
else:
return None
def upgrade_bcbio_data(args, remotes):
"""Upgrade required genome data files in place.
"""
from fabric.api import env
data_dir = _get_data_dir()
s = _default_deploy_args(args)
s["actions"] = ["setup_biodata"]
tooldir = args.tooldir or get_defaults().get("tooldir")
if tooldir:
s["fabricrc_overrides"]["system_install"] = tooldir
s["fabricrc_overrides"]["data_files"] = data_dir
s["fabricrc_overrides"]["galaxy_home"] = os.path.join(data_dir, "galaxy")
cbl = get_cloudbiolinux(remotes)
s["genomes"] = _get_biodata(cbl["biodata"], args)
sys.path.insert(0, cbl["dir"])
env.cores = args.cores
cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
cbl_deploy.deploy(s)
_upgrade_genome_resources(s["fabricrc_overrides"]["galaxy_home"],
remotes["genome_resources"])
_upgrade_snpeff_data(s["fabricrc_overrides"]["galaxy_home"], args, remotes)
if "vep" in args.datatarget:
_upgrade_vep_data(s["fabricrc_overrides"]["galaxy_home"], tooldir)
if 'gemini' in args.datatarget and ("hg19" in args.genomes or "GRCh37" in args.genomes):
gemini = os.path.join(os.path.dirname(sys.executable), "gemini")
extras = []
if "cadd" in args.datatarget:
extras.extend(["--extra", "cadd_score"])
ann_dir = get_gemini_dir()
subprocess.check_call([gemini, "--annotation-dir", ann_dir, "update", "--dataonly"] + extras)
if "kraken" in args.datatarget:
_install_kraken_db(_get_data_dir(), args)
def _upgrade_genome_resources(galaxy_dir, base_url):
"""Retrieve latest version of genome resource YAML configuration files.
"""
for dbkey, ref_file in genome.get_builds(galaxy_dir):
# Check for a remote genome resources file
remote_url = base_url % dbkey
requests.packages.urllib3.disable_warnings()
r = requests.get(remote_url, verify=False)
if r.status_code == requests.codes.ok:
local_file = os.path.join(os.path.dirname(ref_file), os.path.basename(remote_url))
if os.path.exists(local_file):
with open(local_file) as in_handle:
local_config = yaml.load(in_handle)
remote_config = yaml.load(r.text)
needs_update = remote_config["version"] > local_config.get("version", 0)
if needs_update:
shutil.move(local_file, local_file + ".old%s" % local_config.get("version", 0))
else:
needs_update = True
if needs_update:
print("Updating %s genome resources configuration" % dbkey)
with open(local_file, "w") as out_handle:
out_handle.write(r.text)
def _upgrade_vep_data(galaxy_dir, tooldir):
for dbkey, ref_file in genome.get_builds(galaxy_dir):
effects.prep_vep_cache(dbkey, ref_file, tooldir)
def _upgrade_snpeff_data(galaxy_dir, args, remotes):
"""Install or upgrade snpEff databases, localized to reference directory.
"""
snpeff_version = effects.snpeff_version(args)
if not snpeff_version:
return
for dbkey, ref_file in genome.get_builds(galaxy_dir):
resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey)
if os.path.exists(resource_file):
with open(resource_file) as in_handle:
resources = yaml.load(in_handle)
snpeff_db, snpeff_base_dir = effects.get_db({"genome_resources": resources,
"reference": {"fasta": {"base": ref_file}}})
if snpeff_db:
snpeff_db_dir = os.path.join(snpeff_base_dir, snpeff_db)
if os.path.exists(snpeff_db_dir) and _is_old_database(snpeff_db_dir, args):
shutil.rmtree(snpeff_db_dir)
if not os.path.exists(snpeff_db_dir):
print("Installing snpEff database %s in %s" % (snpeff_db, snpeff_base_dir))
dl_url = remotes["snpeff_dl_url"].format(
snpeff_ver=snpeff_version.replace(".", "_"),
genome=snpeff_db)
dl_file = os.path.basename(dl_url)
with utils.chdir(snpeff_base_dir):
subprocess.check_call(["wget", "-c", "-O", dl_file, dl_url])
subprocess.check_call(["unzip", dl_file])
os.remove(dl_file)
dl_dir = os.path.join(snpeff_base_dir, "data", snpeff_db)
shutil.move(dl_dir, snpeff_db_dir)
os.rmdir(os.path.join(snpeff_base_dir, "data"))
def _is_old_database(db_dir, args):
"""Check for old database versions, supported in snpEff 4.1.
"""
snpeff_version = effects.snpeff_version(args)
if LooseVersion(snpeff_version) >= LooseVersion("4.1"):
pred_file = os.path.join(db_dir, "snpEffectPredictor.bin")
if not utils.file_exists(pred_file):
return True
with gzip.open(pred_file) as in_handle:
version_info = in_handle.readline().strip().split("\t")
program, version = version_info[:2]
if not program.lower() == "snpeff" or LooseVersion(snpeff_version) > LooseVersion(version):
return True
return False
def _get_biodata(base_file, args):
"""Retrieve biodata genome targets customized by install parameters.
"""
with open(base_file) as in_handle:
config = yaml.load(in_handle)
config["install_liftover"] = False
config["genome_indexes"] = args.aligners
ann_groups = config.pop("annotation_groups", {})
config["genomes"] = [_setup_genome_annotations(g, args, ann_groups)
for g in config["genomes"] if g["dbkey"] in args.genomes]
return config
def _setup_genome_annotations(g, args, ann_groups):
"""Configure genome annotations to install based on datatarget.
"""
available_anns = g.get("annotations", []) + g.pop("annotations_available", [])
anns = []
for orig_target in args.datatarget:
if orig_target in ann_groups:
targets = ann_groups[orig_target]
else:
targets = [orig_target]
for target in targets:
if target in available_anns:
anns.append(target)
g["annotations"] = anns
if "variation" not in args.datatarget and "validation" in g:
del g["validation"]
return g
def upgrade_thirdparty_tools(args, remotes):
"""Install and update third party tools used in the pipeline.
Creates a manifest directory with installed programs on the system.
"""
s = {"fabricrc_overrides": {"system_install": args.tooldir,
"local_install": os.path.join(args.tooldir, "local_install"),
"distribution": args.distribution,
"conda_cmd": _get_conda_bin(),
"use_sudo": False,
"edition": "minimal"}}
s = _default_deploy_args(args)
s["actions"] = ["install_biolinux"]
s["fabricrc_overrides"]["system_install"] = args.tooldir
s["fabricrc_overrides"]["local_install"] = os.path.join(args.tooldir, "local_install")
cbl = get_cloudbiolinux(remotes)
sys.path.insert(0, cbl["dir"])
cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
cbl_deploy.deploy(s)
manifest_dir = os.path.join(_get_data_dir(), "manifest")
print("Creating manifest of installed packages in %s" % manifest_dir)
cbl_manifest = __import__("cloudbio.manifest", fromlist=["manifest"])
if os.path.exists(manifest_dir):
for fname in os.listdir(manifest_dir):
if not fname.startswith("toolplus"):
os.remove(os.path.join(manifest_dir, fname))
cbl_manifest.create(manifest_dir, args.tooldir)
def _install_toolplus(args):
"""Install additional tools we cannot distribute, updating local manifest.
"""
manifest_dir = os.path.join(_get_data_dir(), "manifest")
toolplus_manifest = os.path.join(manifest_dir, "toolplus-packages.yaml")
system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
# Handle toolplus installs inside Docker container
if not os.path.exists(system_config):
docker_system_config = os.path.join(_get_data_dir(), "config", "bcbio_system.yaml")
if os.path.exists(docker_system_config):
system_config = docker_system_config
toolplus_dir = os.path.join(_get_data_dir(), "toolplus")
for tool in args.toolplus:
if tool.name in set(["gatk", "mutect"]):
print("Installing %s" % tool.name)
_install_gatk_jar(tool.name, tool.fname, toolplus_manifest, system_config, toolplus_dir)
else:
raise ValueError("Unexpected toolplus argument: %s %s" % (tool.name, tool.fname))
def get_gatk_jar_version(name, fname):
if name == "gatk":
return broad.get_gatk_version(fname)
elif name == "mutect":
return broad.get_mutect_version(fname)
else:
raise ValueError("Unexpected GATK input: %s" % name)
def _install_gatk_jar(name, fname, manifest, system_config, toolplus_dir):
"""Install a jar for GATK or associated tools like MuTect.
"""
if not fname.endswith(".jar"):
raise ValueError("--toolplus argument for %s expects a jar file: %s" % (name, fname))
version = get_gatk_jar_version(name, fname)
store_dir = utils.safe_makedir(os.path.join(toolplus_dir, name, version))
shutil.copyfile(fname, os.path.join(store_dir, os.path.basename(fname)))
_update_system_file(system_config, name, {"dir": store_dir})
_update_manifest(manifest, name, version)
def _update_manifest(manifest_file, name, version):
"""Update the toolplus manifest file with updated name and version
"""
if os.path.exists(manifest_file):
with open(manifest_file) as in_handle:
manifest = yaml.load(in_handle)
else:
manifest = {}
manifest[name] = {"name": name, "version": version}
with open(manifest_file, "w") as out_handle:
yaml.safe_dump(manifest, out_handle, default_flow_style=False, allow_unicode=False)
def _update_system_file(system_file, name, new_kvs):
"""Update the bcbio_system.yaml file with new resource information.
"""
if os.path.exists(system_file):
bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
shutil.copyfile(system_file, bak_file)
with open(system_file) as in_handle:
config = yaml.load(in_handle)
else:
utils.safe_makedir(os.path.dirname(system_file))
config = {}
new_rs = {}
added = False
for rname, r_kvs in config.get("resources", {}).items():
if rname == name:
for k, v in new_kvs.items():
r_kvs[k] = v
added = True
new_rs[rname] = r_kvs
if not added:
new_rs[name] = new_kvs
config["resources"] = new_rs
with open(system_file, "w") as out_handle:
yaml.safe_dump(config, out_handle, default_flow_style=False, allow_unicode=False)
def _install_kraken_db(datadir, args):
"""Install kraken minimal DB in genome folder.
"""
kraken = os.path.join(datadir, "genomes/kraken")
url = "https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz"
compress = os.path.join(kraken, os.path.basename(url))
base, ext = utils.splitext_plus(os.path.basename(url))
db = os.path.join(kraken, base)
tooldir = args.tooldir or get_defaults()["tooldir"]
requests.packages.urllib3.disable_warnings()
last_mod = urllib.request.urlopen(url).info().getheader('Last-Modified')
last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc())
if os.path.exists(os.path.join(tooldir, "bin", "kraken")):
if not os.path.exists(db):
is_new_version = True
else:
cur_file = glob.glob(os.path.join(kraken, "minikraken_*"))[0]
cur_version = datetime.datetime.utcfromtimestamp(os.path.getmtime(cur_file))
is_new_version = last_mod.date() > cur_version.date()
if is_new_version:
shutil.move(cur_file, cur_file.replace('minikraken', 'old'))
if not os.path.exists(kraken):
utils.safe_makedir(kraken)
if is_new_version:
if not os.path.exists(compress):
subprocess.check_call(["wget", "-O", compress, url, "--no-check-certificate"])
cmd = ["tar", "-xzvf", compress, "-C", kraken]
subprocess.check_call(cmd)
last_version = glob.glob(os.path.join(kraken, "minikraken_*"))
utils.symlink_plus(os.path.join(kraken, last_version[0]), os.path.join(kraken, "minikraken"))
utils.remove_safe(compress)
else:
print("You have the latest version %s." % last_mod)
else:
raise argparse.ArgumentTypeError("kraken not installed in tooldir %s." %
os.path.join(tooldir, "bin", "kraken"))
# ## Store a local configuration file with upgrade details
def _get_install_config():
"""Return the YAML configuration file used to store upgrade information.
"""
try:
data_dir = _get_data_dir()
except ValueError:
return None
config_dir = utils.safe_makedir(os.path.join(data_dir, "config"))
return os.path.join(config_dir, "install-params.yaml")
def save_install_defaults(args):
"""Save installation information to make future upgrades easier.
"""
install_config = _get_install_config()
if install_config is None:
return
if utils.file_exists(install_config):
with open(install_config) as in_handle:
cur_config = yaml.load(in_handle)
else:
cur_config = {}
if args.tooldir:
cur_config["tooldir"] = args.tooldir
cur_config["isolate"] = args.isolate
for attr in ["genomes", "aligners", "datatarget"]:
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if x not in cur_config[attr]:
cur_config[attr].append(x)
# toolplus -- save non-filename inputs
attr = "toolplus"
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if not x.fname:
if x.name not in cur_config[attr]:
cur_config[attr].append(x.name)
with open(install_config, "w") as out_handle:
yaml.safe_dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
def add_install_defaults(args):
"""Add any saved installation defaults to the upgrade.
"""
# Ensure we install data if we've specified any secondary installation targets
if len(args.genomes) > 0 or len(args.aligners) > 0 or len(args.datatarget) > 0:
args.install_data = True
install_config = _get_install_config()
if install_config is None or not utils.file_exists(install_config):
default_args = {}
else:
with open(install_config) as in_handle:
default_args = yaml.load(in_handle)
# if we are upgrading to development, also upgrade the tools
if args.upgrade in ["development"]:
args.tools = True
if args.tools and args.tooldir is None:
if "tooldir" in default_args:
args.tooldir = str(default_args["tooldir"])
else:
raise ValueError("Default tool directory not yet saved in config defaults. "
"Specify the '--tooldir=/path/to/tools' to upgrade tools. "
"After a successful upgrade, the '--tools' parameter will "
"work for future upgrades.")
for attr in ["genomes", "aligners"]:
# don't upgrade default genomes if a genome was specified
if attr == "genomes" and len(args.genomes) > 0:
continue
for x in default_args.get(attr, []):
x = str(x)
new_val = getattr(args, attr)
if x not in getattr(args, attr):
new_val.append(x)
setattr(args, attr, new_val)
args = _datatarget_defaults(args, default_args)
if "isolate" in default_args and args.isolate is not True:
args.isolate = default_args["isolate"]
return args
def _datatarget_defaults(args, default_args):
"""Set data installation targets, handling defaults.
Sets variation, rnaseq, smallrna as default targets if we're not
isolated to a single method.
Provides back compatibility for toolplus specifications.
"""
default_data = default_args.get("datatarget", [])
# back-compatible toolplus specifications
for x in default_args.get("toolplus", []):
val = None
if x == "data":
val = "gemini"
elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken"]:
val = x
if val and val not in default_data:
default_data.append(val)
new_val = getattr(args, "datatarget")
for x in default_data:
if x not in new_val:
new_val.append(x)
has_std_target = False
std_targets = ["variation", "rnaseq", "smallrna"]
for target in std_targets:
if target in new_val:
has_std_target = True
break
if not has_std_target:
new_val = new_val + std_targets
setattr(args, "datatarget", new_val)
return args
def get_defaults():
install_config = _get_install_config()
if install_config is None or not utils.file_exists(install_config):
return {}
with open(install_config) as in_handle:
return yaml.load(in_handle)
def _check_toolplus(x):
"""Parse options for adding non-standard/commercial tools like GATK and MuTecT.
"""
if "=" in x and len(x.split("=")) == 2:
name, fname = x.split("=")
fname = os.path.normpath(os.path.realpath(fname))
if not os.path.exists(fname):
raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s"
% (name, fname))
return Tool(name, fname)
else:
raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
def add_subparser(subparsers):
parser = subparsers.add_parser("upgrade", help="Install or upgrade bcbio-nextgen")
parser.add_argument("--cores", default=1,
help="Number of cores to use if local indexing is necessary.")
parser.add_argument("--tooldir",
help="Directory to install 3rd party software tools. Leave unspecified for no tools",
type=lambda x: (os.path.abspath(os.path.expanduser(x))), default=None)
parser.add_argument("--tools",
help="Boolean argument specifying upgrade of tools. Uses previously saved install directory",
action="store_true", default=False)
parser.add_argument("-u", "--upgrade", help="Code version to upgrade",
choices=["stable", "development", "system", "deps", "skip"], default="skip")
parser.add_argument("--toolplus", help="Specify additional tool categories to install",
action="append", default=[], type=_check_toolplus)
parser.add_argument("--datatarget", help="Data to install. Allows customization or install of extra data.",
action="append", default=[],
choices=["variation", "rnaseq", "smallrna", "gemini", "cadd", "vep", "dbnsfp", "dbscsnv", "battenberg", "kraken"])
parser.add_argument("--genomes", help="Genomes to download",
action="append", default=[], choices=SUPPORTED_GENOMES)
parser.add_argument("--aligners", help="Aligner indexes to download",
action="append", default=[],
choices=SUPPORTED_INDEXES)
parser.add_argument("--data", help="Upgrade data dependencies",
dest="install_data", action="store_true", default=False)
parser.add_argument("--isolate", help="Created an isolated installation without PATH updates",
dest="isolate", action="store_true", default=False)
parser.add_argument("--distribution", help="Operating system distribution",
default="",
choices=["ubuntu", "debian", "centos", "scientificlinux", "macosx"])
return parser
def get_cloudbiolinux(remotes):
base_dir = os.path.join(os.getcwd(), "cloudbiolinux")
if not os.path.exists(base_dir):
subprocess.check_call("wget --no-check-certificate -O- %s | tar xz && "
"(mv master cloudbiolinux || mv cloudbiolinux-master cloudbiolinux)"
% remotes["cloudbiolinux"], shell=True)
return {"biodata": os.path.join(base_dir, "config", "biodata.yaml"),
"dir": base_dir}
@contextlib.contextmanager
def bcbio_tmpdir():
orig_dir = os.getcwd()
work_dir = os.path.join(os.getcwd(), "tmpbcbio-install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
yield work_dir
os.chdir(orig_dir)
shutil.rmtree(work_dir)
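# A minimal usage sketch (added for illustration; not part of bcbio itself):
# any work done inside the context manager above happens in a throwaway
# tmpbcbio-install directory that is removed again on normal exit. The helper
# name below is hypothetical.
def _bcbio_tmpdir_example():
    with bcbio_tmpdir() as work_dir:
        print("staging temporary install files in %s" % work_dir)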
|
mit
|
plumgrid/plumgrid-nova
|
nova/openstack/common/db/exception.py
|
17
|
1634
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from nova.openstack.common.gettextutils import _ # noqa
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
def __init__(self, columns=[], inner_exception=None):
self.columns = columns
super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
|
apache-2.0
|
angad/libjingle-mac
|
scons-2.2.0/build/lib/SCons/Variables/EnumVariable.py
|
14
|
3872
|
"""engine.SCons.Variables.EnumVariable
This file defines the option type for SCons allowing only specified
input-values.
Usage example:
opts = Variables()
opts.Add(EnumVariable('debug', 'debug output and symbols', 'no',
allowed_values=('yes', 'no', 'full'),
map={}, ignorecase=2))
...
if env['debug'] == 'full':
...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/EnumVariable.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__all__ = ['EnumVariable',]
import SCons.Errors
def _validator(key, val, env, vals):
if not val in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s. Valid values are: %s' % (key, val, vals))
def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0):
"""
    The input parameters describe an option with only certain values
allowed. They are returned with an appropriate converter and
validator appended. The result is usable for input to
Variables.Add().
'key' and 'default' are the values to be passed on to Variables.Add().
    'help' will have the allowed values automatically appended to it.
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (eg. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and
the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed
values. The 'converter' converts input values according to the
given 'map'-dictionary (unmapped input values are returned
unchanged).
"""
help = '%s (%s)' % (help, '|'.join(allowed_values))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env: \
_validator(key, val.lower(), env, allowed_values)
else:
validator = lambda key, val, env: \
_validator(key, val, env, allowed_values)
# define converter
if ignorecase == 2:
converter = lambda val: map.get(val.lower(), val).lower()
elif ignorecase == 1:
converter = lambda val: map.get(val.lower(), val)
else:
converter = lambda val: map.get(val, val)
return (key, help, default, validator, converter)
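# A minimal sketch (added for illustration; not part of the original SCons
# module): exercising the converter and validator returned by EnumVariable()
# directly, outside of a full Variables()/Environment() setup. The function
# name is hypothetical.
def _enum_variable_example():
    key, help, default, validator, converter = EnumVariable(
        'debug', 'debug output and symbols', 'no',
        allowed_values=('yes', 'no', 'full'),
        map={'all': 'full'}, ignorecase=2)
    assert converter('ALL') == 'full'       # mapped via 'map', then lower-cased
    assert converter('Yes') == 'yes'        # lower-cased only
    validator(key, converter('no'), None)   # a valid value passes silently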
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bsd-3-clause
|
Carreau/readthedocs.org
|
readthedocs/projects/migrations/0039_add_mirror.py
|
12
|
12667
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.mirror'
db.add_column('projects_project', 'mirror',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.mirror'
db.delete_column('projects_project', 'mirror')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'builds.version': {
'Meta': {'ordering': "['-verbose_name']", 'unique_together': "[('project', 'slug')]", 'object_name': 'Version'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'built': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '20'}),
'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.emailhook': {
'Meta': {'object_name': 'EmailHook'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emailhook_notifications'", 'to': "orm['projects.Project']"})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_filed'", 'null': 'True', 'to': "orm['builds.Version']"})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'canonical_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '20'}),
'main_language_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['projects.Project']"}),
'mirror': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_major': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_minor': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_point': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'python_interpreter': ('django.db.models.fields.CharField', [], {'default': "'python'", 'max_length': '20'}),
'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'through': "orm['projects.ProjectRelationship']", 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'single_version': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_system_packages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'version_privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'})
},
'projects.projectrelationship': {
'Meta': {'object_name': 'ProjectRelationship'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'superprojects'", 'to': "orm['projects.Project']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subprojects'", 'to': "orm['projects.Project']"})
},
'projects.webhook': {
'Meta': {'object_name': 'WebHook'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'webhook_notifications'", 'to': "orm['projects.Project']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['projects']
|
mit
|
davipeterlini/routeflow_tcc_ha
|
pox/pox/lib/util.py
|
21
|
12304
|
# Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions
"""
from __future__ import print_function
import traceback
import struct
import sys
import os
import time
import socket
#FIXME: ugh, why can't I make importing pox.core work here?
import logging
log = logging.getLogger("util")
class DirtyList (list):
#TODO: right now the callback may be called more often than needed
# and it may not be called with good names/parameters.
# All you can really rely on is that it will be called in
# some way if something may have changed.
def __init__ (self, *args, **kw):
list.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def __setslice__ (self, k, v):
#TODO: actually check for change
self._smudge('__setslice__', k, v)
list.__setslice__(self, k, v)
def __delslice__ (self, k):
#TODO: actually check for change
self._smudge('__delslice__', k, None)
list.__delslice__(self, k)
def append (self, v):
self._smudge('append', None, v)
list.append(self, v)
def extend (self, v):
self._smudge('extend', None, v)
list.extend(self, v)
  def insert (self, i, v):
    self._smudge('insert', i, v)
    list.insert(self, i, v)
  def pop (self, i=-1):
    self._smudge('pop', i, None)
    return list.pop(self, i)
def remove (self, v):
if v in self:
self._smudge('remove', None, v)
list.remove(self, v)
def reverse (self):
if len(self):
self._smudge('reverse', None, None)
list.reverse(self)
def sort (self, *arg, **kw):
#TODO: check for changes?
self._smudge('sort', None, None)
list.sort(self, *arg, **kw)
def __setitem__ (self, k, v):
if isinstance(k, slice):
#TODO: actually check for change
self._smudge('__setitem__slice',k,v)
elif self[k] != v:
self._smudge('__setitem__',k,v)
list.__setitem__(self, k, v)
def __delitem__ (self, k):
list.__delitem__(self, k)
    if isinstance(k, slice):
      #TODO: actually check for change
      self._smudge('__delitem__slice', k, None)
else:
self._smudge('__delitem__', k, None)
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
class DirtyDict (dict):
"""
A dict that tracks whether values have been changed shallowly.
If you set a callback, it will be called when the value changes, and
passed three values: "add"/"modify"/"delete", key, value
"""
def __init__ (self, *args, **kw):
dict.__init__(self, *args, **kw)
self.dirty = False
self.callback = None
def _smudge (self, reason, k, v):
if self.callback:
if self.callback(reason, k, v) is not True:
self.dirty = True
else:
self.dirty = True
def __setitem__ (self, k, v):
if k not in self:
self._smudge('__setitem__add',k,v)
elif self[k] != v:
self._smudge('__setitem__modify',k,v)
dict.__setitem__(self, k, v)
def __delitem__ (self, k):
self._smudge('__delitem__', k, None)
dict.__delitem__(self, k)
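# A minimal usage sketch (added for illustration; not part of the original
# module): the callback receives reasons such as '__setitem__add' or
# '__setitem__modify'; unless it returns True, the dict is also marked dirty.
# The function name is hypothetical.
def _dirty_dict_example ():
  d = DirtyDict()
  d.callback = lambda reason, k, v: log.debug("%s %r=%r", reason, k, v)
  d["x"] = 1   # fires the callback with '__setitem__add'
  d["x"] = 2   # fires the callback with '__setitem__modify'
  assert d.dirty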
def set_extend (l, index, item, emptyValue = None):
"""
Adds item to the list l at position index. If index is beyond the end
of the list, it will pad the list out until it's large enough, using
emptyValue for the new entries.
"""
if index >= len(l):
    l += ([emptyValue] * (index - len(l) + 1))
l[index] = item
def str_to_dpid (s):
"""
Convert a DPID in the canonical string form into a long int.
"""
if s.lower().startswith("0x"):
s = s[2:]
s = s.replace("-", "").split("|", 2)
a = int(s[0], 16)
if a > 0xffFFffFFffFF:
b = a >> 48
a &= 0xffFFffFFffFF
else:
b = 0
if len(s) == 2:
b = int(s[1])
return a | (b << 48)
strToDPID = str_to_dpid
def dpid_to_str (dpid, alwaysLong = False):
"""
Convert a DPID from a long into into the canonical string form.
"""
if type(dpid) is long or type(dpid) is int:
# Not sure if this is right
dpid = struct.pack('!Q', dpid)
assert len(dpid) == 8
r = '-'.join(['%02x' % (ord(x),) for x in dpid[2:]])
if alwaysLong or dpid[0:2] != (b'\x00'*2):
r += '|' + str(struct.unpack('!H', dpid[0:2])[0])
return r
dpidToStr = dpid_to_str # Deprecated
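# A minimal round-trip sketch (added for illustration; not part of the
# original module), using the canonical "xx-xx-xx-xx-xx-xx|n" DPID form
# handled by the two functions above. The function name is hypothetical.
def _dpid_roundtrip_example ():
  d = str_to_dpid("00-00-00-00-00-01|2")
  assert d == (1 | (2 << 48))
  assert dpid_to_str(d) == "00-00-00-00-00-01|2"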
def assert_type(name, obj, types, none_ok=True):
"""
Assert that a parameter is of a given type.
Raise an Assertion Error with a descriptive error msg if not.
name: name of the parameter for error messages
obj: parameter value to be checked
types: type or list or tuple of types that is acceptable
none_ok: whether 'None' is an ok value
"""
if obj is None:
if none_ok:
return True
else:
raise AssertionError("%s may not be None" % name)
if not isinstance(types, (tuple, list)):
types = [ types ]
for cls in types:
if isinstance(obj, cls):
return True
allowed_types = "|".join(map(lambda x: str(x), types))
stack = traceback.extract_stack()
stack_msg = "Function call %s() in %s:%d" % (stack[-2][2],
stack[-3][0], stack[-3][1])
type_msg = ("%s must be instance of %s (but is %s)"
% (name, allowed_types , str(type(obj))))
raise AssertionError(stack_msg + ": " + type_msg)
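# A minimal usage sketch (added for illustration; not part of the original
# module). The function name is hypothetical.
def _assert_type_example ():
  assert_type("count", 3, int)                  # passes: 3 is an int
  assert_type("name", None, str, none_ok=True)  # passes: None is allowed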
def initHelper (obj, kw):
"""
Inside a class's __init__, this will copy keyword arguments to fields
of the same name. See libopenflow for an example.
"""
for k,v in kw.iteritems():
if not hasattr(obj, k):
raise TypeError(obj.__class__.__name__ + " constructor got "
+ "unexpected keyword argument '" + k + "'")
setattr(obj, k, v)
def makePinger ():
"""
A pinger is basically a thing to let you wake a select().
On Unix systems, this makes a pipe pair. But on Windows, select() only
works with sockets, so it makes a pair of connected sockets.
"""
class PipePinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
assert os is not None
def ping (self):
if os is None: return #TODO: Is there a better fix for this?
os.write(self._w, ' ')
def fileno (self):
return self._r
def pongAll (self):
#TODO: make this actually read all
os.read(self._r, 1024)
def pong (self):
os.read(self._r, 1)
def __del__ (self):
try:
os.close(self._w)
except:
pass
try:
os.close(self._r)
except:
pass
def __repr__ (self):
return "<%s %i/%i>" % (self.__class__.__name__, self._w, self._r)
class SocketPinger (object):
def __init__ (self, pair):
self._w = pair[1]
self._r = pair[0]
def ping (self):
self._w.send(' ')
def pong (self):
self._r.recv(1)
def pongAll (self):
#TODO: make this actually read all
self._r.recv(1024)
def fileno (self):
return self._r.fileno()
def __repr__ (self):
return "<%s %s/%s>" % (self.__class__.__name__, self._w, self._r)
#return PipePinger((os.pipe()[0],os.pipe()[1])) # To test failure case
if os.name == "posix":
return PipePinger(os.pipe())
#TODO: clean up sockets?
localaddress = '127.127.127.127'
startPort = 10000
import socket
import select
def tryConnect ():
l = socket.socket()
l.setblocking(0)
port = startPort
while True:
try:
l.bind( (localaddress, port) )
break
except:
port += 1
if port - startPort > 1000:
raise RuntimeError("Could not find a free socket")
l.listen(0)
r = socket.socket()
try:
r.connect((localaddress, port))
except:
import traceback
ei = sys.exc_info()
ei = traceback.format_exception_only(ei[0], ei[1])
ei = ''.join(ei).strip()
log.warning("makePinger: connect exception:\n" + ei)
return False
rlist, wlist,elist = select.select([l], [], [l], 2)
if len(elist):
log.warning("makePinger: socket error in select()")
return False
if len(rlist) == 0:
log.warning("makePinger: socket didn't connect")
return False
try:
w, addr = l.accept()
except:
return False
#w.setblocking(0)
if addr != r.getsockname():
log.info("makePinger: pair didn't connect to each other!")
return False
r.setblocking(1)
# Turn off Nagle
r.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return (r, w)
# Try a few times
for i in range(0, 3):
result = tryConnect()
if result is not False:
return SocketPinger(result)
raise RuntimeError("Could not allocate a local socket pair")
def str_to_bool (s):
"""
Given a string, parses out whether it is meant to be True or not
"""
s = str(s).lower() # Make sure
if s in ['true', 't', 'yes', 'y', 'on', 'enable', 'enabled', 'ok',
'okay', '1', 'allow', 'allowed']:
return True
try:
r = 10
if s.startswith("0x"):
s = s[2:]
r = 16
i = int(s, r)
if i != 0:
return True
except:
pass
return False
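# A minimal sketch (added for illustration; not part of the original module):
# recognized truthy words and non-zero decimal or hex strings parse as True,
# anything else as False. The function name is hypothetical.
def _str_to_bool_example ():
  assert str_to_bool("Yes") and str_to_bool("0x10") and str_to_bool("2")
  assert not str_to_bool("off") and not str_to_bool("0")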
def hexdump (data):
if isinstance(data, str):
data = [ord(c) for c in data]
o = ""
def chunks (data, length):
return (data[i:i+length] for i in xrange(0, len(data), length))
def filt (c):
if c >= 32 and c <= 126: return chr(c)
return '.'
for i,chunk in enumerate(chunks(data,16)):
if i > 0: o += "\n"
o += "%04x: " % (i * 16,)
l = ' '.join("%02x" % (c,) for c in chunk)
l = "%-48s" % (l,)
l = l[:3*8-1] + " " + l[3*8:]
t = ''.join([filt(x) for x in chunk])
l += ' %-16s' % (t,)
o += l
return o
def connect_socket_with_backoff (address, port, max_backoff_seconds=32):
'''
Connect to the given address and port. If the connection attempt fails,
exponentially back off, up to the max backoff
return the connected socket, or raise an exception if the connection
was unsuccessful
'''
backoff_seconds = 1
sock = None
print("connect_socket_with_backoff(address=%s, port=%d)"
% (address, port), file=sys.stderr)
while True:
try:
sock = socket.socket()
sock.connect( (address, port) )
break
except socket.error as e:
print("%s. Backing off %d seconds ..." % (str(e), backoff_seconds),
file=sys.stderr)
if backoff_seconds >= max_backoff_seconds:
raise RuntimeError("Could not connect to controller %s:%d"
% (address, port))
else:
time.sleep(backoff_seconds)
backoff_seconds <<= 1
return sock
_scalar_types = (int, long, basestring, float, bool)
def is_scalar (v):
return isinstance(v, _scalar_types)
def fields_of (obj, primitives_only=False,
primitives_and_composites_only=False, allow_caps=False):
"""
Returns key/value pairs of things that seem like public fields of an object.
"""
#NOTE: The above docstring isn't split into two lines on purpose.
r = {}
for k in dir(obj):
if k.startswith('_'): continue
v = getattr(obj, k)
if hasattr(v, '__call__'): continue
if not allow_caps and k.upper() == k: continue
if primitives_only:
if not isinstance(v, _scalar_types):
continue
elif primitives_and_composites_only:
if not isinstance(v, (int, long, basestring, float, bool, set,
dict, list)):
continue
#r.append((k,v))
r[k] = v
return r
if __name__ == "__main__":
#TODO: move to tests?
def cb (t,k,v): print(v)
l = DirtyList([10,20,30,40,50])
l.callback = cb
l.append(3)
print(l)
|
apache-2.0
|
olemis/brython
|
www/src/Lib/bisect.py
|
1261
|
2595
|
"""Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
# Overwrite above definitions with a fast C implementation
try:
from _bisect import *
except ImportError:
pass
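# A minimal usage sketch (added for illustration; not part of the original
# module): keep a list sorted with insort and locate items with bisect_left /
# bisect_right. The function name is hypothetical.
def _bisect_example():
    grades = []
    for score in (75, 90, 60, 90):
        insort(grades, score)
    assert grades == [60, 75, 90, 90]
    assert bisect_left(grades, 90) == 2   # index of the leftmost 90
    assert bisect_right(grades, 90) == 4  # insertion point after the last 90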
|
bsd-3-clause
|
takluyver/readthedocs.org
|
readthedocs/rtd_tests/tests/test_project.py
|
9
|
1911
|
from bamboo_boy.utils import with_canopy
import json
from django.test import TestCase
from projects.models import Project
from rtd_tests.factories.projects_factories import OneProjectWithTranslationsOneWithout,\
ProjectFactory
from rest_framework.reverse import reverse
from restapi.serializers import ProjectSerializer
@with_canopy(OneProjectWithTranslationsOneWithout)
class TestProject(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
def test_valid_versions(self):
r = self.client.get('/api/v2/project/6/valid_versions/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['flat'][0], '0.8')
self.assertEqual(resp['flat'][1], '0.8.1')
def test_subprojects(self):
r = self.client.get('/api/v2/project/6/subprojects/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['subprojects'][0]['id'], 23)
def test_translations(self):
p = self.canopy.project_with_translations
url = reverse('project-translations', [p.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
translation_ids_from_api = [t['id']
for t in response.data['translations']]
translation_ids_from_orm = [t[0]
for t in p.translations.values_list('id')]
self.assertEqual(
set(translation_ids_from_api),
set(translation_ids_from_orm)
)
def test_token(self):
r = self.client.get('/api/v2/project/6/token/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['token'], None)
|
mit
|
kaedroho/wagtail
|
wagtail/admin/views/pages/create.py
|
2
|
11989
|
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.translation import gettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
from wagtail.admin import messages, signals
from wagtail.admin.action_menu import PageActionMenu
from wagtail.admin.views.generic import HookResponseMixin
from wagtail.admin.views.pages.utils import get_valid_next_url_from_request
from wagtail.core.models import Locale, Page, UserPagePermissionsProxy
def add_subpage(request, parent_page_id):
parent_page = get_object_or_404(Page, id=parent_page_id).specific
if not parent_page.permissions_for_user(request.user).can_add_subpage():
raise PermissionDenied
page_types = [
(model.get_verbose_name(), model._meta.app_label, model._meta.model_name)
for model in type(parent_page).creatable_subpage_models()
if model.can_create_at(parent_page)
]
# sort by lower-cased version of verbose name
page_types.sort(key=lambda page_type: page_type[0].lower())
if len(page_types) == 1:
# Only one page type is available - redirect straight to the create form rather than
# making the user choose
verbose_name, app_label, model_name = page_types[0]
return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id)
return TemplateResponse(request, 'wagtailadmin/pages/add_subpage.html', {
'parent_page': parent_page,
'page_types': page_types,
'next': get_valid_next_url_from_request(request),
})
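# Illustrative note (not part of the original view): for a parent page that
# allows, say, hypothetical BlogPage and EventPage subpages, page_types above
# ends up as a sorted list of (verbose_name, app_label, model_name) tuples,
# e.g. [('Blog page', 'blog', 'blogpage'), ('Event page', 'events', 'eventpage')],
# and the template renders one "add" link per entry.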
class CreateView(TemplateResponseMixin, ContextMixin, HookResponseMixin, View):
template_name = 'wagtailadmin/pages/create.html'
def dispatch(self, request, content_type_app_name, content_type_model_name, parent_page_id):
self.parent_page = get_object_or_404(Page, id=parent_page_id).specific
self.parent_page_perms = self.parent_page.permissions_for_user(self.request.user)
if not self.parent_page_perms.can_add_subpage():
raise PermissionDenied
try:
self.page_content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
# Get class
self.page_class = self.page_content_type.model_class()
# Make sure the class is a descendant of Page
if not issubclass(self.page_class, Page):
raise Http404
# page must be in the list of allowed subpage types for this parent ID
if self.page_class not in self.parent_page.creatable_subpage_models():
raise PermissionDenied
if not self.page_class.can_create_at(self.parent_page):
raise PermissionDenied
response = self.run_hook('before_create_page', self.request, self.parent_page, self.page_class)
if response:
return response
self.locale = self.parent_page.locale
        # If the parent page is the root page, the user may specify any locale they like
if self.parent_page.is_root():
selected_locale = request.GET.get('locale', None) or request.POST.get('locale', None)
if selected_locale:
self.locale = get_object_or_404(Locale, language_code=selected_locale)
self.page = self.page_class(owner=self.request.user)
self.page.locale = self.locale
self.edit_handler = self.page_class.get_edit_handler()
self.edit_handler = self.edit_handler.bind_to(request=self.request, instance=self.page)
self.form_class = self.edit_handler.get_form_class()
self.next_url = get_valid_next_url_from_request(self.request)
return super().dispatch(request)
def post(self, request):
self.form = self.form_class(
self.request.POST, self.request.FILES, instance=self.page, parent_page=self.parent_page
)
if self.form.is_valid():
return self.form_valid(self.form)
else:
return self.form_invalid(self.form)
def form_valid(self, form):
if bool(self.request.POST.get('action-publish')) and self.parent_page_perms.can_publish_subpage():
return self.publish_action()
elif bool(self.request.POST.get('action-submit')) and self.parent_page.has_workflow:
return self.submit_action()
else:
return self.save_action()
def get_edit_message_button(self):
return messages.button(reverse('wagtailadmin_pages:edit', args=(self.page.id,)), _('Edit'))
def get_view_draft_message_button(self):
return messages.button(
reverse('wagtailadmin_pages:view_draft', args=(self.page.id,)), _('View draft'), new_window=True
)
def get_view_live_message_button(self):
return messages.button(self.page.url, _('View live'), new_window=True)
def save_action(self):
self.page = self.form.save(commit=False)
self.page.live = False
# Save page
self.parent_page.add_child(instance=self.page)
# Save revision
self.page.save_revision(user=self.request.user, log_action=False)
# Notification
messages.success(self.request, _("Page '{0}' created.").format(self.page.get_admin_display_title()))
response = self.run_hook('after_create_page', self.request, self.page)
if response:
return response
# remain on edit page for further edits
return self.redirect_and_remain()
def publish_action(self):
self.page = self.form.save(commit=False)
# Save page
self.parent_page.add_child(instance=self.page)
# Save revision
revision = self.page.save_revision(user=self.request.user, log_action=False)
# Publish
response = self.run_hook('before_publish_page', self.request, self.page)
if response:
return response
revision.publish(user=self.request.user)
response = self.run_hook('after_publish_page', self.request, self.page)
if response:
return response
# Notification
if self.page.go_live_at and self.page.go_live_at > timezone.now():
messages.success(
self.request,
_("Page '{0}' created and scheduled for publishing.").format(self.page.get_admin_display_title()),
buttons=[self.get_edit_message_button()]
)
else:
buttons = []
if self.page.url is not None:
buttons.append(self.get_view_live_message_button())
buttons.append(self.get_edit_message_button())
messages.success(
self.request,
_("Page '{0}' created and published.").format(self.page.get_admin_display_title()),
buttons=buttons
)
response = self.run_hook('after_create_page', self.request, self.page)
if response:
return response
return self.redirect_away()
def submit_action(self):
self.page = self.form.save(commit=False)
self.page.live = False
# Save page
self.parent_page.add_child(instance=self.page)
# Save revision
self.page.save_revision(user=self.request.user, log_action=False)
# Submit
workflow = self.page.get_workflow()
workflow.start(self.page, self.request.user)
# Notification
buttons = []
if self.page.is_previewable():
buttons.append(self.get_view_draft_message_button())
buttons.append(self.get_edit_message_button())
messages.success(
self.request,
_("Page '{0}' created and submitted for moderation.").format(self.page.get_admin_display_title()),
buttons=buttons
)
response = self.run_hook('after_create_page', self.request, self.page)
if response:
return response
return self.redirect_away()
def redirect_away(self):
if self.next_url:
# redirect back to 'next' url if present
return redirect(self.next_url)
else:
# redirect back to the explorer
return redirect('wagtailadmin_explore', self.page.get_parent().id)
def redirect_and_remain(self):
target_url = reverse('wagtailadmin_pages:edit', args=[self.page.id])
if self.next_url:
# Ensure the 'next' url is passed through again if present
target_url += '?next=%s' % urlquote(self.next_url)
return redirect(target_url)
def form_invalid(self, form):
messages.validation_error(
self.request, _("The page could not be created due to validation errors"), self.form
)
self.has_unsaved_changes = True
self.edit_handler = self.edit_handler.bind_to(form=self.form)
return self.render_to_response(self.get_context_data())
def get(self, request):
signals.init_new_page.send(sender=CreateView, page=self.page, parent=self.parent_page)
self.form = self.form_class(instance=self.page, parent_page=self.parent_page)
self.has_unsaved_changes = False
self.edit_handler = self.edit_handler.bind_to(form=self.form)
return self.render_to_response(self.get_context_data())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'content_type': self.page_content_type,
'page_class': self.page_class,
'parent_page': self.parent_page,
'edit_handler': self.edit_handler,
'action_menu': PageActionMenu(self.request, view='create', parent_page=self.parent_page),
'preview_modes': self.page.preview_modes,
'form': self.form,
'next': self.next_url,
'has_unsaved_changes': self.has_unsaved_changes,
'locale': None,
'translations': [],
})
if getattr(settings, 'WAGTAIL_I18N_ENABLED', False):
# Pages can be created in any language at the root level
if self.parent_page.is_root():
translations = [
{
'locale': locale,
'url': reverse('wagtailadmin_pages:add', args=[
self.page_content_type.app_label,
self.page_content_type.model,
self.parent_page.id
]) + '?' + urlencode({'locale': locale.language_code}),
}
for locale in Locale.objects.all()
]
else:
user_perms = UserPagePermissionsProxy(self.request.user)
translations = [
{
'locale': translation.locale,
'url': reverse('wagtailadmin_pages:add', args=[self.page_content_type.app_label, self.page_content_type.model, translation.id]),
}
for translation in self.parent_page.get_translations().only('id', 'locale').select_related('locale')
if user_perms.for_page(translation).can_add_subpage()
and self.page_class in translation.specific_class.creatable_subpage_models()
and self.page_class.can_create_at(translation)
]
context.update({
'locale': self.locale,
'translations': translations,
})
return context
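# Illustrative sketch (not part of the original module): the run_hook() calls
# in CreateView dispatch to functions registered against hook names such as
# 'before_create_page' and 'after_create_page'. Assuming the standard wagtail
# hooks registry, a project can intercept page creation like this:
#
#   from wagtail.core import hooks
#
#   @hooks.register('before_create_page')
#   def restrict_page_creation(request, parent_page, page_class):
#       # Returning an HttpResponse here makes CreateView return it directly;
#       # returning None lets the normal create flow continue.
#       if not request.user.is_superuser:
#           from django.core.exceptions import PermissionDenied
#           raise PermissionDenied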
|
bsd-3-clause
|
maleficarium/youtube-dl
|
youtube_dl/extractor/npo.py
|
19
|
18401
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
)
class NPOBaseIE(InfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
token = self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
# Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js
token_l = list(token)
first = second = None
for i in range(5, len(token_l) - 4):
if token_l[i].isdigit():
if first is None:
first = i
elif second is None:
second = i
if first is None or second is None:
first = 12
second = 13
token_l[first], token_l[second] = token_l[second], token_l[first]
return ''.join(token_l)
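# Illustrative note (not part of the original source): the "decryption" in
# NPOBaseIE._get_token() above simply swaps the first two digit characters
# found between index 5 and len(token) - 5. For a hypothetical token
# 'abcde1f2ghijkl' the '1' (index 5) and '2' (index 7) trade places, giving
# 'abcde2f1ghijkl'; if fewer than two digits fall in that window, the
# characters at indices 12 and 13 are swapped instead.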
class NPOIE(NPOBaseIE):
IE_NAME = 'npo'
IE_DESC = 'npo.nl and ntr.nl'
_VALID_URL = r'''(?x)
(?:
npo:|
https?://
(?:www\.)?
(?:
npo\.nl/(?!live|radio)(?:[^/]+/){2}|
ntr\.nl/(?:[^/]+/){2,}|
omroepwnl\.nl/video/fragment/[^/]+__
)
)
(?P<id>[^/?#]+)
'''
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show: The best of.',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht: De toekomst komt uit Afrika',
'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
# non asf in streams
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
{
'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
'md5': '01c6a2841675995da1f0cf776f03a9c3',
'info_dict': {
'id': 'VPWON_1233944',
'ext': 'm4v',
'title': 'Aap, poot, pies',
'description': 'md5:c9c8005d1869ae65b858e82c01a91fde',
'upload_date': '20150508',
'duration': 599,
},
},
{
'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
'md5': 'd30cd8417b8b9bca1fdff27428860d08',
'info_dict': {
'id': 'POW_00996502',
'ext': 'm4v',
'title': '''"Dit is wel een 'landslide'..."''',
'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
'upload_date': '20150508',
'duration': 462,
},
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
# For some videos actual video id (prid) is different (e.g. for
# http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698
# video id is POMS_WNL_853698 but prid is POW_00996502)
video_id = metadata.get('prid') or video_id
# titel is too generic in some cases so utilize aflevering_titel as well
# when available (e.g. http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html)
title = metadata['titel']
sub_title = metadata.get('aflevering_titel')
if sub_title and sub_title != title:
title += ': %s' % sub_title
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4'))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = [{
'ext': 'vtt',
'url': 'http://e.omroep.nl/tt888/%s' % video_id,
}]
return {
'id': video_id,
'title': title,
'description': metadata.get('info'),
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
# smooth streaming is not supported
if stream_type in ['ss', 'ms']:
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
'Unable to download %s URL' % stream_type,
transform_source=strip_jsonp, fatal=False)
if not stream_url:
continue
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
# f4m downloader downloads only piece of live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
'preference': -10,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class SchoolTVIE(InfoExtractor):
IE_NAME = 'schooltv'
_VALID_URL = r'https?://(?:www\.)?schooltv\.nl/video/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'http://www.schooltv.nl/video/ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam/',
'info_dict': {
'id': 'WO_NTR_429477',
'display_id': 'ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam',
'title': 'Ademhaling: De hele dag haal je adem. Maar wat gebeurt er dan eigenlijk in je lichaam?',
'ext': 'mp4',
'description': 'md5:abfa0ff690adb73fd0297fd033aaa631'
},
'params': {
# Skip because of m3u8 download
'skip_download': True
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'data-mid=(["\'])(?P<id>.+?)\1', webpage, 'video_id', group='id')
return {
'_type': 'url_transparent',
'ie_key': 'NPO',
'url': 'npo:%s' % video_id,
'display_id': display_id
}
class VPROIE(NPOIE):
IE_NAME = 'vpro'
_VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'De toekomst komt uit Afrika',
'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
'upload_date': '20130225',
},
},
{
'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html',
'info_dict': {
'id': 'sergio-herman',
'title': 'Sergio Herman: Fucking perfect',
},
'playlist_count': 2,
},
{
# playlist with youtube embed
'url': 'http://www.vpro.nl/programmas/2doc/2015/education-education.html',
'info_dict': {
'id': 'education-education',
'title': '2Doc',
},
'playlist_count': 2,
}
]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('npo:%s' % video_id if not video_id.startswith('http') else video_id)
for video_id in re.findall(r'data-media-id="([^"]+)"', webpage)
]
playlist_title = self._search_regex(
r'<title>\s*([^>]+?)\s*-\s*Teledoc\s*-\s*VPRO\s*</title>',
webpage, 'playlist title', default=None) or self._og_search_title(webpage)
return self.playlist_result(entries, playlist_id, playlist_title)
class WNLIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+'
_TEST = {
'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515',
'info_dict': {
'id': 'vandaag-de-dag-6-mei',
'title': 'Vandaag de Dag 6 mei',
},
'playlist_count': 4,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('npo:%s' % video_id, 'NPO')
for video_id, part in re.findall(
r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>(Deel \d+)', webpage)
]
playlist_title = self._html_search_regex(
r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>',
webpage, 'playlist title')
return self.playlist_result(entries, playlist_id, playlist_title)
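# Illustrative sketch (not part of the original extractors): _VALID_URL is the
# pattern youtube-dl uses both to pick an extractor for a URL and to pull the
# video id out of it via _match_id(). The helper below is never called by the
# extractors; it only demonstrates the matching.
def _valid_url_example():
    m = re.match(NPOIE._VALID_URL, 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719')
    assert m is not None and m.group('id') == 'VPWON_1220719'
    # The bare 'npo:<id>' form used by SchoolTVIE above matches as well.
    assert re.match(NPOIE._VALID_URL, 'npo:WO_NTR_429477').group('id') == 'WO_NTR_429477'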
|
unlicense
|
etataurov/pytest
|
_pytest/helpconfig.py
|
16
|
5289
|
""" version info, help messages, tracing configuration. """
import py
import pytest
import os, sys
def pytest_addoption(parser):
group = parser.getgroup('debugconfig')
group.addoption('--version', action="store_true",
help="display pytest lib version and import information.")
group._addoption("-h", "--help", action="store_true", dest="help",
help="show help message and configuration info")
group._addoption('-p', action="append", dest="plugins", default = [],
metavar="name",
help="early-load given plugin (multi-allowed). "
"To avoid loading of plugins, use the `no:` prefix, e.g. "
"`no:doctest`.")
group.addoption('--traceconfig', '--trace-config',
action="store_true", default=False,
help="trace considerations of conftest.py files."),
group.addoption('--debug',
action="store_true", dest="debug", default=False,
help="store internal tracing debug information in 'pytestdebug.log'.")
group._addoption(
'-o', '--override-ini', nargs='*', dest="override_ini",
action="append",
help="override config option with option=value style, e.g. `-o xfail_strict=True`.")
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
outcome = yield
config = outcome.get_result()
if config.option.debug:
path = os.path.abspath("pytestdebug.log")
debugfile = open(path, 'w')
debugfile.write("versions pytest-%s, py-%s, "
"python-%s\ncwd=%s\nargs=%s\n\n" %(
pytest.__version__, py.__version__,
".".join(map(str, sys.version_info)),
os.getcwd(), config._origargs))
config.trace.root.setwriter(debugfile.write)
undo_tracing = config.pluginmanager.enable_tracing()
sys.stderr.write("writing pytestdebug information to %s\n" % path)
def unset_tracing():
debugfile.close()
sys.stderr.write("wrote pytestdebug information to %s\n" %
debugfile.name)
config.trace.root.setwriter(None)
undo_tracing()
config.add_cleanup(unset_tracing)
def pytest_cmdline_main(config):
if config.option.version:
p = py.path.local(pytest.__file__)
sys.stderr.write("This is pytest version %s, imported from %s\n" %
(pytest.__version__, p))
plugininfo = getpluginversioninfo(config)
if plugininfo:
for line in plugininfo:
sys.stderr.write(line + "\n")
return 0
elif config.option.help:
config._do_configure()
showhelp(config)
config._ensure_unconfigure()
return 0
def showhelp(config):
reporter = config.pluginmanager.get_plugin('terminalreporter')
tw = reporter._tw
tw.write(config._parser.optparser.format_help())
tw.line()
tw.line()
tw.line("[pytest] ini-options in the first "
"pytest.ini|tox.ini|setup.cfg file found:")
tw.line()
for name in config._parser._ininames:
help, type, default = config._parser._inidict[name]
if type is None:
type = "string"
spec = "%s (%s)" % (name, type)
line = " %-24s %s" %(spec, help)
tw.line(line[:tw.fullwidth])
tw.line()
tw.line("environment variables:")
vars = [
("PYTEST_ADDOPTS", "extra command line options"),
("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
]
for name, help in vars:
tw.line(" %-24s %s" % (name, help))
tw.line()
tw.line()
tw.line("to see available markers type: pytest --markers")
tw.line("to see available fixtures type: pytest --fixtures")
tw.line("(shown according to specified file_or_dir or current dir "
"if not specified)")
for warningreport in reporter.stats.get('warnings', []):
tw.line("warning : " + warningreport.message, red=True)
return
conftest_options = [
('pytest_plugins', 'list of plugin names to load'),
]
def getpluginversioninfo(config):
lines = []
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append("setuptools registered plugins:")
for plugin, dist in plugininfo:
loc = getattr(plugin, '__file__', repr(plugin))
content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
lines.append(" " + content)
return lines
def pytest_report_header(config):
lines = []
if config.option.debug or config.option.traceconfig:
lines.append("using: pytest-%s pylib-%s" %
(pytest.__version__,py.__version__))
verinfo = getpluginversioninfo(config)
if verinfo:
lines.extend(verinfo)
if config.option.traceconfig:
lines.append("active plugins:")
items = config.pluginmanager.list_name_plugin()
for name, plugin in items:
if hasattr(plugin, '__file__'):
r = plugin.__file__
else:
r = repr(plugin)
lines.append(" %-20s: %s" %(name, r))
return lines
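# Illustrative usage note (not part of the original plugin): the options
# registered in pytest_addoption() above are exercised from the command line,
# for example:
#
#   pytest --version                # handled by pytest_cmdline_main()
#   pytest -p no:doctest            # block the 'doctest' plugin from loading
#   pytest --debug                  # write trace output to pytestdebug.log
#   pytest -o xfail_strict=True     # override an ini option for this run
#   pytest --trace-config           # list active plugins in the report header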
|
mit
|
sandeepkrjha/pgmpy
|
pgmpy/tests/test_estimators/test_BicScore.py
|
6
|
1432
|
import unittest
import pandas as pd
from pgmpy.models import BayesianModel
from pgmpy.estimators import BicScore
class TestBicScore(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0], 'D': ['X', 'Y', 'Z']})
self.m1 = BayesianModel([('A', 'C'), ('B', 'C'), ('D', 'B')])
self.m2 = BayesianModel([('C', 'A'), ('C', 'B'), ('A', 'D')])
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv('pgmpy/tests/test_estimators/testdata/titanic_train.csv')
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_score(self):
self.assertAlmostEqual(BicScore(self.d1).score(self.m1), -10.698440814229318)
self.assertEqual(BicScore(self.d1).score(BayesianModel()), 0)
def test_score_titanic(self):
scorer = BicScore(self.titanic_data2)
titanic = BayesianModel([("Sex", "Survived"), ("Pclass", "Survived")])
self.assertAlmostEqual(scorer.score(titanic), -1896.7250012840179)
titanic2 = BayesianModel([("Pclass", "Sex"), ])
titanic2.add_nodes_from(["Sex", "Survived", "Pclass"])
self.assertLess(scorer.score(titanic2), scorer.score(titanic))
def tearDown(self):
del self.d1
del self.m1
del self.m2
del self.titanic_data
del self.titanic_data2
|
mit
|
toprahmin/NDMobileApp
|
node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py
|
1283
|
65086
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
"""Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
mapping = {'$(ARCHS_STANDARD)': archs}
if archs_including_64_bit:
mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
return mapping
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
def _ExpandArchs(self, archs, sdkroot):
"""Expands variables references in ARCHS, and remove duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError as e:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
def ActiveArchs(self, archs, valid_archs, sdkroot):
"""Expands variables references in ARCHS, and filter by VALID_ARCHS if it
is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depends on the version of Xcode used.
For all version anterior to Xcode 5.0 or posterior to Xcode 5.1 included
uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses
$(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added to Xcode 5.0
and deprecated with Xcode 5.1.
For "macosx" SDKROOT, all version starting with Xcode 5.0 includes 64-bit
architecture as part of $(ARCHS_STANDARD) and default to only building it.
For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
are also part of $(ARCHS_STANDARD).
All thoses rules are coded in the construction of the |XcodeArchsDefault|
object to use depending on the version of Xcode detected. The object is
for performance reason."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
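# Illustrative sketch (not part of the original module): how XcodeArchsDefault
# expands and filters ARCHS. The table below mirrors the Xcode >= 5.1 branch
# built in GetXcodeArchsDefault() above; the helper is never called by gyp.
def _ExampleArchsExpansion():
  xcode_archs = XcodeArchsDefault(
      '$(ARCHS_STANDARD)',
      XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
      XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
      XcodeArchsVariableMapping(['armv7', 'armv7s', 'arm64'],
                                ['armv7', 'armv7s', 'arm64']))
  # ARCHS unset: the default $(ARCHS_STANDARD) is expanded for the macosx SDK.
  assert xcode_archs.ActiveArchs(None, None, 'macosx') == ['x86_64']
  # Explicit ARCHS with a macro, then filtered down by VALID_ARCHS.
  assert xcode_archs.ActiveArchs(
      ['$(ARCHS_STANDARD)', 'armv7'], ['armv7'], 'iphoneos') == ['armv7']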
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsIosAppExtension():
assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.app-extension'
if self._IsIosWatchKitExtension():
assert self._IsBundle(), ('ios_watchkit_extension flag requires '
'mac_bundle (target %s)' % self.spec['target_name'])
return 'com.apple.product-type.watchkit-extension'
if self._IsIosWatchApp():
assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.application.watchapp'
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
    # These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
# In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
# llvm-gcc. It also requires a fairly recent libtool, and
# if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
# path to the libLTO.dylib that matches the used clang.
if self._Test('LLVM_LTO', 'YES', default='NO'):
cflags.append('-flto')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
        product_dir: The directory where products such as static and dynamic
            libraries are placed. This is added to the library search path.
        gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
if sdk_root and is_extension:
# Adds the link flags for extensions. These flags are common for all
      # extensions and provide the loader and the main function.
      # These flags reflect the compilation options used by Xcode to compile
# extensions.
ldflags.append('-lpkstart')
      if XcodeVersion()[0] < '0900':
ldflags.append(sdk_root +
'/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
ldflags.append('-fapplication-extension')
ldflags.append('-Xlinker -rpath '
'-Xlinker @executable_path/../../Frameworks')
self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
    # dSYMs need to be built before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and self.spec['type'] == 'executable'):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
# Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
    # ".dylib" without providing real support for them. What it does, for
    # "/usr/lib" libraries, is emit "-L/usr/lib -lname", which is dependent on
    # the library order and causes collisions when building Chrome.
    #
    # Instead, substitute ".dylib" with ".tbd" in the generated project when
    # the following conditions are both true:
    # - the library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
    # - the ".dylib" file does not exist but a ".tbd" file does.
library = l.replace('$(SDKROOT)', sdk_root)
if l.startswith('$(SDKROOT)'):
basename, ext = os.path.splitext(library)
if ext == '.dylib' and not os.path.exists(library):
tbd_library = basename + '.tbd'
if os.path.exists(tbd_library):
library = tbd_library
return library
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
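  # Illustrative sketch (editor's note): assuming the class's library_re
  # recognizes entries of the form 'lib<name>.dylib', AdjustLibraries would
  # transform a list roughly as follows:
  #   ['Cocoa.framework', 'libcrypto.dylib', '$(SDKROOT)/usr/lib/libz.dylib']
  #     -> ['-framework Cocoa', '-lcrypto', '<sdk_root>/usr/lib/libz.dylib']
  # with the last entry rewritten to '.tbd' if only the text-based stub exists.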
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
cache['DTSDKName'] = sdk_root
if xcode >= '0430':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductBuildVersion')
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['DTPlatformName'] = cache['DTSDKName']
if configname.endswith("iphoneos"):
cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductVersion')
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
    Prior to Xcode 5.0.0, if SDKROOT was not explicitly set in the Xcode
    project, the environment variable was empty. Starting with that
    version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
    has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
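# Illustrative sketch (editor's note): a hypothetical generator might wire up
# MacPrefixHeader roughly like this, assuming it supplies its own path-mapping
# helpers (the lambdas below are placeholders, not real gyp APIs):
#   pch = MacPrefixHeader(xcode_settings,
#                         lambda p: os.path.join('..', p),
#                         lambda p, lang: os.path.join('obj', 'pch', lang))
#   pch.GetInclude('cc')        # e.g. '-include obj/pch/cc' when precompiling
#   pch.GetPchBuildCommands()   # [(gch_path, '-x c-header', 'c', header), ...]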
def XcodeVersion():
"""Returns a tuple of version and build version of installed Xcode."""
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
global XCODE_VERSION_CACHE
if XCODE_VERSION_CACHE:
return XCODE_VERSION_CACHE
try:
version_list = GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
    # a bogus path set via xcode-select.
    # In that case this may be a CLT-only install, so fall back to
    # checking that version.
if len(version_list) < 2:
raise GypError("xcodebuild returned unexpected results")
except:
version = CLTVersion()
if version:
version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XCODE_VERSION_CACHE = (version, build)
return XCODE_VERSION_CACHE
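# Editor's note: the padding above turns the human-readable version into the
# four-character form used for comparisons elsewhere in this file, e.g.:
#   'Xcode 4.2'    -> '42'  -> '420' -> '0420'
#   'Xcode 4.6.3'  -> '463' -> '463' -> '0463'
#   'Xcode 10.1'   -> '101' -> '101' -> '0101'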
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
"""Returns the version of command-line tools from pkgutil."""
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def GetStdout(cmdlist):
"""Returns the content of standard output returned by invoking |cmdlist|.
  Raises |GypError| if the command exits with a non-zero return code."""
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules not only produce a binary but also package
  resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
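# Illustrative sketch (editor's note): assuming GetBundleResourceFolder()
# returns something like 'Foo.app/Contents/Resources', a resource such as
# 'app/res/en.lproj/MainMenu.xib' would be yielded roughly as:
#   ('<product_dir>/Foo.app/Contents/Resources/en.lproj/MainMenu.nib',
#    'app/res/en.lproj/MainMenu.xib')
# i.e. the .lproj directory is kept and the compiled .nib name is used.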
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
    gyp_path_to_build_path: A function that converts paths relative to the
        current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
  # These are filled in on an as-needed basis.
env = {
'BUILT_FRAMEWORKS_DIR' : built_products_dir,
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
  if XcodeVersion()[0] >= '0500' and not env.get('SDKROOT'):
sdk_root = xcode_settings._SdkRoot(configuration)
if not sdk_root:
sdk_root = xcode_settings._XcodeSdkPath('')
if sdk_root is None:
sdk_root = ''
env['SDKROOT'] = sdk_root
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
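# Editor's note: for example, the two substitutions above rewrite
#   '$PRODUCT_NAME $(TARGET_BUILD_DIR) ${SRCROOT}'
# into
#   '${PRODUCT_NAME} ${TARGET_BUILD_DIR} ${SRCROOT}'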
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '${bar} and ${baz}'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
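# Editor's note: a small illustration of the ordering contract above. Given
# hypothetical entries where one value refers to another, e.g.
#   env = {'WRAPPER_NAME': 'Foo.app',
#          'INFOPLIST_PATH': '${WRAPPER_NAME}/Contents/Info.plist'}
# the sorted result lists ('WRAPPER_NAME', ...) before ('INFOPLIST_PATH', ...),
# so that ExpandEnvVars, which walks the list in reverse, first substitutes
# ${INFOPLIST_PATH} and then resolves the nested ${WRAPPER_NAME} reference.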
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
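# Editor's note: sketch of the effect above. For a 'target'-toolset target with
# a single 'Release' configuration (and xcode_settings present), the resulting
# configuration dict is roughly:
#   {'Release': <original dict>,
#    'Release-iphonesimulator': <original dict>,   # same object, simulator SDK
#    'Release-iphoneos': <deep copy with xcode_settings['SDKROOT'] = 'iphoneos'>}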
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
|
mit
|
2014c2g4/2015cda_g7
|
static/Brython3.1.1-20150328-091302/Lib/_sysconfigdata.py
|
731
|
18167
|
build_time_vars={'HAVE_SYS_WAIT_H': 1, 'HAVE_UTIL_H': 0, 'HAVE_SYMLINKAT': 1, 'HAVE_LIBSENDFILE': 0, 'SRCDIRS': 'Parser Grammar Objects Python Modules Mac', 'SIZEOF_OFF_T': 8, 'BASECFLAGS': '-Wno-unused-result', 'HAVE_UTIME_H': 1, 'EXTRAMACHDEPPATH': '', 'HAVE_SYS_TIME_H': 1, 'CFLAGSFORSHARED': '-fPIC', 'HAVE_HYPOT': 1, 'PGSRCS': '\\', 'HAVE_LIBUTIL_H': 0, 'HAVE_COMPUTED_GOTOS': 1, 'HAVE_LUTIMES': 1, 'HAVE_MAKEDEV': 1, 'HAVE_REALPATH': 1, 'HAVE_LINUX_TIPC_H': 1, 'MULTIARCH': 'i386-linux-gnu', 'HAVE_GETWD': 1, 'HAVE_GCC_ASM_FOR_X64': 0, 'HAVE_INET_PTON': 1, 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, 'SIZEOF__BOOL': 1, 'HAVE_ZLIB_COPY': 1, 'ASDLGEN': 'python3.3 ../Parser/asdl_c.py', 'GRAMMAR_INPUT': '../Grammar/Grammar', 'HOST_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_SCHED_RR_GET_INTERVAL': 1, 'HAVE_BLUETOOTH_H': 0, 'HAVE_MKFIFO': 1, 'TIMEMODULE_LIB': 0, 'LIBM': '-lm', 'PGENOBJS': '\\ \\', 'PYTHONFRAMEWORK': '', 'GETPGRP_HAVE_ARG': 0, 'HAVE_MMAP': 1, 'SHLIB_SUFFIX': '.so', 'SIZEOF_FLOAT': 4, 'HAVE_RENAMEAT': 1, 'HAVE_LANGINFO_H': 1, 'HAVE_STDLIB_H': 1, 'PY_CORE_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -I. -IInclude -I../Include -D_FORTIFY_SOURCE=2 -fPIC -DPy_BUILD_CORE', 'HAVE_BROKEN_PIPE_BUF': 0, 'HAVE_CONFSTR': 1, 'HAVE_SIGTIMEDWAIT': 1, 'HAVE_FTELLO': 1, 'READELF': 'readelf', 'HAVE_SIGALTSTACK': 1, 'TESTTIMEOUT': 3600, 'PYTHONPATH': ':plat-i386-linux-gnu', 'SIZEOF_WCHAR_T': 4, 'LIBOBJS': '', 'HAVE_SYSCONF': 1, 'MAKESETUP': '../Modules/makesetup', 'HAVE_UTIMENSAT': 1, 'HAVE_FCHOWNAT': 1, 'HAVE_WORKING_TZSET': 1, 'HAVE_FINITE': 1, 'HAVE_ASINH': 1, 'HAVE_SETEUID': 1, 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'HAVE_SETGROUPS': 1, 'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o', 'HAVE_MBRTOWC': 1, 'SIZEOF_INT': 4, 'HAVE_STDARG_PROTOTYPES': 1, 'TM_IN_SYS_TIME': 0, 'HAVE_SYS_TIMES_H': 1, 'HAVE_LCHOWN': 1, 'HAVE_SSIZE_T': 1, 'HAVE_PAUSE': 1, 'SYSLIBS': '-lm', 'POSIX_SEMAPHORES_NOT_ENABLED': 0, 'HAVE_DEVICE_MACROS': 1, 'BLDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\', 'HAVE_SYS_UN_H': 1, 'HAVE_SYS_STAT_H': 1, 'VPATH': '..', 'INCLDIRSTOMAKE': '/usr/include /usr/include /usr/include/python3.3m /usr/include/python3.3m', 'HAVE_BROKEN_SEM_GETVALUE': 0, 'HAVE_TIMEGM': 1, 'PACKAGE_VERSION': 0, 'MAJOR_IN_SYSMACROS': 0, 'HAVE_ATANH': 1, 'HAVE_GAI_STRERROR': 1, 'HAVE_SYS_POLL_H': 1, 'SIZEOF_PTHREAD_T': 4, 'SIZEOF_FPOS_T': 16, 'HAVE_CTERMID': 1, 'HAVE_TMPFILE': 1, 'HAVE_SETUID': 1, 'CXX': 'i686-linux-gnu-g++ -pthread', 'srcdir': '..', 'HAVE_UINT32_T': 1, 'HAVE_ADDRINFO': 1, 'HAVE_GETSPENT': 1, 'SIZEOF_DOUBLE': 8, 'HAVE_INT32_T': 1, 'LIBRARY_OBJS_OMIT_FROZEN': '\\', 'HAVE_FUTIMES': 1, 'CONFINCLUDEPY': '/usr/include/python3.3m', 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1, 'LIBFFI_INCLUDEDIR': '', 'HAVE_SETGID': 1, 'HAVE_UINT64_T': 1, 'EXEMODE': 755, 'UNIVERSALSDK': '', 'HAVE_LIBDL': 1, 'HAVE_GETNAMEINFO': 1, 'HAVE_STDINT_H': 1, 'COREPYTHONPATH': ':plat-i386-linux-gnu', 'HAVE_SOCKADDR_STORAGE': 1, 'HAVE_WAITID': 1, 'EXTRAPLATDIR': '@EXTRAPLATDIR@', 'HAVE_ACCEPT4': 1, 'RUNSHARED': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared:', 'EXE': '', 
'HAVE_SIGACTION': 1, 'HAVE_CHOWN': 1, 'HAVE_GETLOGIN': 1, 'HAVE_TZNAME': 0, 'PACKAGE_NAME': 0, 'HAVE_GETPGID': 1, 'HAVE_GLIBC_MEMMOVE_BUG': 0, 'BUILD_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_LINUX_CAN_H': 1, 'DYNLOADFILE': 'dynload_shlib.o', 'HAVE_PWRITE': 1, 'BUILDEXE': '', 'HAVE_OPENPTY': 1, 'HAVE_LOCKF': 1, 'HAVE_COPYSIGN': 1, 'HAVE_PREAD': 1, 'HAVE_DLOPEN': 1, 'HAVE_SYS_KERN_CONTROL_H': 0, 'PY_FORMAT_LONG_LONG': '"ll"', 'HAVE_TCSETPGRP': 1, 'HAVE_SETSID': 1, 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0, 'HAVE_STRING_H': 1, 'LDLIBRARY': 'libpython3.3m.so', 'INSTALL_SCRIPT': '/usr/bin/install -c', 'HAVE_SYS_XATTR_H': 1, 'HAVE_CURSES_IS_TERM_RESIZED': 1, 'HAVE_TMPNAM_R': 1, 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */", 'WANT_SIGFPE_HANDLER': 1, 'HAVE_INT64_T': 1, 'HAVE_STAT_TV_NSEC': 1, 'HAVE_SYS_MKDEV_H': 0, 'HAVE_BROKEN_POLL': 0, 'HAVE_IF_NAMEINDEX': 1, 'HAVE_GETPWENT': 1, 'PSRCS': '\\', 'RANLIB': 'ranlib', 'HAVE_WCSCOLL': 1, 'WITH_NEXT_FRAMEWORK': 0, 'ASDLGEN_FILES': '../Parser/asdl.py ../Parser/asdl_c.py', 'HAVE_RL_PRE_INPUT_HOOK': 1, 'PACKAGE_URL': 0, 'SHLIB_EXT': 0, 'HAVE_SYS_LOADAVG_H': 0, 'HAVE_LIBIEEE': 0, 'HAVE_SEM_OPEN': 1, 'HAVE_TERM_H': 1, 'IO_OBJS': '\\', 'IO_H': 'Modules/_io/_iomodule.h', 'HAVE_STATVFS': 1, 'VERSION': '3.3', 'HAVE_GETC_UNLOCKED': 1, 'MACHDEPS': 'plat-i386-linux-gnu @EXTRAPLATDIR@', 'SUBDIRSTOO': 'Include Lib Misc', 'HAVE_SETREUID': 1, 'HAVE_ERFC': 1, 'HAVE_SETRESUID': 1, 'LINKFORSHARED': '-Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions', 'HAVE_SYS_TYPES_H': 1, 'HAVE_GETPAGESIZE': 1, 'HAVE_SETEGID': 1, 'HAVE_PTY_H': 1, 'HAVE_STRUCT_STAT_ST_FLAGS': 0, 'HAVE_WCHAR_H': 1, 'HAVE_FSEEKO': 1, 'Py_ENABLE_SHARED': 1, 'HAVE_SIGRELSE': 1, 'HAVE_PTHREAD_INIT': 0, 'FILEMODE': 644, 'HAVE_SYS_RESOURCE_H': 1, 'HAVE_READLINKAT': 1, 'PYLONG_BITS_IN_DIGIT': 0, 'LINKCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SETLOCALE': 1, 'HAVE_CHROOT': 1, 'HAVE_OPENAT': 1, 'HAVE_FEXECVE': 1, 'LDCXXSHARED': 'i686-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions', 'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Ext-dummy', 'HAVE_MKNOD': 1, 'PY_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_BROKEN_MBSTOWCS': 0, 'LIBRARY_OBJS': '\\', 'HAVE_LOG1P': 1, 'SIZEOF_VOID_P': 4, 'HAVE_FCHOWN': 1, 'PYTHONFRAMEWORKPREFIX': '', 'HAVE_LIBDLD': 0, 'HAVE_TGAMMA': 1, 'HAVE_ERRNO_H': 1, 'HAVE_IO_H': 0, 'OTHER_LIBTOOL_OPT': '', 'HAVE_POLL_H': 1, 'PY_CPPFLAGS': '-I. 
-IInclude -I../Include -D_FORTIFY_SOURCE=2', 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax', 'GRAMMAR_H': 'Include/graminit.h', 'TANH_PRESERVES_ZERO_SIGN': 1, 'HAVE_GETLOADAVG': 1, 'UNICODE_DEPS': '\\ \\', 'HAVE_GETCWD': 1, 'MANDIR': '/usr/share/man', 'MACHDESTLIB': '/usr/lib/python3.3', 'GRAMMAR_C': 'Python/graminit.c', 'PGOBJS': '\\', 'HAVE_DEV_PTMX': 1, 'HAVE_UINTPTR_T': 1, 'HAVE_SCHED_SETAFFINITY': 1, 'PURIFY': '', 'HAVE_DECL_ISINF': 1, 'HAVE_RL_CALLBACK': 1, 'HAVE_WRITEV': 1, 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, 'HAVE_SYS_AUDIOIO_H': 0, 'EXT_SUFFIX': '.cpython-33m.so', 'SIZEOF_LONG_LONG': 8, 'DLINCLDIR': '.', 'HAVE_PATHCONF': 1, 'HAVE_UNLINKAT': 1, 'MKDIR_P': '/bin/mkdir -p', 'HAVE_ALTZONE': 0, 'SCRIPTDIR': '/usr/lib', 'OPCODETARGETGEN_FILES': '\\', 'HAVE_GETSPNAM': 1, 'HAVE_SYS_TERMIO_H': 0, 'HAVE_ATTRIBUTE_FORMAT_PARSETUPLE': 0, 'HAVE_PTHREAD_H': 1, 'Py_DEBUG': 0, 'HAVE_STRUCT_STAT_ST_BLOCKS': 1, 'X87_DOUBLE_ROUNDING': 1, 'SIZEOF_TIME_T': 4, 'HAVE_DYNAMIC_LOADING': 1, 'HAVE_DIRECT_H': 0, 'SRC_GDB_HOOKS': '../Tools/gdb/libpython.py', 'HAVE_GETADDRINFO': 1, 'HAVE_BROKEN_NICE': 0, 'HAVE_DIRENT_H': 1, 'HAVE_WCSXFRM': 1, 'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1, 'HAVE_FSTATVFS': 1, 'PYTHON': 'python', 'HAVE_OSX105_SDK': 0, 'BINDIR': '/usr/bin', 'TESTPYTHON': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python', 'ARFLAGS': 'rc', 'PLATDIR': 'plat-i386-linux-gnu', 'HAVE_ASM_TYPES_H': 1, 'PY3LIBRARY': 'libpython3.so', 'HAVE_PLOCK': 0, 'FLOCK_NEEDS_LIBBSD': 0, 'WITH_TSC': 0, 'HAVE_LIBREADLINE': 1, 'MACHDEP': 'linux', 'HAVE_SELECT': 1, 'LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_HSTRERROR': 1, 'SOABI': 'cpython-33m', 'HAVE_GETTIMEOFDAY': 1, 'HAVE_LIBRESOLV': 0, 'HAVE_UNSETENV': 1, 'HAVE_TM_ZONE': 1, 'HAVE_GETPGRP': 1, 'HAVE_FLOCK': 1, 'HAVE_SYS_BSDTTY_H': 0, 'SUBDIRS': '', 'PYTHONFRAMEWORKINSTALLDIR': '', 'PACKAGE_BUGREPORT': 0, 'HAVE_CLOCK': 1, 'HAVE_GETPEERNAME': 1, 'SIZEOF_PID_T': 4, 'HAVE_CONIO_H': 0, 'HAVE_FSTATAT': 1, 'HAVE_NETPACKET_PACKET_H': 1, 'HAVE_WAIT3': 1, 'DESTPATH': '', 'HAVE_STAT_TV_NSEC2': 0, 'HAVE_GETRESGID': 1, 'HAVE_UCS4_TCL': 0, 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0, 'HAVE_TIMES': 1, 'HAVE_UNAME': 1, 'HAVE_ERF': 1, 'SIZEOF_SHORT': 2, 'HAVE_NCURSES_H': 1, 'HAVE_SYS_SENDFILE_H': 1, 'HAVE_CTERMID_R': 0, 'HAVE_TMPNAM': 1, 'prefix': '/usr', 'HAVE_NICE': 1, 'WITH_THREAD': 1, 'LN': 'ln', 'TESTRUNNER': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python ../Tools/scripts/run_tests.py', 'HAVE_SIGINTERRUPT': 1, 'HAVE_SETPGID': 1, 'RETSIGTYPE': 'void', 'HAVE_SCHED_GET_PRIORITY_MAX': 1, 'HAVE_SYS_SYS_DOMAIN_H': 0, 'HAVE_SYS_DIR_H': 0, 'HAVE__GETPTY': 0, 'HAVE_BLUETOOTH_BLUETOOTH_H': 1, 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, 'HAVE_POLL': 1, 'PYTHON_OBJS': '\\', 'HAVE_WAITPID': 1, 'USE_INLINE': 1, 'HAVE_FUTIMENS': 1, 'USE_COMPUTED_GOTOS': 1, 'MAINCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SOCKETPAIR': 1, 'HAVE_PROCESS_H': 0, 'HAVE_SETVBUF': 1, 'HAVE_FDOPENDIR': 1, 'CONFINCLUDEDIR': '/usr/include', 'BINLIBDEST': '/usr/lib/python3.3', 'HAVE_SYS_IOCTL_H': 1, 'HAVE_SYSEXITS_H': 1, 'LDLAST': '', 'HAVE_SYS_FILE_H': 1, 'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1, 'HAVE_RL_COMPLETION_MATCHES': 1, 'HAVE_TCGETPGRP': 1, 'SIZEOF_SIZE_T': 4, 'HAVE_EPOLL_CREATE1': 1, 'HAVE_SYS_SELECT_H': 1, 'HAVE_CLOCK_GETTIME': 1, 'CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HAVE_SNPRINTF': 1, 'BLDLIBRARY': '-lpython3.3m', 'PARSER_HEADERS': '\\', 
'SO': '.so', 'LIBRARY': 'libpython3.3m.a', 'HAVE_FPATHCONF': 1, 'HAVE_TERMIOS_H': 1, 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, 'AST_H': 'Include/Python-ast.h', 'HAVE_GCC_UINT128_T': 0, 'HAVE_ACOSH': 1, 'MODOBJS': 'Modules/_threadmodule.o Modules/signalmodule.o Modules/arraymodule.o Modules/mathmodule.o Modules/_math.o Modules/_struct.o Modules/timemodule.o Modules/_randommodule.o Modules/atexitmodule.o Modules/_elementtree.o Modules/_pickle.o Modules/_datetimemodule.o Modules/_bisectmodule.o Modules/_heapqmodule.o Modules/unicodedata.o Modules/fcntlmodule.o Modules/spwdmodule.o Modules/grpmodule.o Modules/selectmodule.o Modules/socketmodule.o Modules/_posixsubprocess.o Modules/md5module.o Modules/sha1module.o Modules/sha256module.o Modules/sha512module.o Modules/syslogmodule.o Modules/binascii.o Modules/zlibmodule.o Modules/pyexpat.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/_functoolsmodule.o Modules/operator.o Modules/_collectionsmodule.o Modules/itertoolsmodule.o Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o Modules/symtablemodule.o Modules/xxsubtype.o', 'AST_C': 'Python/Python-ast.c', 'HAVE_SYS_NDIR_H': 0, 'DESTDIRS': '/usr /usr/lib /usr/lib/python3.3 /usr/lib/python3.3/lib-dynload', 'HAVE_SIGNAL_H': 1, 'PACKAGE_TARNAME': 0, 'HAVE_GETPRIORITY': 1, 'INCLUDEDIR': '/usr/include', 'HAVE_INTTYPES_H': 1, 'SIGNAL_OBJS': '', 'HAVE_READV': 1, 'HAVE_SETHOSTNAME': 1, 'MODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'CC': 'i686-linux-gnu-gcc -pthread', 'HAVE_LCHMOD': 0, 'SIZEOF_UINTPTR_T': 4, 'LIBPC': '/usr/lib/i386-linux-gnu/pkgconfig', 'BYTESTR_DEPS': '\\', 'HAVE_MKDIRAT': 1, 'LIBPL': '/usr/lib/python3.3/config-3.3m-i386-linux-gnu', 'HAVE_SHADOW_H': 1, 'HAVE_SYS_EVENT_H': 0, 'INSTALL': '/usr/bin/install -c', 'HAVE_GCC_ASM_FOR_X87': 1, 'HAVE_BROKEN_UNSETENV': 0, 'BASECPPFLAGS': '', 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, 'HAVE_STRUCT_STAT_ST_RDEV': 1, 'HAVE_SEM_UNLINK': 1, 'BUILDPYTHON': 'python', 'HAVE_RL_CATCH_SIGNAL': 1, 'HAVE_DECL_TZNAME': 0, 'RESSRCDIR': 'Mac/Resources/framework', 'HAVE_PTHREAD_SIGMASK': 1, 'HAVE_UTIMES': 1, 'DISTDIRS': 'Include Lib Misc Ext-dummy', 'HAVE_FDATASYNC': 1, 'HAVE_USABLE_WCHAR_T': 0, 'PY_FORMAT_SIZE_T': '"z"', 'HAVE_SCHED_SETSCHEDULER': 1, 'VA_LIST_IS_ARRAY': 0, 'HAVE_LINUX_NETLINK_H': 1, 'HAVE_SETREGID': 1, 'HAVE_STROPTS_H': 1, 'LDVERSION': '3.3m', 'abs_builddir': '/build/buildd/python3.3-3.3.1/build-shared', 'SITEPATH': '', 'HAVE_GETHOSTBYNAME': 0, 'HAVE_SIGPENDING': 1, 'HAVE_KQUEUE': 0, 'HAVE_SYNC': 1, 'HAVE_GETSID': 1, 'HAVE_ROUND': 1, 'HAVE_STRFTIME': 1, 'AST_H_DIR': 'Include', 'HAVE_PIPE2': 1, 'AST_C_DIR': 'Python', 'TESTPYTHONOPTS': '', 'HAVE_DEV_PTC': 0, 'GETTIMEOFDAY_NO_TZ': 0, 'HAVE_NET_IF_H': 1, 'HAVE_SENDFILE': 1, 'HAVE_SETPGRP': 1, 'HAVE_SEM_GETVALUE': 1, 'CONFIGURE_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'DLLLIBRARY': '', 'PYTHON_FOR_BUILD': './python -E', 'SETPGRP_HAVE_ARG': 0, 'HAVE_INET_ATON': 1, 'INSTALL_SHARED': '/usr/bin/install -c -m 555', 'WITH_DOC_STRINGS': 1, 'OPCODETARGETS_H': '\\', 'HAVE_INITGROUPS': 1, 'HAVE_LINKAT': 1, 'BASEMODLIBS': '', 'SGI_ABI': '', 'HAVE_SCHED_SETPARAM': 1, 'OPT': '-DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes', 'HAVE_POSIX_FADVISE': 1, 'datarootdir': '/usr/share', 'HAVE_MEMRCHR': 1, 'HGTAG': '', 'HAVE_MEMMOVE': 1, 'HAVE_GETRESUID': 1, 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, 
'HAVE_LSTAT': 1, 'AR': 'ar', 'HAVE_WAIT4': 1, 'HAVE_SYS_MODEM_H': 0, 'INSTSONAME': 'libpython3.3m.so.1.0', 'HAVE_SYS_STATVFS_H': 1, 'HAVE_LGAMMA': 1, 'HAVE_PROTOTYPES': 1, 'HAVE_SYS_UIO_H': 1, 'MAJOR_IN_MKDEV': 0, 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\', 'HAVE_SYS_DEVPOLL_H': 0, 'HAVE_CHFLAGS': 0, 'HAVE_FSYNC': 1, 'HAVE_FCHMOD': 1, 'INCLUDEPY': '/usr/include/python3.3m', 'HAVE_SEM_TIMEDWAIT': 1, 'LDLIBRARYDIR': '', 'HAVE_STRUCT_TM_TM_ZONE': 1, 'HAVE_CURSES_H': 1, 'TIME_WITH_SYS_TIME': 1, 'HAVE_DUP2': 1, 'ENABLE_IPV6': 1, 'WITH_VALGRIND': 0, 'HAVE_SETITIMER': 1, 'THREADOBJ': 'Python/thread.o', 'LOCALMODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'HAVE_MEMORY_H': 1, 'HAVE_GETITIMER': 1, 'HAVE_C99_BOOL': 1, 'INSTALL_DATA': '/usr/bin/install -c -m 644', 'PGEN': 'Parser/pgen', 'HAVE_GRP_H': 1, 'HAVE_WCSFTIME': 1, 'AIX_GENUINE_CPLUSPLUS': 0, 'HAVE_LIBINTL_H': 1, 'SHELL': '/bin/sh', 'HAVE_UNISTD_H': 1, 'EXTRATESTOPTS': '', 'HAVE_EXECV': 1, 'HAVE_FSEEK64': 0, 'MVWDELCH_IS_EXPRESSION': 1, 'DESTSHARED': '/usr/lib/python3.3/lib-dynload', 'OPCODETARGETGEN': '\\', 'LIBDEST': '/usr/lib/python3.3', 'CCSHARED': '-fPIC', 'HAVE_EXPM1': 1, 'HAVE_DLFCN_H': 1, 'exec_prefix': '/usr', 'HAVE_READLINK': 1, 'WINDOW_HAS_FLAGS': 1, 'HAVE_FTELL64': 0, 'HAVE_STRLCPY': 0, 'MACOSX_DEPLOYMENT_TARGET': '', 'HAVE_SYS_SYSCALL_H': 1, 'DESTLIB': '/usr/lib/python3.3', 'LDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HGVERSION': '', 'PYTHON_HEADERS': '\\', 'HAVE_STRINGS_H': 1, 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, 'HAVE_POSIX_FALLOCATE': 1, 'HAVE_DIRFD': 1, 'HAVE_LOG2': 1, 'HAVE_GETPID': 1, 'HAVE_ALARM': 1, 'MACHDEP_OBJS': '', 'HAVE_SPAWN_H': 1, 'HAVE_FORK': 1, 'HAVE_SETRESGID': 1, 'HAVE_FCHMODAT': 1, 'HAVE_CLOCK_GETRES': 1, 'MACHDEPPATH': ':plat-i386-linux-gnu', 'STDC_HEADERS': 1, 'HAVE_SETPRIORITY': 1, 'LIBC': '', 'HAVE_SYS_EPOLL_H': 1, 'HAVE_SYS_UTSNAME_H': 1, 'HAVE_PUTENV': 1, 'HAVE_CURSES_RESIZE_TERM': 1, 'HAVE_FUTIMESAT': 1, 'WITH_DYLD': 0, 'INSTALL_PROGRAM': '/usr/bin/install -c', 'LIBS': '-lpthread -ldl -lutil', 'HAVE_TRUNCATE': 1, 'TESTOPTS': '', 'PROFILE_TASK': '../Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck', 'HAVE_CURSES_RESIZETERM': 1, 'ABIFLAGS': 'm', 'HAVE_GETGROUPLIST': 1, 'OBJECT_OBJS': '\\', 'HAVE_MKNODAT': 1, 'HAVE_ST_BLOCKS': 1, 'HAVE_STRUCT_STAT_ST_GEN': 0, 'SYS_SELECT_WITH_SYS_TIME': 1, 'SHLIBS': '-lpthread -ldl -lutil', 'HAVE_GETGROUPS': 1, 'MODULE_OBJS': '\\', 'PYTHONFRAMEWORKDIR': 'no-framework', 'HAVE_FCNTL_H': 1, 'HAVE_LINK': 1, 'HAVE_SIGWAIT': 1, 'HAVE_GAMMA': 1, 'HAVE_SYS_LOCK_H': 0, 'HAVE_FORKPTY': 1, 'HAVE_SOCKADDR_SA_LEN': 0, 'HAVE_TEMPNAM': 1, 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1, 'HAVE_MKFIFOAT': 1, 'HAVE_SIGWAITINFO': 1, 'HAVE_FTIME': 1, 'HAVE_EPOLL': 1, 'HAVE_SYS_SOCKET_H': 1, 'HAVE_LARGEFILE_SUPPORT': 1, 'CONFIGURE_CFLAGS': '-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security', 'HAVE_PTHREAD_DESTRUCTOR': 0, 'CONFIGURE_CPPFLAGS': '-D_FORTIFY_SOURCE=2', 'HAVE_SYMLINK': 1, 'HAVE_LONG_LONG': 1, 'HAVE_IEEEFP_H': 0, 'LIBDIR': '/usr/lib', 'HAVE_PTHREAD_KILL': 1, 'TESTPATH': '', 'HAVE_STRDUP': 1, 'POBJS': '\\', 'NO_AS_NEEDED': '-Wl,--no-as-needed', 'HAVE_LONG_DOUBLE': 1, 'HGBRANCH': '', 'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1, 
'HAVE_FACCESSAT': 1, 'AST_ASDL': '../Parser/Python.asdl', 'CPPFLAGS': '-I. -IInclude -I../Include -D_FORTIFY_SOURCE=2', 'HAVE_MKTIME': 1, 'HAVE_NDIR_H': 0, 'PY_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBOBJDIR': 'Python/', 'HAVE_LINUX_CAN_RAW_H': 1, 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, 'PACKAGE_STRING': 0, 'GNULD': 'yes', 'LOG1P_DROPS_ZERO_SIGN': 0, 'HAVE_FTRUNCATE': 1, 'WITH_LIBINTL': 0, 'HAVE_MREMAP': 1, 'HAVE_DECL_ISNAN': 1, 'HAVE_KILLPG': 1, 'SIZEOF_LONG': 4, 'HAVE_DECL_ISFINITE': 1, 'HAVE_IPA_PURE_CONST_BUG': 0, 'WITH_PYMALLOC': 1, 'abs_srcdir': '/build/buildd/python3.3-3.3.1/build-shared/..', 'HAVE_FCHDIR': 1, 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, 'AC_APPLE_UNIVERSAL_BUILD': 0, 'PGENSRCS': '\\ \\', 'DIRMODE': 755, 'HAVE_GETHOSTBYNAME_R': 1, 'HAVE_LCHFLAGS': 0, 'HAVE_SYS_PARAM_H': 1, 'SIZEOF_LONG_DOUBLE': 12, 'CONFIG_ARGS': "'--enable-shared' '--prefix=/usr' '--enable-ipv6' '--enable-loadable-sqlite-extensions' '--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' '--with-system-expat' '--with-system-ffi' '--with-fpectl' 'CC=i686-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ' 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro' 'CPPFLAGS=-D_FORTIFY_SOURCE=2'", 'HAVE_SCHED_H': 1, 'HAVE_KILL': 1}
|
gpl-3.0
|
rvraghav93/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
62
|
1119
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
klmitch/neutron
|
neutron/tests/unit/objects/qos/test_policy.py
|
3
|
15568
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import exceptions as n_exc
from neutron.db import models_v2
from neutron.objects.db import api as db_api
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.tests.unit.objects import test_base
from neutron.tests.unit import testlib_api
class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase):
_test_class = policy.QosPolicy
def setUp(self):
super(QosPolicyObjectTestCase, self).setUp()
# qos_policy_ids will be incorrect, but we don't care in this test
self.db_qos_bandwidth_rules = [
self.get_random_fields(rule.QosBandwidthLimitRule)
for _ in range(3)]
self.db_qos_dscp_rules = [
self.get_random_fields(rule.QosDscpMarkingRule)
for _ in range(3)]
self.model_map = {
self._test_class.db_model: self.db_objs,
self._test_class.rbac_db_model: [],
self._test_class.port_binding_model: [],
self._test_class.network_binding_model: [],
rule.QosBandwidthLimitRule.db_model: self.db_qos_bandwidth_rules,
rule.QosDscpMarkingRule.db_model: self.db_qos_dscp_rules}
self.get_object = mock.patch.object(
db_api, 'get_object', side_effect=self.fake_get_object).start()
self.get_objects = mock.patch.object(
db_api, 'get_objects', side_effect=self.fake_get_objects).start()
def fake_get_objects(self, context, model, **kwargs):
return self.model_map[model]
def fake_get_object(self, context, model, **kwargs):
objects = self.model_map[model]
if not objects:
return None
return [obj for obj in objects if obj['id'] == kwargs['id']][0]
def test_get_objects(self):
admin_context = self.context.elevated()
with mock.patch.object(self.context, 'elevated',
return_value=admin_context) as context_mock:
objs = self._test_class.get_objects(self.context)
context_mock.assert_called_once_with()
self.get_objects.assert_any_call(
admin_context, self._test_class.db_model)
self._validate_objects(self.db_objs, objs)
def test_get_objects_valid_fields(self):
admin_context = self.context.elevated()
with mock.patch.object(
db_api, 'get_objects',
return_value=[self.db_obj]) as get_objects_mock:
with mock.patch.object(
self.context,
'elevated',
return_value=admin_context) as context_mock:
objs = self._test_class.get_objects(
self.context,
**self.valid_field_filter)
context_mock.assert_called_once_with()
get_objects_mock.assert_any_call(
admin_context, self._test_class.db_model,
**self.valid_field_filter)
self._validate_objects([self.db_obj], objs)
def test_get_object(self):
admin_context = self.context.elevated()
with mock.patch.object(db_api, 'get_object',
return_value=self.db_obj) as get_object_mock:
with mock.patch.object(self.context,
'elevated',
return_value=admin_context) as context_mock:
obj = self._test_class.get_object(self.context, id='fake_id')
self.assertTrue(self._is_test_class(obj))
self.assertEqual(self.db_obj, test_base.get_obj_db_fields(obj))
context_mock.assert_called_once_with()
get_object_mock.assert_called_once_with(
admin_context, self._test_class.db_model, id='fake_id')
class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = policy.QosPolicy
def setUp(self):
super(QosPolicyDbObjectTestCase, self).setUp()
self.db_qos_bandwidth_rules = [
self.get_random_fields(rule.QosBandwidthLimitRule)
for _ in range(3)]
self.model_map.update({
rule.QosBandwidthLimitRule.db_model: self.db_qos_bandwidth_rules
})
self._create_test_network()
self._create_test_port(self._network)
def _create_test_policy(self):
policy_obj = policy.QosPolicy(self.context, **self.db_obj)
policy_obj.create()
return policy_obj
def _create_test_policy_with_bwrule(self):
policy_obj = self._create_test_policy()
rule_fields = self.get_random_fields(
obj_cls=rule.QosBandwidthLimitRule)
rule_fields['qos_policy_id'] = policy_obj.id
rule_obj = rule.QosBandwidthLimitRule(self.context, **rule_fields)
rule_obj.create()
return policy_obj, rule_obj
def test_attach_network_get_network_policy(self):
obj = self._create_test_policy()
policy_obj = policy.QosPolicy.get_network_policy(self.context,
self._network['id'])
self.assertIsNone(policy_obj)
# Now attach policy and repeat
obj.attach_network(self._network['id'])
policy_obj = policy.QosPolicy.get_network_policy(self.context,
self._network['id'])
self.assertEqual(obj, policy_obj)
def test_attach_network_nonexistent_network(self):
obj = self._create_test_policy()
self.assertRaises(n_exc.NetworkQosBindingNotFound,
obj.attach_network, 'non-existent-network')
def test_attach_network_get_policy_network(self):
obj = self._create_test_policy()
obj.attach_network(self._network['id'])
networks = obj.get_bound_networks()
self.assertEqual(1, len(networks))
self.assertEqual(self._network['id'], networks[0])
def test_attach_and_get_multiple_policy_networks(self):
net1_id = self._network['id']
net2 = db_api.create_object(self.context,
models_v2.Network,
{'name': 'test-network2'})
net2_id = net2['id']
obj = self._create_test_policy()
obj.attach_network(net1_id)
obj.attach_network(net2_id)
networks = obj.get_bound_networks()
self.assertEqual(2, len(networks))
self.assertTrue(net1_id in networks)
self.assertTrue(net2_id in networks)
def test_attach_port_nonexistent_port(self):
obj = self._create_test_policy()
self.assertRaises(n_exc.PortQosBindingNotFound,
obj.attach_port, 'non-existent-port')
def test_attach_network_nonexistent_policy(self):
policy_obj = policy.QosPolicy(self.context, **self.db_obj)
self.assertRaises(n_exc.NetworkQosBindingNotFound,
policy_obj.attach_network, self._network['id'])
def test_attach_port_nonexistent_policy(self):
policy_obj = policy.QosPolicy(self.context, **self.db_obj)
self.assertRaises(n_exc.PortQosBindingNotFound,
policy_obj.attach_port, self._port['id'])
def test_attach_port_get_port_policy(self):
obj = self._create_test_policy()
policy_obj = policy.QosPolicy.get_network_policy(self.context,
self._network['id'])
self.assertIsNone(policy_obj)
# Now attach policy and repeat
obj.attach_port(self._port['id'])
policy_obj = policy.QosPolicy.get_port_policy(self.context,
self._port['id'])
self.assertEqual(obj, policy_obj)
def test_attach_and_get_multiple_policy_ports(self):
port1_id = self._port['id']
port2 = db_api.create_object(self.context, models_v2.Port,
{'tenant_id': 'fake_tenant_id',
'name': 'test-port2',
'network_id': self._network['id'],
'mac_address': 'fake_mac2',
'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'fake_device',
'device_owner': 'fake_owner'})
port2_id = port2['id']
obj = self._create_test_policy()
obj.attach_port(port1_id)
obj.attach_port(port2_id)
ports = obj.get_bound_ports()
self.assertEqual(2, len(ports))
self.assertTrue(port1_id in ports)
self.assertTrue(port2_id in ports)
def test_attach_port_get_policy_port(self):
obj = self._create_test_policy()
obj.attach_port(self._port['id'])
ports = obj.get_bound_ports()
self.assertEqual(1, len(ports))
self.assertEqual(self._port['id'], ports[0])
def test_detach_port(self):
obj = self._create_test_policy()
obj.attach_port(self._port['id'])
obj.detach_port(self._port['id'])
policy_obj = policy.QosPolicy.get_port_policy(self.context,
self._port['id'])
self.assertIsNone(policy_obj)
def test_detach_network(self):
obj = self._create_test_policy()
obj.attach_network(self._network['id'])
obj.detach_network(self._network['id'])
policy_obj = policy.QosPolicy.get_network_policy(self.context,
self._network['id'])
self.assertIsNone(policy_obj)
def test_detach_port_nonexistent_port(self):
obj = self._create_test_policy()
self.assertRaises(n_exc.PortQosBindingNotFound,
obj.detach_port, 'non-existent-port')
def test_detach_network_nonexistent_network(self):
obj = self._create_test_policy()
self.assertRaises(n_exc.NetworkQosBindingNotFound,
obj.detach_network, 'non-existent-port')
def test_detach_port_nonexistent_policy(self):
policy_obj = policy.QosPolicy(self.context, **self.db_obj)
self.assertRaises(n_exc.PortQosBindingNotFound,
policy_obj.detach_port, self._port['id'])
def test_detach_network_nonexistent_policy(self):
policy_obj = policy.QosPolicy(self.context, **self.db_obj)
self.assertRaises(n_exc.NetworkQosBindingNotFound,
policy_obj.detach_network, self._network['id'])
def test_synthetic_rule_fields(self):
policy_obj, rule_obj = self._create_test_policy_with_bwrule()
policy_obj = policy.QosPolicy.get_object(self.context,
id=policy_obj.id)
self.assertEqual([rule_obj], policy_obj.rules)
def test_get_object_fetches_rules_non_lazily(self):
policy_obj, rule_obj = self._create_test_policy_with_bwrule()
policy_obj = policy.QosPolicy.get_object(self.context,
id=policy_obj.id)
self.assertEqual([rule_obj], policy_obj.rules)
primitive = policy_obj.obj_to_primitive()
self.assertNotEqual([], (primitive['versioned_object.data']['rules']))
def test_to_dict_returns_rules_as_dicts(self):
policy_obj, rule_obj = self._create_test_policy_with_bwrule()
policy_obj = policy.QosPolicy.get_object(self.context,
id=policy_obj.id)
obj_dict = policy_obj.to_dict()
rule_dict = rule_obj.to_dict()
# first make sure that to_dict() is still sane and does not return
# objects
for obj in (rule_dict, obj_dict):
self.assertIsInstance(obj, dict)
self.assertEqual(rule_dict, obj_dict['rules'][0])
def test_shared_default(self):
obj = self._test_class(self.context, **self.db_obj)
self.assertFalse(obj.shared)
def test_delete_not_allowed_if_policy_in_use_by_port(self):
obj = self._create_test_policy()
obj.attach_port(self._port['id'])
self.assertRaises(n_exc.QosPolicyInUse, obj.delete)
obj.detach_port(self._port['id'])
obj.delete()
def test_delete_not_allowed_if_policy_in_use_by_network(self):
obj = self._create_test_policy()
obj.attach_network(self._network['id'])
self.assertRaises(n_exc.QosPolicyInUse, obj.delete)
obj.detach_network(self._network['id'])
obj.delete()
def test_reload_rules_reloads_rules(self):
policy_obj, rule_obj = self._create_test_policy_with_bwrule()
self.assertEqual([], policy_obj.rules)
policy_obj.reload_rules()
self.assertEqual([rule_obj], policy_obj.rules)
def test_get_bound_tenant_ids_returns_set_of_tenant_ids(self):
obj = self._create_test_policy()
obj.attach_port(self._port['id'])
ids = self._test_class.get_bound_tenant_ids(self.context, obj['id'])
self.assertEqual(ids.pop(), self._port['tenant_id'])
self.assertEqual(len(ids), 0)
obj.detach_port(self._port['id'])
obj.delete()
@staticmethod
def _policy_through_version(obj, version):
primitive = obj.obj_to_primitive(target_version=version)
return policy.QosPolicy.clean_obj_from_primitive(primitive)
def _create_test_policy_with_bw_and_dscp(self):
policy_obj, rule_obj_band = self._create_test_policy_with_bwrule()
rule_fields = self.get_random_fields(obj_cls=rule.QosDscpMarkingRule)
rule_fields['qos_policy_id'] = policy_obj.id
rule_obj_dscp = rule.QosDscpMarkingRule(self.context, **rule_fields)
rule_obj_dscp.create()
policy_obj.reload_rules()
return policy_obj, rule_obj_band, rule_obj_dscp
def test_object_version(self):
policy_obj, rule_obj_band, rule_obj_dscp = (
self._create_test_policy_with_bw_and_dscp())
policy_obj_v1_1 = self._policy_through_version(policy_obj, '1.1')
self.assertIn(rule_obj_band, policy_obj_v1_1.rules)
self.assertIn(rule_obj_dscp, policy_obj_v1_1.rules)
self.assertEqual(policy_obj.VERSION, '1.1')
#TODO(davidsha) add testing for object version incrementation
def test_object_version_degradation_1_1_to_1_0(self):
policy_obj, rule_obj_band, rule_obj_dscp = (
self._create_test_policy_with_bw_and_dscp())
policy_obj_v1_0 = self._policy_through_version(policy_obj, '1.0')
self.assertIn(rule_obj_band, policy_obj_v1_0.rules)
self.assertNotIn(rule_obj_dscp, policy_obj_v1_0.rules)
#NOTE(mangelajo): we should not check .VERSION, since that's the
# local version on the class definition
|
apache-2.0
|
jrha/aquilon
|
lib/python2.6/aquilon/worker/templates/base.py
|
2
|
19094
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Any work by the broker to write out (or read in?) templates lives here."""
import os
import logging
from aquilon.exceptions_ import InternalError, IncompleteError
from aquilon.config import Config
from aquilon.worker.locks import lock_queue, CompileKey
from aquilon.worker.processes import write_file, read_file, remove_file
from aquilon.worker.templates.panutils import pan_assign, pan_variable
from mako.lookup import TemplateLookup
from aquilon.worker.formats.formatters import ObjectFormatter
LOGGER = logging.getLogger(__name__)
_config = Config()
TEMPLATE_EXTENSION = _config.get("panc", "template_extension")
class Plenary(object):
template_type = None
""" Specifies the PAN template type to generate """
handlers = {}
""" The handlers dictionary should have an entry for every subclass.
Typically this will be defined immediately after defining the
subclass.
"""
def __init__(self, dbobj=None, logger=LOGGER):
self.config = Config()
self.dbobj = dbobj
self.logger = logger
if self.template_type is None:
raise InternalError("Plenary class %s did not set the template "
"type" % self.__class__.__name__)
# Object templates live under the branch-specific build directory.
# Everything else lives under the common plenary directory.
if self.template_type == "object":
if not dbobj or not hasattr(dbobj, "branch"):
raise InternalError("Plenaries meant to be compiled need a DB "
"object that has a branch; got: %r" % dbobj)
self.dir = "%s/domains/%s/profiles" % (
self.config.get("broker", "builddir"), dbobj.branch.name)
else:
self.dir = self.config.get("broker", "plenarydir")
self.loadpath = None
self.plenary_template = None
self.plenary_core = None
self.new_content = None
# The following attributes are for stash/restore_stash
self.old_content = None
self.old_mtime = None
self.stashed = False
self.removed = False
self.changed = False
def __hash__(self):
"""Since equality is based on dbobj, just hash on it."""
return hash(self.dbobj)
def __eq__(self, other):
"""Plenary objects are equal if they describe the same object.
Technically this should probably also check that the class
matches. There are some odd cases when the plenary stores
extra information, currently ignored.
"""
if self.dbobj is None or other.dbobj is None:
return False
return self.dbobj == other.dbobj
def __str__(self):
"""For debug output."""
return "Plenary(%s)" % self.dbobj
@property
def plenary_directory(self):
""" Directory where the plenary template lives """
if self.loadpath and self.template_type != "object":
return "%s/%s/%s" % (self.dir, self.loadpath, self.plenary_core)
else:
return "%s/%s" % (self.dir, self.plenary_core)
@property
def plenary_file(self):
""" Full absolute path name of the plenary template """
return "%s/%s%s" % (self.plenary_directory, self.plenary_template,
TEMPLATE_EXTENSION)
@property
def plenary_template_name(self):
""" Name of the template as used by PAN, relative to the load path """
if self.plenary_core:
return "%s/%s" % (self.plenary_core, self.plenary_template)
else:
return self.plenary_template
def body(self, lines):
"""
The text of the template. By default, do nothing. A derived class can
override this to describe their own content.
They should do this by appending strings (each string
referring to a separate line of text in the template) into the
array. The array will already contain the appropriate header line for the
template.
"""
pass
def will_change(self):
self.stash()
if not self.new_content:
self.new_content = self._generate_content()
return self.old_content != self.new_content
def get_write_key(self):
if self.will_change():
return self.get_key()
return None
def get_remove_key(self):
"""Return the relevant key.
In many cases it will be more efficient just to create a full
compile lock and bypass this method.
"""
return self.get_key()
def get_key(self):
"""Base implementation assumes a full compile lock."""
return CompileKey(logger=self.logger)
def _generate_content(self):
"""Not meant to be overridden or called directly."""
lines = []
type = self.template_type
if type is not None and type != "":
type = type + " "
lines.append("%stemplate %s;" % (type, self.plenary_template_name))
lines.append("")
if self.template_type == "object":
if self.loadpath:
pan_variable(lines, "LOADPATH", [self.loadpath])
lines.append("")
pan_assign(lines, "/metadata/template/branch/name",
self.dbobj.branch.name)
pan_assign(lines, "/metadata/template/branch/type",
self.dbobj.branch.branch_type)
if self.dbobj.branch.branch_type == 'sandbox':
pan_assign(lines, "/metadata/template/branch/author",
self.dbobj.sandbox_author.name)
lines.append("")
self.body(lines)
return "\n".join(lines) + "\n"
def write(self, locked=False, content=None):
"""Write out the template.
If the content is unchanged, then the file will not be modified
(preserving the mtime).
Returns the number of files that were written.
If locked is True then it is assumed that error handling happens
higher in the call stack.
"""
if self.template_type == "object" and \
hasattr(self.dbobj, "personality") and \
self.dbobj.personality and \
not self.dbobj.personality.archetype.is_compileable:
return 0
if content is None:
if not self.new_content:
self.new_content = self._generate_content()
content = self.new_content
self.stash()
if self.old_content == content and \
not self.removed and not self.changed:
# optimise out the write (leaving the mtime good for ant)
# if nothing is actually changed
return 0
key = None
try:
if not locked:
key = self.get_write_key()
lock_queue.acquire(key)
if not os.path.exists(self.plenary_directory):
os.makedirs(self.plenary_directory)
write_file(self.plenary_file, content, logger=self.logger)
self.removed = False
if self.old_content != content:
self.changed = True
except Exception, e:
if not locked:
self.restore_stash()
raise e
finally:
if not locked:
lock_queue.release(key)
return 1
def read(self):
return read_file("", self.plenary_file, logger=self.logger)
def remove(self, locked=False):
"""
remove this plenary template
"""
key = None
try:
if not locked:
key = self.get_remove_key()
lock_queue.acquire(key)
self.stash()
remove_file(self.plenary_file, logger=self.logger)
try:
os.removedirs(self.plenary_directory)
except OSError:
pass
self.removed = True
# Most of the error handling routines would restore_stash...
# but there's no need here if the remove failed. :)
finally:
if not locked:
lock_queue.release(key)
return
def cleanup(self, domain, locked=False):
"""
remove all files related to an object template including
any intermediate build files
"""
key = None
try:
if not locked:
key = self.get_remove_key()
lock_queue.acquire(key)
if self.template_type == "object":
# Can't call remove() here because it relies on the new domain.
qdir = self.config.get("broker", "quattordir")
# Only one or the other of .xml/.xml.gz should be there...
# it doesn't hurt to clean up both.
xmldir = os.path.join(qdir, "build", "xml", domain,
self.plenary_core)
xmlfile = os.path.join(xmldir, self.plenary_template + ".xml")
remove_file(xmlfile, logger=self.logger)
xmlgzfile = xmlfile + ".gz"
remove_file(xmlgzfile, logger=self.logger)
# Name used up to and including panc 9.2
depfile = xmlfile + ".dep"
remove_file(depfile, logger=self.logger)
# Name used by panc 9.4 and higher
depfile = os.path.join(xmldir, self.plenary_template + ".dep")
remove_file(depfile, logger=self.logger)
try:
os.removedirs(xmldir)
except OSError:
pass
builddir = self.config.get("broker", "builddir")
maindir = os.path.join(builddir, "domains", domain,
"profiles", self.plenary_core)
mainfile = os.path.join(maindir, self.plenary_template +
TEMPLATE_EXTENSION)
remove_file(mainfile, logger=self.logger)
try:
os.removedirs(maindir)
except OSError:
pass
self.removed = True
else:
# Non-object templates do not depend on the domain, so calling
# remove() is fine
self.remove(locked=True)
except:
if not locked:
self.restore_stash()
raise
finally:
if not locked:
lock_queue.release(key)
def stash(self):
"""Record the state of the plenary to make restoration possible.
This should only be called while holding an appropriate lock.
"""
if self.stashed:
return
try:
self.old_content = self.read()
self.old_mtime = os.stat(self.plenary_file).st_mtime
except IOError:
self.old_content = None
self.stashed = True
def restore_stash(self):
"""Restore previous state of plenary.
This should only be called while holding an appropriate lock.
"""
if not self.stashed:
self.logger.info("Attempt to restore plenary '%s' "
"without having saved state." % self.plenary_file)
return
# Should this optimization be in use?
# if not self.changed and not self.removed:
# return
if (self.old_content is None):
self.remove(locked=True)
else:
self.write(locked=True, content=self.old_content)
atime = os.stat(self.plenary_file).st_atime
os.utime(self.plenary_file, (atime, self.old_mtime))
@staticmethod
def get_plenary(dbobj, logger=LOGGER):
if dbobj.__class__ not in Plenary.handlers:
raise InternalError("Class %s does not have a plenary handler" %
dbobj.__class__.__name__)
return Plenary.handlers[dbobj.__class__](dbobj, logger=logger)
def set_logger(self, logger):
self.logger = logger
class PlenaryCollection(object):
"""
A collection of plenary templates, presented behind a single
facade to make them appear as one template to the caller.
One use is for objects that logically have multiple plenary files
to subclass from this and append the real template objects into
self.plenaries.
Another is for commands to use this object directly for its
convenience methods around writing and rolling back plenary
templates.
This object cannot handle cases like a plenary file that
changes location, but it could handle storing a group of plenaries
that need to be removed and a second collection could handle another
group that needs to be written.
"""
def __init__(self, logger=LOGGER):
self.plenaries = []
self.logger = logger
def __hash__(self):
"""The hash just needs to be ballpark (and not clash with __eq__)."""
if self.plenaries:
return hash(self.plenaries[0])
return hash(None)
def __eq__(self, other):
"""Two collections are equal if they have all the same members.
This currently requires that the order by the same. It's good
enough for now - really we just want (for example) the
ServiceInstance plenary collection to evaluate as equal, and
those members will always be defined in the same order.
"""
if len(self.plenaries) != len(other.plenaries):
return False
for (i, j) in zip(self.plenaries, other.plenaries):
if i != j:
return False
return True
def __str__(self):
"""For debug output."""
return "PlenaryCollection(%s)" % ", ".join([str(plenary) for plenary
in self.plenaries])
def __iter__(self):
for plen in self.plenaries:
yield plen
def get_write_key(self):
keylist = []
for plen in self.plenaries:
keylist.append(plen.get_write_key())
return CompileKey.merge(keylist)
def get_remove_key(self):
keylist = []
for plen in self.plenaries:
keylist.append(plen.get_remove_key())
return CompileKey.merge(keylist)
def get_key(self):
# get_key doesn't make any sense for a plenary collection...
raise InternalError("get_key called on PlenaryCollection")
def stash(self):
for plen in self.plenaries:
plen.stash()
def restore_stash(self):
for plen in self.plenaries:
plen.restore_stash()
@property
def object_templates(self):
for plen in self.plenaries:
if isinstance(plen, PlenaryCollection):
for obj in plen.object_templates:
yield obj
elif plen.template_type == 'object':
yield plen.plenary_template_name
def write(self, locked=False, content=None):
# If locked is True, assume error handling happens higher
# in the stack.
total = 0
# Pre-stash all plenaries before attempting to write any
# of them. This way if an error occurs all can go through
# the same restore logic.
self.stash()
key = None
try:
if not locked:
key = self.get_write_key()
lock_queue.acquire(key)
for plen in self.plenaries:
# IncompleteError is almost pointless in this context, but
# it has the nice side effect of not updating the total.
try:
total += plen.write(locked=True, content=content)
except IncompleteError, err:
self.logger.client_info("Warning: %s" % err)
except:
if not locked:
self.restore_stash()
raise
finally:
if not locked:
lock_queue.release(key)
return total
def remove(self, locked=False):
self.stash()
key = None
try:
if not locked:
key = self.get_remove_key()
lock_queue.acquire(key)
for plen in self.plenaries:
plen.remove(locked=True)
except:
if not locked:
self.restore_stash()
raise
finally:
if not locked:
lock_queue.release(key)
def cleanup(self, domain, locked=False):
key = None
try:
if not locked:
key = self.get_remove_key()
lock_queue.acquire(key)
self.stash()
for plen in self.plenaries:
plen.cleanup(domain, locked=True)
except:
if not locked:
self.restore_stash()
raise
finally:
if not locked:
lock_queue.release(key)
def read(self):
# This should never be called, but we put it here
# just in-case, since the Plenary method is inappropriate.
raise InternalError("read called on PlenaryCollection")
def set_logger(self, logger):
for plen in self.plenaries:
plen.set_logger(logger)
def append(self, plenary):
plenary.set_logger(self.logger)
self.plenaries.append(plenary)
def extend(self, iterable):
for plenary in iterable:
plenary.set_logger(self.logger)
self.plenaries.append(plenary)
class TemplateFormatter(ObjectFormatter):
def __init__(self):
super(TemplateFormatter, self).__init__()
self.mako_dir = os.path.join(self.config.get("broker", "srcdir"), "lib", "python2.6",
"aquilon", "worker", "templates", "mako")
self.lookup_raw = TemplateLookup(directories=[os.path.join(self.mako_dir, "raw")],
imports=['from string import rstrip',
'from '
'aquilon.worker.formats.formatters '
'import shift'],
default_filters=['unicode', 'rstrip'])
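# ---------------------------------------------------------------------------
# Hedged illustration (not part of the upstream file): a minimal sketch of a
# Plenary subclass, following the handlers docstring above.  The class name,
# the "structure" template type choice, the plenary_core/plenary_template
# values and the pan path are illustrative assumptions only.
class PlenaryExampleData(Plenary):
    template_type = "structure"
    def __init__(self, dbobj=None, logger=LOGGER):
        Plenary.__init__(self, dbobj, logger=logger)
        self.plenary_core = "exampledata"
        self.plenary_template = "config"
    def body(self, lines):
        # Each appended string becomes one line of the generated template;
        # the "structure template ...;" header is added by _generate_content().
        pan_assign(lines, "/example/name", str(self.dbobj))
# As the handlers docstring suggests, the subclass would typically be
# registered right after its definition, e.g.
#     Plenary.handlers[ExampleDbClass] = PlenaryExampleData
# where ExampleDbClass stands for a hypothetical mapped database class.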
|
apache-2.0
|
meisterkleister/erpnext
|
erpnext/hr/report/monthly_salary_register/monthly_salary_register.py
|
52
|
4104
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
salary_slips = get_salary_slips(filters)
columns, earning_types, ded_types = get_columns(salary_slips)
ss_earning_map = get_ss_earning_map(salary_slips)
ss_ded_map = get_ss_ded_map(salary_slips)
data = []
for ss in salary_slips:
row = [ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
ss.company, ss.month, ss.leave_withut_pay, ss.payment_days]
for e in earning_types:
row.append(ss_earning_map.get(ss.name, {}).get(e))
row += [ss.arrear_amount, ss.leave_encashment_amount, ss.gross_pay]
for d in ded_types:
row.append(ss_ded_map.get(ss.name, {}).get(d))
row += [ss.total_deduction, ss.net_pay]
data.append(row)
return columns, data
def get_columns(salary_slips):
columns = [
_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120",
_("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
_("Company") + ":Link/Company:120", _("Month") + "::80", _("Leave Without Pay") + ":Float:130",
_("Payment Days") + ":Float:120"
]
earning_types = frappe.db.sql_list("""select distinct e_type from `tabSalary Slip Earning`
where ifnull(e_modified_amount, 0) != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
ded_types = frappe.db.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
where ifnull(d_modified_amount, 0) != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
columns = columns + [(e + ":Currency:120") for e in earning_types] + \
["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
"Gross Pay:Currency:120"] + [(d + ":Currency:120") for d in ded_types] + \
["Total Deduction:Currency:120", "Net Pay:Currency:120"]
return columns, earning_types, ded_types
def get_salary_slips(filters):
conditions, filters = get_conditions(filters)
salary_slips = frappe.db.sql("""select * from `tabSalary Slip` where docstatus = 1 %s
order by employee, month""" % conditions, filters, as_dict=1)
if not salary_slips:
msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
_(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)
return salary_slips
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
filters["month"] = month
conditions += " and month = %(month)s"
if filters.get("fiscal_year"): conditions += " and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
def get_ss_earning_map(salary_slips):
ss_earnings = frappe.db.sql("""select parent, e_type, e_modified_amount
from `tabSalary Slip Earning` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_earning_map = {}
for d in ss_earnings:
ss_earning_map.setdefault(d.parent, frappe._dict()).setdefault(d.e_type, [])
ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)
return ss_earning_map
def get_ss_ded_map(salary_slips):
ss_deductions = frappe.db.sql("""select parent, d_type, d_modified_amount
from `tabSalary Slip Deduction` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_ded_map = {}
for d in ss_deductions:
ss_ded_map.setdefault(d.parent, frappe._dict()).setdefault(d.d_type, [])
ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)
return ss_ded_map
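# ---------------------------------------------------------------------------
# Hedged illustration (not part of the upstream file): how execute() above
# consumes the nested {slip name: {component: amount}} maps when assembling
# report rows, shown with plain dicts instead of frappe query results.  All
# slip names, component names and figures below are made up.
if __name__ == '__main__':
    earning_types = ["Basic", "HRA"]
    ss_earning_map = {"SAL-0001": {"Basic": 1000.0, "HRA": 250.0},
                      "SAL-0002": {"Basic": 1200.0}}
    for name in ("SAL-0001", "SAL-0002"):
        row = [name]
        for e in earning_types:
            # components missing on a slip appear as None, as in execute()
            row.append(ss_earning_map.get(name, {}).get(e))
        print(row)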
|
agpl-3.0
|
keinstein/mopidy-tunein
|
tests/test_playlists.py
|
1
|
1780
|
from __future__ import unicode_literals
import io
import unittest
from mopidy_tunein import tunein
ASX = b"""<ASX version="3.0">
<TITLE>Example</TITLE>
<ENTRY>
<TITLE>Sample Title</TITLE>
<REF href="file:///tmp/foo" />
</ENTRY>
<ENTRY>
<TITLE>Example title</TITLE>
<REF href="file:///tmp/bar" />
</ENTRY>
<ENTRY>
<TITLE>Other title</TITLE>
<REF href="file:///tmp/baz" />
</ENTRY>
</ASX>
"""
SIMPLE_ASX = b"""<ASX version="3.0">
<ENTRY href="file:///tmp/foo" />
<ENTRY href="file:///tmp/bar" />
<ENTRY href="file:///tmp/baz" />
</ASX>
"""
OLD_ASX = b"""[Reference]
Ref1=file:///tmp/foo
Ref2=file:///tmp/bar
Ref3=file:///tmp/baz
"""
ASF_ASX = b"""[Reference]
Ref1=http://tmp.com/foo-mbr?MSWMExt=.asf
Ref2=mms://tmp.com:80/bar-mbr?mswmext=.asf
Ref3=http://tmp.com/baz
"""
class BaseAsxPlaylistTest(object):
valid = None
parse = staticmethod(tunein.parse_asx)
def test_parse_valid_playlist(self):
uris = list(self.parse(io.BytesIO(self.valid)))
expected = [b'file:///tmp/foo', b'file:///tmp/bar', b'file:///tmp/baz']
self.assertEqual(uris, expected)
class AsxPlaylistTest(BaseAsxPlaylistTest, unittest.TestCase):
valid = ASX
class AsxSimplePlaylistTest(BaseAsxPlaylistTest, unittest.TestCase):
valid = SIMPLE_ASX
class AsxOldPlaylistTest(BaseAsxPlaylistTest, unittest.TestCase):
valid = OLD_ASX
class PlaylistTest(unittest.TestCase):
parse = staticmethod(tunein.parse_asx)
def test_parse_asf_playlist(self):
uris = list(self.parse(io.BytesIO(ASF_ASX)))
expected = [b'mms://tmp.com/foo-mbr?mswmext=.asf',
b'mms://tmp.com:80/bar-mbr?mswmext=.asf',
b'http://tmp.com/baz']
self.assertEqual(uris, expected)
|
apache-2.0
|
gsmcmullin/libsbp
|
python/sbp/client/drivers/base_driver.py
|
9
|
1367
|
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Mark Fine <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
class BaseDriver(object):
"""
BaseDriver
The :class:`BaseDriver` class wraps IO sources of SBP messages and provides
context management.
Parameters
----------
handle : port
Stream of bytes to read from and write to.
"""
def __init__(self, handle):
self.handle = handle
def __enter__(self):
self.flush()
return self
def __exit__(self, *args):
self.flush()
self.close()
def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read.
"""
return self.handle.read(size)
def write(self, s):
"""
Write wrapper.
Parameters
----------
s : bytes
Bytes to write
"""
return self.handle.write(s)
def flush(self):
"""
Flush wrapper.
"""
self.handle.flush()
def close(self):
"""
Close wrapper.
"""
self.handle.close()
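# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream file): BaseDriver wraps any
# object exposing read/write/flush/close, so an ordinary file handle is enough
# for illustration.  The file name 'sbp.bin' is a hypothetical example.
if __name__ == '__main__':
    with BaseDriver(open('sbp.bin', 'rb')) as driver:
        chunk = driver.read(64)   # read up to 64 bytes from the wrapped handle
        print(len(chunk))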
|
lgpl-3.0
|
andreparames/odoo
|
addons/auth_openid/__init__.py
|
443
|
1090
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_users
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ndardenne/pymatgen
|
pymatgen/analysis/pourbaix/maker.py
|
2
|
12786
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import logging
import numpy as np
import itertools
from itertools import chain
from scipy.spatial import ConvexHull
from pymatgen.analysis.pourbaix.entry import MultiEntry, \
ion_or_solid_comp_object
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition
"""
Module containing analysis classes which compute a pourbaix diagram given a
target compound/element.
"""
from six.moves import zip
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "Sai Jayaraman"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "Nov 1, 2012"
logger = logging.getLogger(__name__)
PREFAC = 0.0591
MU_H2O = -2.4583
class PourbaixDiagram(object):
"""
Class to create a Pourbaix diagram from entries
Args:
entries: Entries list containing both Solids and Ions
comp_dict: Dictionary of compositions
"""
def __init__(self, entries, comp_dict=None):
self._solid_entries = list()
self._ion_entries = list()
for entry in entries:
if entry.phase_type == "Solid":
self._solid_entries.append(entry)
elif entry.phase_type == "Ion":
self._ion_entries.append(entry)
else:
raise Exception("Incorrect Phase type - needs to be \
Pourbaix entry of phase type Ion/Solid")
if len(self._ion_entries) == 0:
raise Exception("No ion phase. Equilibrium between ion/solid "
"is required to make a Pourbaix Diagram")
self._unprocessed_entries = self._solid_entries + self._ion_entries
self._elt_comp = comp_dict
if comp_dict:
self._multielement = True
pbx_elements = set()
for comp in comp_dict.keys():
for el in [el for el in
ion_or_solid_comp_object(comp).elements
if el not in ["H", "O"]]:
pbx_elements.add(el.symbol)
self.pourbaix_elements = pbx_elements
w = [comp_dict[key] for key in comp_dict]
A = []
for comp in comp_dict:
comp_obj = ion_or_solid_comp_object(comp)
Ai = []
for elt in self.pourbaix_elements:
Ai.append(comp_obj[elt])
A.append(Ai)
A = np.array(A).T.astype(float)
w = np.array(w)
A /= np.dot([a.sum() for a in A], w)
x = np.linalg.solve(A, w)
self._elt_comp = dict(zip(self.pourbaix_elements, x))
else:
self._multielement = False
self.pourbaix_elements = [el.symbol
for el in entries[0].composition.elements
if el.symbol not in ["H", "O"]]
self._elt_comp = {self.pourbaix_elements[0]: 1.0}
self._make_pourbaixdiagram()
def _create_conv_hull_data(self):
"""
Make data conducive to convex hull generator.
"""
if self._multielement:
self._all_entries = self._process_multielement_entries()
else:
self._all_entries = self._unprocessed_entries
entries_to_process = list()
for entry in self._all_entries:
entry.scale(entry.normalization_factor)
entry.correction += (- MU_H2O * entry.nH2O + entry.conc_term)
entries_to_process.append(entry)
self._qhull_entries = entries_to_process
return self._process_conv_hull_data(entries_to_process)
def _process_conv_hull_data(self, entries_to_process):
"""
From a sequence of ion+solid entries, generate the necessary data
for generation of the convex hull.
"""
data = []
for entry in entries_to_process:
row = [entry.npH, entry.nPhi, entry.g0]
data.append(row)
temp = sorted(zip(data, self._qhull_entries),
key=lambda x: x[0][2])
[data, self._qhull_entries] = list(zip(*temp))
return data
def _process_multielement_entries(self):
"""
Create entries for multi-element Pourbaix construction
"""
N = len(self._elt_comp) # No. of elements
entries = self._unprocessed_entries
el_list = self._elt_comp.keys()
comp_list = [self._elt_comp[el] for el in el_list]
list_of_entries = list()
for j in range(1, N + 1):
list_of_entries += list(itertools.combinations(
list(range(len(entries))), j))
processed_entries = list()
for entry_list in list_of_entries:
# Check if all elements in composition list are present in
# entry_list
if not (set([Element(el) for el in el_list]).issubset(
set(list(chain.from_iterable([entries[i].composition.keys()
for i in entry_list]))))):
continue
if len(entry_list) == 1:
# If only one entry in entry_list, then check if the composition matches with the set composition.
entry = entries[entry_list[0]]
dict_of_non_oh = dict(zip([key for key in entry.composition.keys() if key.symbol not in ["O", "H"]],
[entry.composition[key] for key in [key for key in entry.composition.keys() if key.symbol not in ["O", "H"]]]))
if Composition(dict(zip(self._elt_comp.keys(), [self._elt_comp[key] / min([self._elt_comp[key] for key in self._elt_comp.keys()])
for key in self._elt_comp.keys()]))).reduced_formula ==\
Composition(dict(zip(dict_of_non_oh.keys(), [dict_of_non_oh[el] / min([dict_of_non_oh[key] for key in dict_of_non_oh.keys()])
for el in dict_of_non_oh.keys()]))).reduced_formula:
processed_entries.append(MultiEntry([entry], [1.0]))
continue
A = [[0.0] * (len(entry_list) - 1) for _ in range(len(entry_list) - 1)]
multi_entries = [entries[j] for j in entry_list]
entry0 = entries[entry_list[0]]
comp0 = entry0.composition
if entry0.phase_type == "Solid":
red_fac = comp0.get_reduced_composition_and_factor()[1]
else:
red_fac = 1.0
sum_nel = sum([comp0[el] / red_fac for el in el_list])
b = [comp0[Element(el_list[i])] / red_fac - comp_list[i] * sum_nel
for i in range(1, len(entry_list))]
for j in range(1, len(entry_list)):
entry = entries[entry_list[j]]
comp = entry.composition
if entry.phase_type == "Solid":
red_fac = comp.get_reduced_composition_and_factor()[1]
else:
red_fac = 1.0
sum_nel = sum([comp[el] / red_fac for el in el_list])
for i in range(1, len(entry_list)):
el = el_list[i]
A[i-1][j-1] = comp_list[i] * sum_nel -\
comp[Element(el)] / red_fac
try:
weights = np.linalg.solve(np.array(A), np.array(b))
except np.linalg.linalg.LinAlgError as err:
if 'Singular matrix' in err.message:
continue
else:
raise Exception("Unknown Error message!")
if not(np.all(weights > 0.0)):
continue
weights = list(weights)
weights.insert(0, 1.0)
super_entry = MultiEntry(multi_entries, weights)
processed_entries.append(super_entry)
return processed_entries
def _make_pourbaixdiagram(self):
"""
Calculates entries on the convex hull in the dual space.
"""
stable_entries = set()
self._qhull_data = self._create_conv_hull_data()
dim = len(self._qhull_data[0])
if len(self._qhull_data) < dim:
raise Exception("Can only do elements with at-least 3 entries"
" for now")
if len(self._qhull_data) == dim:
self._facets = [list(range(dim))]
else:
facets_hull = np.array(ConvexHull(self._qhull_data).simplices)
self._facets = np.sort(np.array(facets_hull))
logger.debug("Final facets are\n{}".format(self._facets))
logger.debug("Removing vertical facets...")
vert_facets_removed = list()
for facet in self._facets:
facetmatrix = np.zeros((len(facet), len(facet)))
count = 0
for vertex in facet:
facetmatrix[count] = np.array(self._qhull_data[vertex])
facetmatrix[count, dim - 1] = 1
count += 1
if abs(np.linalg.det(facetmatrix)) > 1e-8:
vert_facets_removed.append(facet)
else:
logger.debug("Removing vertical facet : {}".format(facet))
logger.debug("Removing UCH facets by eliminating normal.z >0 ...")
# Find center of hull
vertices = set()
for facet in vert_facets_removed:
for vertex in facet:
vertices.add(vertex)
c = [0.0, 0.0, 0.0]
c[0] = np.average([self._qhull_data[vertex][0]
for vertex in vertices])
c[1] = np.average([self._qhull_data[vertex][1]
for vertex in vertices])
c[2] = np.average([self._qhull_data[vertex][2]
for vertex in vertices])
# Shift origin to c
new_qhull_data = np.array(self._qhull_data)
for vertex in vertices:
new_qhull_data[vertex] -= c
# For each facet, find normal n, find dot product with P, and
# check if this is -ve
final_facets = list()
for facet in vert_facets_removed:
a = new_qhull_data[facet[1]] - new_qhull_data[facet[0]]
b = new_qhull_data[facet[2]] - new_qhull_data[facet[0]]
n = np.cross(a, b)
val = np.dot(n, new_qhull_data[facet[0]])
if val < 0:
n = -n
if n[2] <= 0:
final_facets.append(facet)
else:
logger.debug("Removing UCH facet : {}".format(facet))
final_facets = np.array(final_facets)
self._facets = final_facets
stable_vertices = set()
for facet in self._facets:
for vertex in facet:
stable_vertices.add(vertex)
stable_entries.add(self._qhull_entries[vertex])
self._stable_entries = stable_entries
self._vertices = stable_vertices
@property
def facets(self):
"""
Facets of the convex hull in the form of [[1,2,3],[4,5,6]...]
"""
return self._facets
@property
def qhull_data(self):
"""
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
"""
return self._qhull_data
@property
def qhull_entries(self):
"""
Return qhull entries
"""
return self._qhull_entries
@property
def stable_entries(self):
"""
Returns the stable entries in the Pourbaix diagram.
"""
return list(self._stable_entries)
@property
def unstable_entries(self):
"""
Returns all unstable entries in the Pourbaix diagram
"""
return [e for e in self.qhull_entries if e not in self.stable_entries]
@property
def all_entries(self):
"""
Return all entries
"""
return self._all_entries
@property
def vertices(self):
"""
Return vertices of the convex hull
"""
return self._vertices
@property
def unprocessed_entries(self):
"""
Return unprocessed entries
"""
return self._unprocessed_entries
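# ---------------------------------------------------------------------------
# Hedged illustration (not part of the upstream file): the lower-hull filtering
# idea used by _make_pourbaixdiagram above, on a toy set of (npH, nPhi, g0)-like
# rows.  Facets are oriented outwards from the hull centre; those whose normal
# has a positive z component belong to the upper hull and are discarded.
if __name__ == "__main__":
    pts = np.array([[0., 0., 0.], [1., 0., 0.2], [0., 1., 0.3],
                    [1., 1., 0.1], [0.5, 0.5, -0.5]])
    hull = ConvexHull(pts)
    centre = pts[sorted({v for f in hull.simplices for v in f})].mean(axis=0)
    lower_facets = []
    for facet in hull.simplices:
        a = pts[facet[1]] - pts[facet[0]]
        b = pts[facet[2]] - pts[facet[0]]
        n = np.cross(a, b)
        if np.dot(n, pts[facet[0]] - centre) < 0:
            n = -n                      # flip so the normal points outwards
        if n[2] <= 0:
            lower_facets.append(facet)  # keep only downward-facing facets
    print("%d of %d facets are on the lower hull" %
          (len(lower_facets), len(hull.simplices)))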
|
mit
|
cgrebeld/pymel
|
pymel/util/enum.py
|
1
|
16042
|
# -*- coding: utf-8 -*-
# enum.py
# Part of enum, a package providing enumerated types for Python.
#
# Copyright © 2007 Ben Finney
# This is free software; you may copy, modify and/or distribute this work
# under the terms of the GNU General Public License, version 2 or later
# or, at your option, the terms of the Python license.
"""Robust enumerated type support in Python
This package provides a module for robust enumerations in Python.
An enumeration object is created with a sequence of string arguments
to the Enum() constructor:
>>> from enum import Enum
>>> Colours = Enum('Colours', ['red', 'blue', 'green'])
>>> Weekdays = Enum('Weekdays', ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
The return value is an immutable sequence object with a value for each
of the string arguments. Each value is also available as an attribute
named from the corresponding string argument:
>>> pizza_night = Weekdays[4]
>>> shirt_colour = Colours.green
The values are constants that can be compared with values from
the same enumeration, as well as with integers or strings; comparison with other
values will invoke Python's fallback comparisons:
>>> pizza_night == Weekdays.fri
True
>>> shirt_colour > Colours.red
True
>>> shirt_colour == "green"
True
Each value from an enumeration exports its sequence index
as an integer, and can be coerced to a simple string matching the
original arguments used to create the enumeration:
>>> str(pizza_night)
'fri'
>>> shirt_colour.index
2
"""
__author_name__ = "Ben Finney"
__author_email__ = "[email protected]"
#__author__ = "%s <%s>" % (__author_name__, __author_email__) # confuses epydoc
__date__ = "2007-01-24"
__copyright__ = "Copyright © %s %s" % (
__date__.split('-')[0], __author_name__
)
__license__ = "Choice of GPL or Python license"
__url__ = "http://cheeseshop.python.org/pypi/enum/"
__version__ = "0.4.3"
import operator
class EnumException(Exception):
""" Base class for all exceptions in this module """
def __init__(self):
if self.__class__ is EnumException:
raise NotImplementedError, \
"%s is an abstract class for subclassing" % self.__class__
class EnumEmptyError(AssertionError, EnumException):
""" Raised when attempting to create an empty enumeration """
def __str__(self):
return "Enumerations cannot be empty"
class EnumBadKeyError(TypeError, EnumException):
""" Raised when creating an Enum with non-string keys """
def __init__(self, key):
self.key = key
def __str__(self):
return "Enumeration keys must be strings: %s" % (self.key,)
class EnumImmutableError(TypeError, EnumException):
""" Raised when attempting to modify an Enum """
def __init__(self, *args):
self.args = args
def __str__(self):
return "Enumeration does not allow modification"
class EnumValue(object):
""" A specific value of an enumerated type """
def __init__(self, enumtype, index, key, doc=None):
""" Set up a new instance """
self.__enumtype = enumtype
self.__index = index
self.__key = key
self.__doc = doc
def __get_enumtype(self):
return self.__enumtype
enumtype = property(__get_enumtype)
def __get_key(self):
return self.__key
key = property(__get_key)
def __str__(self):
return "%s" % (self.key)
def __int__(self):
return self.index
def __get_index(self):
return self.__index
index = property(__get_index)
def __repr__(self):
if self.__doc:
return "EnumValue(%r, %r, %r, %r, %r)" % (
self.__enumtype._name,
self.__index,
self.__key,
self.__doc,
)
else:
return "EnumValue(%r, %r, %r)" % (
self.__enumtype._name,
self.__index,
self.__key,
)
def __hash__(self):
return hash(self.__index)
# def __cmp__(self, other):
# result = NotImplemented
# self_type = self.enumtype
# try:
# assert self_type == other.enumtype
# result = cmp(self.index, other.index)
# except (AssertionError, AttributeError):
# result = NotImplemented
#
# return result
def __cmp__(self, other):
result = NotImplemented
self_type = self.enumtype
try:
assert self_type == other.enumtype
result = cmp(self.index, other.index)
except (AssertionError, AttributeError):
if isinstance(other, basestring):
result=cmp(self.key, other)
elif isinstance(other, int):
result=cmp(self.index, other)
else:
result = NotImplemented
return result
class Enum(object):
""" Enumerated type """
def __init__(self, name, keys, **kwargs):
""" Create an enumeration instance """
if not keys:
raise EnumEmptyError()
if operator.isMappingType(keys):
reverse = dict( [ (v,k) for k,v in keys.items() ] )
keygen = [ ( v, reverse[v]) for v in sorted(reverse.keys()) ]
values = {}
else:
keygen = enumerate( keys )
values = [None] * len(keys)
value_type= kwargs.get('value_type', EnumValue)
#keys = tuple(keys)
docs = {}
keyDict = {}
for val, key in keygen:
#print val, key
kwargs = {}
if (isinstance(key, tuple) or isinstance(key, list)) and len(key) == 2:
key, doc = key
docs[val]=doc
kwargs['doc'] = doc
value = value_type(self, val, key, **kwargs)
values[val] = value
keyDict[key] = val
try:
super(Enum, self).__setattr__(key, value)
except TypeError, e:
raise EnumBadKeyError(key)
super(Enum, self).__setattr__('_keys', keyDict)
super(Enum, self).__setattr__('_values', values)
super(Enum, self).__setattr__('_docs', docs)
super(Enum, self).__setattr__('_name', name)
def __repr__(self):
return '%s(\n%s)' % (self.__class__.__name__, ',\n'.join([ repr(v) for v in self.values()]))
def __str__(self):
return '%s%s' % (self.__class__.__name__, self.keys())
def __setattr__(self, name, value):
raise EnumImmutableError(name)
def __delattr__(self, name):
raise EnumImmutableError(name)
def __len__(self):
return len(self._values)
def __getitem__(self, index):
return self._values[index]
def __setitem__(self, index, value):
raise EnumImmutableError(index)
def __delitem__(self, index):
raise EnumImmutableError(index)
def __iter__(self):
return iter(self._values)
def __contains__(self, value):
is_member = False
if isinstance(value, basestring):
is_member = (value in self._keys)
else:
try:
is_member = (value in self._values)
except Exception:
is_member = False
return is_member
def getIndex(self, key):
"""
get an index value from a key. this method always returns an index. if a valid index is passed instead of a key, the index will
be returned unchanged. this is useful when you need an index, but are not certain whether you are starting with a key or an index.
>>> units = Enum('units', ['invalid', 'inches', 'feet', 'yards', 'miles', 'millimeters', 'centimeters', 'kilometers', 'meters'])
>>> units.getIndex('inches')
1
>>> units.getIndex(3)
3
>>> units.getIndex('hectares')
Traceback (most recent call last):
...
ValueError: invalid enumerator key: 'hectares'
>>> units.getIndex(10)
Traceback (most recent call last):
...
ValueError: invalid enumerator index: 10
"""
if isinstance(key, int):
# got a potential index : checking if it's valid
if key in self._values:
return key
else:
raise ValueError, "invalid enumerator index: %r" % key
else:
# got a key: retrieving index
try:
return self._keys[str(key)]
except:
raise ValueError, "invalid enumerator key: %r" % key
def getKey(self, index):
"""
get a key value from an index. this method always returns a key. if a valid key is passed instead of an index, the key will
be returned unchanged. this is useful when you need a key, but are not certain whether you are starting with a key or an index.
>>> units = Enum('units', ['invalid', 'inches', 'feet', 'yards', 'miles', 'millimeters', 'centimeters', 'kilometers', 'meters'])
>>> units.getKey(2)
'feet'
>>> units.getKey('inches')
'inches'
>>> units.getKey(10)
Traceback (most recent call last):
...
ValueError: invalid enumerator index: 10
>>> units.getKey('hectares')
Traceback (most recent call last):
...
ValueError: invalid enumerator key: 'hectares'
"""
if isinstance(index, int):
# got an index: retrieving key
try:
return self._values[index].key
except:
raise ValueError, "invalid enumerator index: %r" % index
else:
# got a potential key : checking if it's valid
if str(index) in self._keys:
return index
else:
raise ValueError, "invalid enumerator key: %r" % index
def values(self):
"return a list of `EnumValue`s"
if operator.isMappingType(self._values):
return tuple([ self._values[k] for k in sorted(self._values.keys()) ])
else:
return self._values
def keys(self):
"return a list of keys as strings"
if operator.isMappingType(self._values):
return tuple([ self._values[k].key for k in sorted(self._values.keys()) ])
else:
return tuple([ v.key for v in self._values ])
import utilitytypes
class EnumDict(utilitytypes.EquivalencePairs):
"""
This class provides a dictionary type for storing enumerations. Keys are string labels, while
values are enumerated integers.
To instantiate, pass a sequence of string arguments to the EnumDict() constructor:
>>> from enum import EnumDict
>>> Colours = EnumDict(['red', 'blue', 'green'])
>>> Weekdays = EnumDict(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
>>> Weekdays
{'fri': 4, 'mon': 0, 'sat': 5, 'sun': 6, 'thu': 3, 'tue': 1, 'wed': 2}
Alternately, a dictionary of label-value pairs can be provided:
>>> Numbers = EnumDict({'one': 1, 'two': 2, 'hundred' : 100, 'thousand' : 1000 } )
To convert from one representation to another, just use normal dictionary retrieval, it
works in either direction:
>>> Weekdays[4]
'fri'
>>> Weekdays['fri']
4
If you need a particular representation, but don't know what you're starting from ( for
example, a value that was passed as an argument ) you can use `EnumDict.key` or
`EnumDict.value`:
>>> Weekdays.value(3)
3
>>> Weekdays.value('thu')
3
>>> Weekdays.key(2)
'wed'
>>> Weekdays.key('wed')
'wed'
"""
def __init__(self, keys, **kwargs):
""" Create an enumeration instance """
if not keys:
raise EnumEmptyError()
if operator.isMappingType(keys):
items = keys.items()
if isinstance(items[0][0],int):
byKey = dict( (k,v) for v,k in items )
else:
byKey = keys
else:
byKey = dict( (k,v) for v,k in enumerate( keys ) )
super(EnumDict,self).__init__(byKey)
# for key, value in byKey.items():
# try:
# super(EnumDict, self).__setattr__(key, value)
# except TypeError, e:
# raise EnumBadKeyError(key)
#
# super(EnumDict, self).__setattr__('_reverse', {})
# self.update(byKey)
#
# def __setattr__(self, name, value):
# raise EnumImmutableError(name)
#
# def __delattr__(self, name):
# raise EnumImmutableError(name)
#
# def __setitem__(self, index, value):
# raise EnumImmutableError(index)
#
# def __delitem__(self, index):
# raise EnumImmutableError(index)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self)
def value(self, key):
"""
get an index value from a key. this method always returns an index. if a valid index is passed instead of a key, the index will
be returned unchanged. this is useful when you need an index, but are not certain whether you are starting with a key or an index.
>>> units = EnumDict(['invalid', 'inches', 'feet', 'yards', 'miles', 'millimeters', 'centimeters', 'kilometers', 'meters'])
>>> units.value('inches')
1
>>> units.value(3)
3
>>> units.value('hectares')
Traceback (most recent call last):
...
ValueError: invalid enumerator key: 'hectares'
>>> units.value(10)
Traceback (most recent call last):
...
ValueError: invalid enumerator value: 10
"""
if isinstance(key, int):
# got a potential index : checking if it's valid
if key in self.values():
return key
else:
raise ValueError, "invalid enumerator value: %r" % key
else:
# got a key: retrieving index
try:
return dict.__getitem__(self, key)
except KeyError:
raise ValueError, "invalid enumerator key: %r" % key
def key(self, index):
"""
get a key value from an index. this method always returns a key. if a valid key is passed instead of an index, the key will
be returned unchanged. this is useful when you need a key, but are not certain whether you are starting with a key or an index.
>>> units = EnumDict(['invalid', 'inches', 'feet', 'yards', 'miles', 'millimeters', 'centimeters', 'kilometers', 'meters'])
>>> units.key(2)
'feet'
>>> units.key('inches')
'inches'
>>> units.key(10)
Traceback (most recent call last):
...
ValueError: invalid enumerator value: 10
>>> units.key('hectares')
Traceback (most recent call last):
...
ValueError: invalid enumerator key: 'hectares'
"""
if isinstance(index, int):
# got an index: retrieving key
try:
return self._reverse[index]
except KeyError:
raise ValueError( "invalid enumerator value: %r" % index )
else:
# got a potential key : checking if it's valid
if index in dict.keys(self):
return index
else:
raise ValueError( "invalid enumerator key: %r" % index)
def values(self):
"return a list of ordered integer values"
return sorted(dict.values(self))
def keys(self):
"return a list of keys as strings ordered by their enumerator value"
return [ self._reverse[v] for v in self.values() ]
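# ---------------------------------------------------------------------------
# Hedged illustration (not part of the upstream file): Enum keys may also be
# given as (key, doc) pairs, which Enum.__init__ above unpacks into per-value
# docstrings.  The enumeration below is a made-up example.
if __name__ == '__main__':
    Axis = Enum('Axis', [('x', 'first axis'), ('y', 'second axis'), 'z'])
    print "%d %s" % (Axis.x.index, Axis.y)   # -> 0 y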
|
bsd-3-clause
|
fhaoquan/kbengine
|
kbe/src/lib/python/Lib/encodings/ptcp154.py
|
219
|
14015
|
""" Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ptcp154',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE (DEL)
'\u0496' # 0x80 -> CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
'\u0492' # 0x81 -> CYRILLIC CAPITAL LETTER GHE WITH STROKE
'\u04ee' # 0x82 -> CYRILLIC CAPITAL LETTER U WITH MACRON
'\u0493' # 0x83 -> CYRILLIC SMALL LETTER GHE WITH STROKE
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u04b6' # 0x86 -> CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
'\u04ae' # 0x87 -> CYRILLIC CAPITAL LETTER STRAIGHT U
'\u04b2' # 0x88 -> CYRILLIC CAPITAL LETTER HA WITH DESCENDER
'\u04af' # 0x89 -> CYRILLIC SMALL LETTER STRAIGHT U
'\u04a0' # 0x8A -> CYRILLIC CAPITAL LETTER BASHKIR KA
'\u04e2' # 0x8B -> CYRILLIC CAPITAL LETTER I WITH MACRON
'\u04a2' # 0x8C -> CYRILLIC CAPITAL LETTER EN WITH DESCENDER
'\u049a' # 0x8D -> CYRILLIC CAPITAL LETTER KA WITH DESCENDER
'\u04ba' # 0x8E -> CYRILLIC CAPITAL LETTER SHHA
'\u04b8' # 0x8F -> CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
'\u0497' # 0x90 -> CYRILLIC SMALL LETTER ZHE WITH DESCENDER
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u04b3' # 0x98 -> CYRILLIC SMALL LETTER HA WITH DESCENDER
'\u04b7' # 0x99 -> CYRILLIC SMALL LETTER CHE WITH DESCENDER
'\u04a1' # 0x9A -> CYRILLIC SMALL LETTER BASHKIR KA
'\u04e3' # 0x9B -> CYRILLIC SMALL LETTER I WITH MACRON
'\u04a3' # 0x9C -> CYRILLIC SMALL LETTER EN WITH DESCENDER
'\u049b' # 0x9D -> CYRILLIC SMALL LETTER KA WITH DESCENDER
'\u04bb' # 0x9E -> CYRILLIC SMALL LETTER SHHA
'\u04b9' # 0x9F -> CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U (Byelorussian)
'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
'\u04e8' # 0xA4 -> CYRILLIC CAPITAL LETTER BARRED O
'\u0498' # 0xA5 -> CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
'\u04b0' # 0xA6 -> CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
'\xa7' # 0xA7 -> SECTION SIGN
'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u04d8' # 0xAA -> CYRILLIC CAPITAL LETTER SCHWA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\u04ef' # 0xAD -> CYRILLIC SMALL LETTER U WITH MACRON
'\xae' # 0xAE -> REGISTERED SIGN
'\u049c' # 0xAF -> CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
'\xb0' # 0xB0 -> DEGREE SIGN
'\u04b1' # 0xB1 -> CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0499' # 0xB4 -> CYRILLIC SMALL LETTER ZE WITH DESCENDER
'\u04e9' # 0xB5 -> CYRILLIC SMALL LETTER BARRED O
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
'\u2116' # 0xB9 -> NUMERO SIGN
'\u04d9' # 0xBA -> CYRILLIC SMALL LETTER SCHWA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
'\u04aa' # 0xBD -> CYRILLIC CAPITAL LETTER ES WITH DESCENDER
'\u04ab' # 0xBE -> CYRILLIC SMALL LETTER ES WITH DESCENDER
'\u049d' # 0xBF -> CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
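### Usage sketch (illustrative only; the byte values follow the table above,
### where 0xC0 maps to U+0410 and 0xE1 maps to U+0431):
#
#   codecs.charmap_decode(b'\xc0\xe1', 'strict', decoding_table)    # -> ('\u0410\u0431', 2)
#   codecs.charmap_encode('\u0410\u0431', 'strict', encoding_table) # -> (b'\xc0\xe1', 2)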
|
lgpl-3.0
|
frodopwns/Flask-User-starter-app
|
app/core/models.py
|
5
|
2664
|
# Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: Ling Thio <[email protected]>
from flask_user import UserMixin
from flask_user.forms import RegisterForm
from flask_wtf import Form
from wtforms import StringField, SubmitField, validators
from app import db
# Define the User data model. Make sure to add the flask_user.UserMixin !!
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
# User authentication information (required for Flask-User)
email = db.Column(db.Unicode(255), nullable=False, server_default=u'', unique=True)
confirmed_at = db.Column(db.DateTime())
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
# User information
first_name = db.Column(db.Unicode(50), nullable=False, server_default=u'')
last_name = db.Column(db.Unicode(50), nullable=False, server_default=u'')
# Relationships
roles = db.relationship('Role', secondary='users_roles',
backref=db.backref('users', lazy='dynamic'))
# Define the Role data model
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), nullable=False, server_default=u'', unique=True) # for @roles_accepted()
label = db.Column(db.Unicode(255), server_default=u'') # for display purposes
# Define the UserRoles association model
class UsersRoles(db.Model):
__tablename__ = 'users_roles'
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('users.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('roles.id', ondelete='CASCADE'))
# Define the User registration form
# It augments the Flask-User RegisterForm with additional fields
class MyRegisterForm(RegisterForm):
first_name = StringField('First name', validators=[
validators.DataRequired('First name is required')])
last_name = StringField('Last name', validators=[
validators.DataRequired('Last name is required')])
# Define the User profile form
class UserProfileForm(Form):
first_name = StringField('First name', validators=[
validators.DataRequired('First name is required')])
last_name = StringField('Last name', validators=[
validators.DataRequired('Last name is required')])
submit = SubmitField('Save')
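# Usage sketch, for illustration only (assumes an application context and the initialised
# SQLAlchemy `db` imported above; all attribute values below are placeholders):
#
#   db.create_all()
#   role = Role(name='admin', label=u'Administrator')
#   user = User(email=u'[email protected]', password='<hashed password>', active=True,
#               first_name=u'Ada', last_name=u'Lovelace')
#   user.roles.append(role)
#   db.session.add(user)
#   db.session.commit()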
|
bsd-2-clause
|
hamish2014/FreeCAD_drawing_dimensioning
|
drawingDimensioning/textAdd.py
|
1
|
3008
|
# This Python file uses the following encoding: utf-8
from drawingDimensioning.py3_helpers import unicode, encode_if_py2
from drawingDimensioning.command import *
d = DimensioningCommand()
def textSVG( x, y, text='text', rotation=0.0, textRenderer_addText= defaultTextRenderer):
return '<g> %s </g>' % textRenderer_addText(x,y,text,rotation=rotation)
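# For illustration only: textSVG simply wraps whatever the configured text renderer
# produces in an SVG group, e.g.
#   textSVG(10, 20, 'hello', rotation=45.0)  ->  '<g> ...rendered <text> element... </g>'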
d.registerPreference( 'textRenderer_addText', ['inherit','5', 0], 'text properties (AddText)', kind='font' )
class text_widget:
def valueChanged( self, arg1):
d.text = arg1
def generateWidget( self, dimensioningProcess ):
self.lineEdit = QtGui.QLineEdit()
self.lineEdit.setText('text')
d.text = 'text'
self.lineEdit.textChanged.connect(self.valueChanged)
return self.lineEdit
def add_properties_to_dimension_object( self, obj ):
obj.addProperty("App::PropertyString", 'text', 'Parameters')
obj.text = encode_if_py2(d.text)
def get_values_from_dimension_object( self, obj, KWs ):
KWs['text'] = obj.text #should be unicode
d.dialogWidgets.append( text_widget() )
class rotation_widget:
def valueChanged( self, arg1):
d.rotation = arg1
def generateWidget( self, dimensioningProcess ):
self.spinbox = QtGui.QDoubleSpinBox()
self.spinbox.setValue(0)
d.rotation = 0
self.spinbox.setMinimum( -180 )
self.spinbox.setMaximum( 180 )
self.spinbox.setDecimals( 1 )
self.spinbox.setSingleStep( 5 )
self.spinbox.setSuffix(unicode('°','utf8'))
self.spinbox.valueChanged.connect(self.valueChanged)
return DimensioningTaskDialog_generate_row_hbox('rotation', self.spinbox)
def add_properties_to_dimension_object( self, obj ):
obj.addProperty("App::PropertyAngle", 'rotation', 'Parameters')
obj.rotation = d.rotation
def get_values_from_dimension_object( self, obj, KWs ):
KWs['rotation'] = obj.rotation #should be unicode
d.dialogWidgets.append( rotation_widget() )
def addText_preview(mouseX, mouseY):
return textSVG(mouseX, mouseY, d.text, d.rotation, **d.dimensionConstructorKWs )
def addText_clickHandler( x, y ):
d.selections = [ PlacementClick( x, y) ]
return 'createDimension:%s' % findUnusedObjectName('text')
class Proxy_textAdd( Proxy_DimensionObject_prototype ):
def dimensionProcess( self ):
return d
d.ProxyClass = Proxy_textAdd
d.proxy_svgFun = textSVG
class AddText:
def Activated(self):
V = getDrawingPageGUIVars()
d.activate( V, dialogTitle='Add Text', dialogIconPath= ':/dd/icons/textAdd.svg', endFunction=self.Activated )
previewDimension.initializePreview( d, addText_preview, addText_clickHandler)
def GetResources(self):
return {
'Pixmap' : ':/dd/icons/textAdd.svg' ,
'MenuText': 'Add text to drawing',
'ToolTip': 'Add text to drawing'
}
FreeCADGui.addCommand('dd_addText', AddText())
|
lgpl-2.1
|
AlexStarov/Shop
|
applications/delivery/management/commands/processing_delivery_send_general.py
|
1
|
15632
|
# -*- coding: utf-8 -*-
__author__ = 'AlexStarov'
from django.core.management.base import BaseCommand
class Command(BaseCommand, ):
from optparse import make_option
option_list = BaseCommand.option_list + (
make_option('--id', '--pk', '--delivery_id', '--delivery_pk',
action='store', type='int', dest='delivery_pk',
help=''),
make_option('--t', '--delivery_test', '--test',
action='store_true', dest='delivery_test',
help=''),
make_option('--g', '--delivery_general', '--general',
action='store_true', dest='delivery_general',
help=''),
)
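# Invocation sketch (for illustration only; the command name comes from this module's filename,
# the option names are declared above and the pk value is a placeholder):
#   python manage.py processing_delivery_send_general --delivery_pk=42 --general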
#self.verbosity = int(options.get('verbosity'))
#def add_arguments(self, parser):
# parser.add_argument('delivery_id', nargs='+', type=int)
def handle(self, *args, **options):
from applications.delivery.models import Delivery
try:
deliveryes = Delivery.objects.filter(delivery_test=False,
send_test=True,
send_general=False,
type__in=[1, 2, 3, ], )
except Delivery.DoesNotExist:
deliveryes = None
else:
from applications.delivery.models import EmailMiddleDelivery
for delivery in deliveryes:
# print 'delivery', delivery
try:
EmailMiddleDelivery.objects.\
get(delivery=delivery,
send_test=False,
send_general=True,
updated_at__lte=delivery.updated_at, )
except:
""" Создаем ссылочку на отсылку рассылки """
email_middle_delivery = EmailMiddleDelivery()
email_middle_delivery.delivery = delivery
email_middle_delivery.delivery_test_send = False
email_middle_delivery.delivery_send = True
email_middle_delivery.save()
""" Закрываем отсылку теста в самой рассылке """
delivery.send_general = True
delivery.save()
""" Отсылаем тестовое письмо """
from django.utils.html import strip_tags
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.yandex.ru'
EMAIL_PORT = 587
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = ''
from django.core.mail import get_connection
backend = get_connection(backend='django.core.mail.backends.smtp.EmailBackend',
host=EMAIL_HOST,
port=EMAIL_PORT,
username=EMAIL_HOST_USER,
password=EMAIL_HOST_PASSWORD,
use_tls=EMAIL_USE_TLS,
fail_silently=False, )
from django.core.mail import EmailMultiAlternatives
from proj.settings import Email_MANAGER
from applications.authModel.models import Email
""" Создаем указатели на E-Mail адреса рассылки """
try:
emails = Email.objects.filter(bad_email=False, )
except Email.DoesNotExist:
emails = None
""" Здесь нужно помудрить с коммитом """
from applications.delivery.models import EmailForDelivery
from applications.delivery.utils import parsing
i = 0
time = 0
for real_email in emails:
i += 1
# if i < 125:
# continue
email = EmailForDelivery.objects.create(delivery=email_middle_delivery,
email=real_email, )
""" Отсылка """
msg = EmailMultiAlternatives(subject=delivery.subject,
body=strip_tags(parsing(value=delivery.html,
key=email.key, ), ),
from_email='[email protected]',
to=[real_email.email, ],
connection=backend, )
msg.attach_alternative(content=parsing(value=delivery.html,
key=email.key, ),
mimetype="text/html", )
msg.content_subtype = "html"
try:
msg.send(fail_silently=False, )
except Exception as e:
msg = EmailMultiAlternatives(subject='Error for subject: %s' % delivery.subject,
body='Error: %s - E-Mail: %s - real_email.pk: %d' % (e, real_email.email, real_email.pk, ),
from_email='[email protected]',
to=['[email protected]', ],
connection=backend, )
msg.send(fail_silently=True, )
else:
print 'i: ', i, 'Pk: ', real_email.pk, ' - ', real_email.email
from random import randrange
time1 = randrange(6, 12, )
time2 = randrange(6, 12, )
time += time1 + time2
print 'Time1: ', time1, ' Time2: ', time2, ' Time all: ', time1+time2, ' average time: ', time/i
from time import sleep
sleep(time1, )
print 'Next'
sleep(time2, )
def hernya2():
try:
deliveryes = Delivery.objects.filter(delivery_test=False, )
except Delivery.DoesNotExist:
deliveryes = None
else:
for delivery in deliveryes:
try:
aaa=EmailMiddleDelivery.objects.\
get(delivery=delivery, updated_at__lte=delivery.updated_at, )
print aaa, delivery.updated_at
except:
email_middle_delivery = EmailMiddleDelivery()
email_middle_delivery.delivery = delivery
email_middle_delivery.delivery_test_send = False
email_middle_delivery.delivery_send = True
email_middle_delivery.save()
from django.utils.html import strip_tags
from django.core.mail import get_connection
backend = get_connection(backend='django.core.mail.backends.smtp.EmailBackend',
fail_silently=False, )
from django.core.mail import EmailMultiAlternatives
from proj.settings import Email_MANAGER
msg = EmailMultiAlternatives(subject=delivery.subject,
body=strip_tags(delivery.html, ),
from_email=u'[email protected]',
to=[real_email.email, ],
connection=backend, )
msg.attach_alternative(content=delivery.html,
mimetype="text/html", )
msg.content_subtype = "html"
print real_email.email
#try:
# # msg.send(fail_silently=False, )
#except Exception as inst:
# print type(inst, )
# print inst.args
# print inst
# else:
# email.send
# email.save()
#try:
# """ Берем 10 E-Mail адресов на которые мы еще не отсылали данную рассылку """
# emails = EmailForDelivery.objects.filter(delivery=email_middle_delivery,
# send=False, )[10]
#except EmailForDelivery.DoesNotExist:
# """ E-Mail адреса в этой рассылке закончились """
# emails = None
#else:
# emails = ', '.join(emails, )
# """ Отсылаем E-Mail на 10 адресатов """
def hernya():
from datetime import datetime
print datetime.now()
from applications.product.models import Category
try:
action_category = Category.objects.get(url=u'акции', )
except Category.DoesNotExist:
action_category = None
from applications.discount.models import Action
action_active = Action.objects.active()
if action_active:
print 'Action - ACTIVE:', action_active
for action in action_active:
products_of_action = action.product_in_action.all()
print 'All products:', products_of_action
# print action
"""
Если акция с автостартом,
то мы еЁ стартуем.
"""
if action.auto_start:
""" Включаем галочку 'Учавствует в акции' всем продуктам которые внесены в акцию
исключая продукты 'отсутсвующие на складе' """
products_of_action = action.product_in_action.exclude(is_availability=4, )
if len(products_of_action, ) > 0:
print 'Product auto_start:', products_of_action
for product in products_of_action:
""" Помечает товар как учавствующий в акции """
product.in_action = True
""" Добавляем категорию 'Акция' в товар """
product.category.add(action_category, )
product.save()
""" Удаляем товары учавствующие в активной акции но при этом 'отсутсвующие на складе' """
products_remove_from_action = action.product_in_action.exclude(is_availability__lt=4, )
if len(products_of_action, ) > 0:
print 'Product auto_start:', products_of_action
for product in products_remove_from_action:
""" Помечает товар как учавствующий в акции """
product.in_action = False
""" Добавляем категорию 'Акция' в товар """
product.category.remove(action_category, )
product.save()
action_not_active = Action.objects.not_active()
if action_not_active:
print 'Action - NOT ACTIVE:', action_not_active
for action in action_not_active:
products_of_action = action.product_in_action.all()
print 'All products:', products_of_action
# print action
"""
Если акция с авто окончанием,
то заканчиваем еЁ.
"""
if action.auto_end:
products_of_action = action.product_in_action.in_action()
if len(products_of_action, ) > 0:
print 'Product auto_end:', products_of_action
for product in products_of_action:
print 'Del product from Action: ', product
"""
Помечает товар как не учавствующий в акции
"""
product.category.remove(action_category, )
product.in_action = False
# """
# Меняем местами нынешнюю и акционные цены местами
# """
# price = product.price
# product.price = product.regular_price
# if action.auto_del_action_price:
# product.regular_price = 0
# else:
# product.regular_price = price
if action.auto_del_action_from_product:
product.action.remove(action, )
product.save()
if action.auto_del:
action.deleted = True
action.save()
# from applications.product.models import Product
# Product.objects.filter(is_availability=2, ).update(is_availability=5, )
# Product.objects.filter(is_availability=3, ).update(is_availability=2, )
# Product.objects.filter(is_availability=5, ).update(is_availability=3, )
""" Убираем галочку 'участвует в акции' всем продуктам у которых она почемуто установлена,
но при этом отсутвует хоть какая то акция """
from applications.product.models import Product
products = Product.objects.filter(in_action=True, action=None, ).update(in_action=False, )
print 'Products removed from the promotion because they were withdrawn from it: ', products
""" Убираем галочку 'участвует в акции' всем продуктам которые отсутсвуют на складе """
products = Product.objects.filter(in_action=True, is_availability=4, ).update(in_action=False, )
print 'Products removed from the promotion because they are out of stock: ', products
""" Делаем активной акционную категорию, если есть хоть один акционный товар """
all_actions_products = action_category.products.all()
if len(all_actions_products) != 0 and not action_category.is_active:
action_category.is_active = True
action_category.save()
elif len(all_actions_products) == 0 and action_category.is_active:
action_category.is_active = False
action_category.save()
|
apache-2.0
|
kybriainfotech/iSocioCRM
|
addons/product_extended/__init__.py
|
374
|
1068
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_extended
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jiangzhuo/kbengine
|
kbe/res/scripts/common/Lib/test/test_urllibnet.py
|
75
|
8933
|
import unittest
from test import support
import contextlib
import socket
import urllib.request
import sys
import os
import email.message
import time
support.requires('network')
class URLTimeoutTest(unittest.TestCase):
# XXX this test doesn't seem to test anything useful.
TIMEOUT = 30.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
with support.transient_internet("www.example.com"):
f = urllib.request.urlopen("http://www.example.com/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.reqest.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
http://www.example.com/ since there are a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
@contextlib.contextmanager
def urlopen(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
r = urllib.request.urlopen(*args, **kwargs)
try:
yield r
finally:
r.close()
def test_basic(self):
# Simple test expected to pass.
with self.urlopen("http://www.example.com/") as open_url:
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
self.assertTrue(open_url.read(), "calling 'read' failed")
def test_readlines(self):
# Test both readline and readlines.
with self.urlopen("http://www.example.com/") as open_url:
self.assertIsInstance(open_url.readline(), bytes,
"readline did not return a string")
self.assertIsInstance(open_url.readlines(), list,
"readlines did not return a list")
def test_info(self):
# Test 'info'.
with self.urlopen("http://www.example.com/") as open_url:
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
URL = "http://www.example.com/"
with self.urlopen(URL) as open_url:
gotten_url = open_url.geturl()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.example.com/XXXinvalidXXX"
with support.transient_internet(URL):
open_url = urllib.request.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
@unittest.skipIf(sys.platform in ('win32',), 'not appropriate for Windows')
def test_fileno(self):
# Make sure fd returned by fileno is valid.
with self.urlopen("http://www.google.com/", timeout=None) as open_url:
fd = open_url.fileno()
with os.fdopen(fd, 'rb') as f:
self.assertTrue(f.read(), "reading from file created using fd "
"returned by fileno failed")
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
bogus_domain = "sadflkjsasf.i.nvali.d"
try:
socket.gethostbyname(bogus_domain)
except OSError:
# socket.gaierror is too narrow, since getaddrinfo() may also
# fail with EAI_SYSTEM and ETIMEDOUT (seen on Ubuntu 13.04),
# i.e. Python's TimeoutError.
pass
else:
# This happens with some overzealous DNS providers such as OpenDNS
self.skipTest("%r should not resolve for test to work" % bogus_domain)
failure_explanation = ('opening an invalid URL did not raise OSError; '
'can be caused by a broken DNS server '
'(e.g. returns 404 or hijacks page)')
with self.assertRaises(OSError, msg=failure_explanation):
# SF patch 809915: In Sep 2003, VeriSign started highjacking
# invalid .com and .net addresses to boost traffic to their own
# site. This test started failing then. One hopes the .invalid
# domain will be spared to serve its defined purpose.
urllib.request.urlopen("http://sadflkjsasf.i.nvali.d/")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.request.urlretrieve using the network."""
@contextlib.contextmanager
def urlretrieve(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
file_location, info = urllib.request.urlretrieve(*args, **kwargs)
try:
yield file_location, info
finally:
support.unlink(file_location)
def test_basic(self):
# Test basic functionality.
with self.urlretrieve("http://www.example.com/") as (file_location, info):
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
with open(file_location, 'rb') as f:
self.assertTrue(f.read(), "reading from the file location returned"
" by urlretrieve failed")
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
with self.urlretrieve("http://www.example.com/",
support.TESTFN) as (file_location, info):
self.assertEqual(file_location, support.TESTFN)
self.assertTrue(os.path.exists(file_location))
with open(file_location, 'rb') as f:
self.assertTrue(f.read(), "reading from temporary file failed")
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
with self.urlretrieve("http://www.example.com/") as (file_location, info):
self.assertIsInstance(info, email.message.Message,
"info is not an instance of email.message.Message")
logo = "http://www.example.com/"
def test_data_header(self):
with self.urlretrieve(self.logo) as (file_location, fileheaders):
datevalue = fileheaders.get('Date')
dateformat = '%a, %d %b %Y %H:%M:%S GMT'
try:
time.strptime(datevalue, dateformat)
except ValueError:
self.fail('Date value not in %r format', dateformat)
def test_reporthook(self):
records = []
def recording_reporthook(blocks, block_size, total_size):
records.append((blocks, block_size, total_size))
with self.urlretrieve(self.logo, reporthook=recording_reporthook) as (
file_location, fileheaders):
expected_size = int(fileheaders['Content-Length'])
records_repr = repr(records) # For use in error messages.
self.assertGreater(len(records), 1, msg="There should always be two "
"calls; the first one before the transfer starts.")
self.assertEqual(records[0][0], 0)
self.assertGreater(records[0][1], 0,
msg="block size can't be 0 in %s" % records_repr)
self.assertEqual(records[0][2], expected_size)
self.assertEqual(records[-1][2], expected_size)
block_sizes = {block_size for _, block_size, _ in records}
self.assertEqual({records[0][1]}, block_sizes,
msg="block sizes in %s must be equal" % records_repr)
self.assertGreaterEqual(records[-1][0]*records[0][1], expected_size,
msg="number of blocks * block size must be"
" >= total size in %s" % records_repr)
if __name__ == "__main__":
unittest.main()
|
lgpl-3.0
|
szezso/T.E.S.C.O-kernel_vivo
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack, drawn as a textual but hierarchical tree of calls.
Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while (but not too long); the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
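# Illustrative only: a typical function-tracer line and what parseLine() extracts from it
# (timestamp, callee, caller):
#   "  bash-4251  [000]  1277.638401: update_curr <-task_tick_fair"
#   -> ('1277.638401', 'update_curr', 'task_tick_fair')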
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
gpl-2.0
|
petosegan/scikit-learn
|
sklearn/datasets/olivetti_faces.py
|
198
|
4688
|
"""Modified Olivetti faces dataset.
The original database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
There are ten different images of each of 40 distinct subjects. For some
subjects, the images were taken at different times, varying the lighting,
facial expressions (open / closed eyes, smiling / not smiling) and facial
details (glasses / no glasses). All the images were taken against a dark
homogeneous background with the subjects in an upright, frontal position (with
tolerance for some side movement).
The original dataset consisted of 92 x 112 images, while the Roweis version
consists of 64 x 64 images.
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
try:
# Python 2
import urllib2
urlopen = urllib2.urlopen
except ImportError:
# Python 3
import urllib.request
urlopen = urllib.request.urlopen
import numpy as np
from scipy.io.matlab import loadmat
from .base import get_data_home, Bunch
from ..utils import check_random_state
from ..externals import joblib
DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat"
TARGET_FILENAME = "olivetti.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
download_if_missing=True):
"""Loader for the Olivetti faces data-set from AT&T.
Read more in the :ref:`User Guide <olivetti_faces>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : boolean, optional
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : optional, integer or RandomState object
The seed or the random number generator used to shuffle the
data.
Returns
-------
An object with the following attributes:
data : numpy array of shape (400, 4096)
Each row corresponds to a ravelled face image of original size 64 x 64 pixels.
images : numpy array of shape (400, 64, 64)
Each row is a face image corresponding to one of the 40 subjects of the dataset.
target : numpy array of shape (400, )
Labels associated to each face image. Those labels are ranging from
0-39 and correspond to the Subject IDs.
DESCR : string
Description of the modified Olivetti Faces Dataset.
Notes
------
This dataset consists of 10 pictures each of 40 individuals. The original
database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Olivetti faces from %s to %s'
% (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
mfile = loadmat(buf)
faces = mfile['faces'].T.copy()
joblib.dump(faces, join(data_home, TARGET_FILENAME), compress=6)
del mfile
else:
faces = joblib.load(join(data_home, TARGET_FILENAME))
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
return Bunch(data=faces.reshape(len(faces), -1),
images=faces,
target=target,
DESCR=MODULE_DOCS)
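# Usage sketch (for illustration only; the shapes follow the docstring above):
#
#   from sklearn.datasets import fetch_olivetti_faces
#   faces = fetch_olivetti_faces(shuffle=True, random_state=42)
#   faces.data.shape    # (400, 4096)
#   faces.images.shape  # (400, 64, 64)
#   faces.target[:10]   # subject ids in the range 0-39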
|
bsd-3-clause
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/lib/requests/packages/chardet/escprober.py
|
2936
|
3187
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
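# Rough usage sketch (illustrative only; feed() expects a byte buffer, and the reported
# name, if any, comes from the matching escape state machine's model):
#
#   prober = EscCharSetProber()
#   prober.feed(b'\x1b$B...escape-encoded text...\x1b(B')
#   prober.get_charset_name()   # e.g. 'ISO-2022-JP' once one machine reaches eItsMe
#   prober.get_confidence()     # 0.99 when a charset was detected, else 0.0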
|
gpl-3.0
|
vishwa91/ewaste_printer
|
tools/arm/arm-none-eabi/share/gdb/python/gdb/prompt.py
|
137
|
4210
|
# Extended prompt utilities.
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwdu()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return '<no %s>' % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return '<no attribute %s on current %s>' % (attr, what)
def _prompt_frame(attr):
"The selected frame; an argument names a frame parameter."
return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
def _prompt_thread(attr):
"The selected thread; an argument names a thread parameter."
return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
def _prompt_version(attr):
"The version of GDB."
return gdb.VERSION
def _prompt_esc(attr):
"The ESC character."
return '\033'
def _prompt_bs(attr):
"A backslash."
return '\\'
def _prompt_n(attr):
"A newline."
return '\n'
def _prompt_r(attr):
"A carriage return."
return '\r'
def _prompt_param(attr):
"A parameter's value; the argument names the parameter."
return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
"Begins a sequence of non-printing characters."
return '\001'
def _prompt_noprint_end(attr):
"Ends a sequence of non-printing characters."
return '\002'
prompt_substitutions = {
'e': _prompt_esc,
'\\': _prompt_bs,
'n': _prompt_n,
'r': _prompt_r,
'v': _prompt_version,
'w': _prompt_pwd,
'f': _prompt_frame,
't': _prompt_thread,
'p': _prompt_param,
'[': _prompt_noprint_begin,
']': _prompt_noprint_end
}
def prompt_help():
"""Generate help dynamically from the __doc__ strings of attribute
functions."""
result = ''
keys = sorted (prompt_substitutions.keys())
for key in keys:
result += ' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
return result
def substitute_prompt(prompt):
"Perform substitutions on PROMPT."
result = ''
plen = len(prompt)
i = 0
while i < plen:
if prompt[i] == '\\':
i = i + 1
if i >= plen:
break
cmdch = prompt[i]
if cmdch in prompt_substitutions:
cmd = prompt_substitutions[cmdch]
if i + 1 < plen and prompt[i + 1] == '{':
j = i + 1
while j < plen and prompt[j] != '}':
j = j + 1
# Just ignore formatting errors.
if j >= plen or prompt[j] != '}':
arg = None
else:
arg = prompt[i + 2 : j]
i = j
else:
arg = None
result += str(cmd(arg))
else:
# Unrecognized escapes are turned into the escaped
# character itself.
result += prompt[i]
else:
result += prompt[i]
i = i + 1
return result
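# Example substitutions (illustrative only; the actual values depend on the running GDB session):
#   substitute_prompt('\\v> ')                 # e.g. '7.6.1> '  (GDB version, then the literal '> ')
#   substitute_prompt('\\w \\p{pagination} ')  # working directory, then the 'pagination' parameter value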
|
mit
|
edx/xqueue-watcher
|
xqueue_watcher/jailedgrader.py
|
1
|
11710
|
"""
An implementation of a grader that uses codejail to sandbox submission execution.
"""
import codecs
import os
import sys
import imp
import json
import random
import gettext
from path import Path
import six
import codejail
from grader_support.gradelib import EndTest
from grader_support.graderutil import LANGUAGE
import grader_support
from .grader import Grader
TIMEOUT = 1
def path_to_six():
"""
Return the full path to six.py
"""
if any(six.__file__.endswith(suffix) for suffix in ('.pyc', '.pyo')):
# __file__ points to the compiled bytecode in python 2
return Path(six.__file__[:-1])
else:
# __file__ points to the .py file in python 3
return Path(six.__file__)
SUPPORT_FILES = [
Path(grader_support.__file__).dirname(),
path_to_six(),
]
def truncate(out):
"""
Truncate test output that's too long. This is per-test.
"""
TOO_LONG = 5000 # 5K bytes seems like enough for a single test.
if len(out) > TOO_LONG:
out = out[:TOO_LONG] + "...OUTPUT TRUNCATED"
return out
def prepend_coding(code):
"""
Add a coding line--makes submissions with inline unicode not
explode (as long as they're utf8, I guess)
"""
return '# coding: utf8\n' + code
class JailedGrader(Grader):
"""
A grader implementation that uses codejail.
Instantiate it with grader_root="path/to/graders"
and optionally codejail_python="python name" (the name that you used to configure codejail)
"""
def __init__(self, *args, **kwargs):
self.codejail_python = kwargs.pop("codejail_python", "python")
super().__init__(*args, **kwargs)
self.locale_dir = self.grader_root / "conf" / "locale"
self.fork_per_item = False # it's probably safe not to fork
# EDUCATOR-3368: OpenBLAS library is allowed to allocate 1 thread
os.environ["OPENBLAS_NUM_THREADS"] = "1"
def _enable_i18n(self, language):
trans = gettext.translation('graders', localedir=self.locale_dir, fallback=True, languages=[language])
trans.install(names=None)
def _run(self, grader_path, thecode, seed):
files = SUPPORT_FILES + [grader_path]
if self.locale_dir.exists():
files.append(self.locale_dir)
extra_files = [('submission.py', thecode.encode('utf-8'))]
argv = ["-m", "grader_support.run", Path(grader_path).basename(), 'submission.py', seed]
r = codejail.jail_code.jail_code(self.codejail_python, files=files, extra_files=extra_files, argv=argv)
return r
def grade(self, grader_path, grader_config, submission):
if type(submission) != str:
self.log.warning("Submission is NOT unicode")
results = {
'errors': [],
'tests': [],
'correct': False,
'score': 0,
}
# There are some cases where the course team would like to accept a
# student submission but not process the student code. Some examples are
# cases where the problem would require dependencies that are difficult
# or impractical to install in a sandbox or if the complexity of the
# solution would cause the runtime of the student code to exceed what is
# possible in the sandbox.
# skip_grader is a flag in the grader config which is a boolean. If it
# is set to true on a problem then it will always show that the
# submission is correct and give the student a full score for the
# problem.
if grader_config.get('skip_grader', False):
results['correct'] = True
results['score'] = 1
self.log.debug('Skipping the grader.')
return results
self._enable_i18n(grader_config.get("lang", LANGUAGE))
answer_path = Path(grader_path).dirname() / 'answer.py'
with open(answer_path, 'rb') as f:
answer = f.read().decode('utf-8')
# Import the grader, straight from the original file. (It probably isn't in
# sys.path, and we may be in a long running gunicorn process, so we don't
# want to add stuff to sys.path either.)
grader_module = imp.load_source("grader_module", str(grader_path))
grader = grader_module.grader
# Preprocess for grader-specified errors
errors = grader.input_errors(submission)
if errors != []:
results['errors'].extend(errors)
# Don't run tests if there were errors
return results
# Add a unicode encoding declaration.
processed_answer = prepend_coding(grader.preprocess(answer))
processed_submission = prepend_coding(grader.preprocess(submission))
# Same seed for both runs
seed = str(random.randint(0, 20000))
# Run the official answer, to get the expected output.
expected_ok = False
expected_exc = None
try:
# If we want a factor of two speedup for now: trust the staff solution to
# avoid hitting the sandbox. (change run to run_trusted)
expected_outputs = None # in case run_trusted raises an exception.
expected_outputs = self._run(grader_path, processed_answer, seed).stdout
if expected_outputs:
expected = json.loads(expected_outputs.decode('utf-8'))
expected_ok = True
except Exception:
expected_exc = sys.exc_info()
else:
# We just ran the official answer, nothing should have gone wrong, so check
# everything, and note it as bad if anything is wrong.
if expected_ok:
if expected['exceptions'] \
or expected['grader']['status'] != 'ok' \
or expected['submission']['status'] != 'ok':
expected_ok = False
if not expected_ok:
# We couldn't run the official answer properly, bail out, but don't show
# details to the student, since none of it is their code.
results['errors'].append(_('There was a problem running the staff solution (Staff debug: L364)'))
self.log.error("Couldn't run staff solution. grader = %s, output: %r",
grader_path, expected_outputs, exc_info=expected_exc)
return results
# The expected code ran fine, go ahead and run the student submission.
actual_ok = False
actual_exc = None
try:
# Do NOT trust the student solution (in production).
actual_outputs = None # in case run raises an exception.
actual_outputs = self._run(grader_path, processed_submission, seed).stdout
if actual_outputs:
actual = json.loads(actual_outputs.decode('utf-8'))
actual_ok = True
else:
results['errors'].append(_("There was a problem running your solution (Staff debug: L379)."))
except Exception:
actual_exc = sys.exc_info()
else:
if actual_ok and actual['grader']['status'] == 'ok':
if actual['submission']['status'] != 'ok':
# The grader ran OK, but the student code didn't, so show the student
# details of what went wrong. There is probably an exception to show.
shown_error = actual['submission']['exception'] or _('There was an error thrown while running your solution.')
results['errors'].append(shown_error)
else:
# The grader didn't run well, we are going to bail.
actual_ok = False
# If something went wrong, then don't continue
if not actual_ok:
results['errors'].append(_("We couldn't run your solution (Staff debug: L397)."))
self.log.error("Couldn't run student solution. grader = %s, output: %r",
grader_path, actual_outputs, exc_info=actual_exc)
return results
# Compare actual and expected through the grader tests, but only if we haven't
# already found a problem.
corrects = []
if not results['errors']:
expected_results = expected['results']
actual_results = actual['results']
if len(expected_results) != len(actual_results):
results['errors'].append(_('Something went wrong: different numbers of '
'tests ran for your code and for our reference code.'))
return results
for test, exp, act in zip(grader.tests(), expected_results, actual_results):
exp_short_desc, exp_long_desc, exp_output = exp
act_short_desc, act_long_desc, act_output = act
if exp_short_desc != act_short_desc:
results['errors'].append(_("Something went wrong: tests don't match up."))
# TODO: don't give up so easily?
return results
# Truncate here--we don't want to send long output back, and also don't want to
# confuse students by comparing the full output but sending back truncated output.
act_output = truncate(act_output)
try:
correct = test.compare_results(exp_output, act_output)
except EndTest as e:
# Allows a grader's compare_results function to raise an EndTest exception
# (defined in gradelib.py). This enables the checker to print out an error
# message to the student, which will be appended to the end of stdout.
if e is not None:
act_output += '\n'
error_msg = _("ERROR")
act_output += "*** {error_msg}: {error_detail} ***".format(
error_msg=error_msg,
error_detail=e
)
correct = False
corrects.append(correct)
if not grader_config.get("hide_output", False):
results['tests'].append((exp_short_desc, exp_long_desc,
correct, exp_output, act_output))
# If there were no tests run, then there was probably an error, so it's incorrect
n = len(corrects)
results['correct'] = all(corrects) and n > 0
results['score'] = float(sum(corrects))/n if n > 0 else 0
if n == 0 and len(results['errors']) == 0:
results['errors'] = [
_("There was a problem while running your code (Staff debug: L450). "
"Please contact the course staff for assistance.")
]
return results
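# Instantiation sketch following the class docstring above (the paths and the codejail
# python name are placeholders, not part of this repository):
#
#   g = JailedGrader(grader_root=Path('/path/to/graders'), codejail_python='python')
#   results = g.grade('/path/to/graders/week1/p1/grade.py', {}, u'def f(x):\n    return x')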
def main(args): # pragma: no cover
"""
Prints a json list:
[ ("Test description", "value") ]
TODO: what about multi-file submission?
"""
import logging
from pprint import pprint
from codejail.jail_code import configure
import getpass
logging.basicConfig(level=logging.DEBUG)
if len(args) != 2:
return
configure("python", sys.executable, user=getpass.getuser())
(grader_path, submission_path) = args
with open(submission_path, 'rb') as f:
submission = f.read().decode('utf-8')
grader_config = {"lang": "eo"}
grader_path = Path(grader_path).abspath()
g = JailedGrader(grader_root=grader_path.dirname().parent.parent)
pprint(g.grade(grader_path, grader_config, submission))
if __name__ == '__main__': # pragma: no cover
main(sys.argv[1:])
|
agpl-3.0
|
df-5/skybot
|
plugins/util/timesince.py
|
33
|
4117
|
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
#ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, ('year', 'years')),
(60 * 60 * 24 * 30, ('month', 'months')),
(60 * 60 * 24 * 7, ('week', 'weeks')),
(60 * 60 * 24, ('day', 'days')),
(60 * 60, ('hour', 'hours')),
(60, ('minute', 'minutes'))
)
# Convert int or float (unix epoch) to datetime.datetime for comparison
if isinstance(d, int) or isinstance(d, float):
d = datetime.datetime.fromtimestamp(d)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u'0 ' + 'minutes'
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
if count == 1:
s = '%(number)d %(type)s' % {'number': count, 'type': name[0]}
else:
s = '%(number)d %(type)s' % {'number': count, 'type': name[1]}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
if count2 == 1:
s += ', %d %s' % (count2, name2[0])
else:
s += ', %d %s' % (count2, name2[1])
return s
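# Worked example (illustrative only; the dates are arbitrary):
#   d = datetime.datetime(2015, 1, 1, 12, 0)
#   now = datetime.datetime(2015, 1, 15, 15, 0)  # 2 weeks and 3 hours later
#   timesince(d, now)  # -> '2 weeks' (the adjacent unit, days, is 0, and hours are not adjacent to weeks)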
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if not now:
now = datetime.datetime.now()
return timesince(now, d)
|
unlicense
|
barachka/odoo
|
addons/l10n_in_hr_payroll/wizard/hr_salary_employee_bymonth.py
|
374
|
2830
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_salary_employee_bymonth(osv.osv_memory):
_name = 'hr.salary.employee.month'
_description = 'Hr Salary Employee By Month Report'
_columns = {
'start_date': fields.date('Start Date', required=True),
'end_date': fields.date('End Date', required=True),
'employee_ids': fields.many2many('hr.employee', 'payroll_year_rel', 'payroll_year_id', 'employee_id', 'Employees', required=True),
'category_id': fields.many2one('hr.salary.rule.category', 'Category', required=True),
}
def _get_default_category(self, cr, uid, context=None):
category_ids = self.pool.get('hr.salary.rule.category').search(cr, uid, [('code', '=', 'NET')], context=context)
return category_ids and category_ids[0] or False
_defaults = {
'start_date': lambda *a: time.strftime('%Y-01-01'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'category_id': _get_default_category
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, context=context)
res = res and res[0] or {}
datas.update({'form': res})
return self.pool['report'].get_action(cr, uid, ids,
'l10n_in_hr_payroll.report_hrsalarybymonth',
data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
dana-i2cat/felix
|
expedient/src/python/plugins/vt_plugin/controller/dispatchers/ProvisioningResponseDispatcher.py
|
3
|
9357
|
from django.http import *
from django.core.urlresolvers import reverse
import os
import sys
from vt_plugin.models import *
from vt_manager.communication.utils.XmlHelper import *
from vt_plugin.utils.ServiceThread import *
from vt_plugin.utils.Translator import *
from vt_plugin.controller.dispatchers.ProvisioningDispatcher import ProvisioningDispatcher
from expedient.common.messaging.models import DatedMessage
from vt_plugin.controller.vtAggregateController import vtAggregateController
from vt_plugin.models.VtPlugin import VtPlugin
from expedient.clearinghouse.project.models import Project
class ProvisioningResponseDispatcher():
'''
Handles all the VT AM vm provisioning responses and all the actions that go
from the VT AM to the VT Plugin
'''
@staticmethod
def processResponse(response):
for action in response.action:
#if Action.objects.filter(uuid = action.id).exists():
if Action.objects.filter(uuid = action.id):
#actionModel is an instance to the action stored in DB with uuid the same as the incomming action
actionModel = Action.objects.get(uuid = action.id)
else:
actionModel = Translator.ActionToModel(action, "provisioning", save = "noSave")
# TODO: adapt this code so the "if" below is entered when a SUCCESS status is received from actions generated by the island manager
actionModel.status = 'QUEUED' # this state is set just to enter the "if" below
actionModel.vm = VM.objects.get(uuid = action.server.virtual_machines[0].uuid)
if actionModel.status in ('QUEUED', 'ONGOING'):
print "The response is:"
print actionModel
print actionModel.uuid
print "actionModel.status = %s" %actionModel.status
#update action status in DB
actionModel.status = action.status
print "The action.status is %s" %action.status
print "The action.description is %s" %action.description
actionModel.description = action.description
actionModel.save()
# update the VM state according to the incoming action response
if actionModel.status == 'SUCCESS':
if actionModel.type == 'create':
actionModel.vm.setState('created (stopped)')
actionModel.vm.save()
elif actionModel.type == 'start' or actionModel.type == 'reboot':
actionModel.vm.setState('running')
actionModel.vm.save()
elif actionModel.type == 'hardStop':
actionModel.vm.setState('stopped')
actionModel.vm.save()
elif actionModel.type == 'delete':
actionModel.vm.completeDelete()
if actionModel.description == None:
actionModel.description = ""
else:
actionModel.description = ": "+actionModel.description
if actionModel.requestUser:
DatedMessage.objects.post_message_to_user(
"Action %s on VM %s succeed %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
actionModel.requestUser, msg_type=DatedMessage.TYPE_SUCCESS,
)
else:
project = Project.objects.get(uuid=actionModel.vm.getProjectId())
for user in project.members_as_permittees.all():
DatedMessage.objects.post_message_to_user(
"Action %s on VM %s succeed %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
user, msg_type=DatedMessage.TYPE_SUCCESS,
)
elif actionModel.status == 'FAILED':
if actionModel.description == None:
actionModel.description = ""
else:
actionModel.description = ": "+actionModel.description
if actionModel.requestUser:
DatedMessage.objects.post_message_to_user(
"Action %s on VM %s failed: %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
actionModel.requestUser, msg_type=DatedMessage.TYPE_ERROR,
)
else:
project = Project.objects.get(uuid=actionModel.vm.getProjectId())
for user in project.members_as_permittees.all():
DatedMessage.objects.post_message_to_user(
"Action %s on VM %s failed: %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
user, msg_type=DatedMessage.TYPE_ERROR,
)
if actionModel.type == 'start':
actionModel.vm.setState('stopped')
actionModel.vm.save()
elif actionModel.type == 'hardStop':
actionModel.vm.setState('running')
actionModel.vm.save()
elif actionModel.type == 'reboot':
actionModel.vm.setState('stopped')
actionModel.vm.save()
elif actionModel.type == 'create':
ProvisioningDispatcher.cleanWhenFail(actionModel.vm, VTServer.objects.get(uuid = actionModel.vm.serverID))
else:
actionModel.vm.setState('failed')
actionModel.vm.save()
elif actionModel.status == 'ONGOING':
if actionModel.type == 'create':
actionModel.vm.setState('creating...')
actionModel.vm.save()
vtplugin = VtPlugin.objects.get(id=actionModel.vm.aggregate_id)
projectUUID = actionModel.vm.projectId
sliceUUID = actionModel.vm.sliceId
vtAggregateController.askForAggregateResources(vtplugin,projectUUID,sliceUUID)
elif actionModel.type == 'start':
actionModel.vm.setState('starting...')
actionModel.vm.save()
elif actionModel.type == 'hardStop':
actionModel.vm.setState('stopping...')
actionModel.vm.save()
elif actionModel.type == 'delete':
actionModel.vm.setState('deleting...')
actionModel.vm.save()
elif actionModel.type == 'reboot':
actionModel.vm.setState('rebooting...')
actionModel.vm.save()
#if actionModel.requestUser:
# DatedMessage.objects.post_message_to_user(
# "Action %s on VM %s succeed: %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
# actionModel.requestUser, msg_type=DatedMessage.TYPE_SUCCESS,
# )
#else:
# project = Project.objects.get(uuid=actionModel.vm.getProjectId())
# for user in project.members_as_permittees.all():
# DatedMessage.objects.post_message_to_user(
# "Action %s on VM %s succeed: %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
# user, msg_type=DatedMessage.TYPE_SUCCESS,
# )
else:
actionModel.vm.setState('unknown')
actionModel.vm.save()
#if actionModel.requestUser:
# DatedMessage.objects.post_message_to_user(
# "Action %s on VM %s succeed: %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
# actionModel.requestUser, msg_type=DatedMessage.TYPE_SUCCESS,
# )
#else:
# project = Project.objects.get(uuid=actionModel.vm.getProjectId())
# for user in project.members_as_permittees.all():
# DatedMessage.objects.post_message_to_user(
# "Action %s on VM %s succeed: %s" % (actionModel.type, actionModel.vm.name, actionModel.description),
# user, msg_type=DatedMessage.TYPE_SUCCESS,
# )
return "Done Response"
else:
try:
raise Exception("Received action %s with unexpected status '%s'" % (action.id, action.status))
except Exception as e:
print e
return
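# A minimal sketch (not part of the original dispatcher) of the SUCCESS branch
# above expressed as a lookup table; the dict name is hypothetical, the state
# strings are taken verbatim from processResponse:
#
#   SUCCESS_STATE_BY_ACTION = {
#       'create': 'created (stopped)',
#       'start': 'running',
#       'reboot': 'running',
#       'hardStop': 'stopped',
#   }
#   new_state = SUCCESS_STATE_BY_ACTION.get(actionModel.type)
#   if new_state:
#       actionModel.vm.setState(new_state)
#       actionModel.vm.save()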
|
apache-2.0
|
victorzhao/miniblink49
|
third_party/WebKit/Source/devtools/scripts/modular_build.py
|
32
|
6644
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for the modular DevTools build.
"""
from os import path
import os
try:
import simplejson as json
except ImportError:
import json
def read_file(filename):
with open(path.normpath(filename), 'rt') as input:
return input.read()
def write_file(filename, content):
if path.exists(filename):
os.remove(filename)
with open(filename, 'wt') as output:
output.write(content)
def bail_error(message):
raise Exception(message)
def load_and_parse_json(filename):
try:
return json.loads(read_file(filename))
except:
print 'ERROR: Failed to parse %s' % filename
raise
def concatenate_scripts(file_names, module_dir, output_dir, output):
for file_name in file_names:
output.write('/* %s */\n' % file_name)
file_path = path.join(module_dir, file_name)
if not path.isfile(file_path):
file_path = path.join(output_dir, path.basename(module_dir), file_name)
output.write(read_file(file_path))
output.write(';')
class Descriptors:
def __init__(self, application_dir, application_descriptor, module_descriptors):
self.application_dir = application_dir
self.application = application_descriptor
self.modules = module_descriptors
self._cached_sorted_modules = None
def application_json(self):
return json.dumps(self.application.values())
def all_compiled_files(self):
files = {}
for name in self.modules:
module = self.modules[name]
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files[path.normpath(path.join(self.application_dir, name, script))] = True
return files.keys()
def module_compiled_files(self, name):
files = []
module = self.modules.get(name)
skipped_files = set(module.get('skip_compilation', []))
for script in module.get('scripts', []):
if script not in skipped_files:
files.append(script)
return files
def module_resources(self, name):
return [name + '/' + resource for resource in self.modules[name].get('resources', [])]
def sorted_modules(self):
if self._cached_sorted_modules:
return self._cached_sorted_modules
result = []
unvisited_modules = set(self.modules)
temp_modules = set()
def visit(parent, name):
if name not in unvisited_modules:
return None
if name not in self.modules:
return (parent, name)
if name in temp_modules:
bail_error('Dependency cycle found at module "%s"' % name)
temp_modules.add(name)
deps = self.modules[name].get('dependencies')
if deps:
for dep_name in deps:
bad_dep = visit(name, dep_name)
if bad_dep:
return bad_dep
unvisited_modules.remove(name)
temp_modules.remove(name)
result.append(name)
return None
while len(unvisited_modules):
for next in unvisited_modules:
break
failure = visit(None, next)
if failure:
# failure[0] can never be None
bail_error('Unknown module "%s" encountered in dependencies of "%s"' % (failure[1], failure[0]))
self._cached_sorted_modules = result
return result
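# Worked example (hypothetical module names) of the DFS above: given
# modules = {'common': {}, 'sdk': {'dependencies': ['common']},
#            'main': {'dependencies': ['sdk', 'common']}},
# visit() emits a module only after all of its dependencies, so
# sorted_modules() returns ['common', 'sdk', 'main']; a cycle such as
# 'a' -> 'b' -> 'a' triggers bail_error('Dependency cycle found ...').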
def sorted_dependencies_closure(self, module_name):
visited = set()
def sorted_deps_for_module(name):
result = []
desc = self.modules[name]
deps = desc.get('dependencies', [])
for dep in deps:
result += sorted_deps_for_module(dep)
if name not in visited:
result.append(name)
visited.add(name)
return result
return sorted_deps_for_module(module_name)
class DescriptorLoader:
def __init__(self, application_dir):
self.application_dir = application_dir
def load_application(self, application_descriptor_name):
return self.load_applications([application_descriptor_name])
def load_applications(self, application_descriptor_names):
merged_application_descriptor = {}
all_module_descriptors = {}
for application_descriptor_name in application_descriptor_names:
module_descriptors = {}
application_descriptor_filename = path.join(self.application_dir, application_descriptor_name)
application_descriptor = {desc['name']: desc for desc in load_and_parse_json(application_descriptor_filename)}
for name in application_descriptor:
merged_application_descriptor[name] = application_descriptor[name]
for (module_name, module) in application_descriptor.items():
if module_descriptors.get(module_name):
bail_error('Duplicate definition of module "%s" in %s' % (module_name, application_descriptor_filename))
if not all_module_descriptors.get(module_name):
module_descriptors[module_name] = self._read_module_descriptor(module_name, application_descriptor_filename)
all_module_descriptors[module_name] = module_descriptors[module_name]
for module in module_descriptors.values():
deps = module.get('dependencies', [])
for dep in deps:
if dep not in application_descriptor:
bail_error('Module "%s" (dependency of "%s") not listed in application descriptor %s' % (dep, module['name'], application_descriptor_filename))
return Descriptors(self.application_dir, merged_application_descriptor, all_module_descriptors)
def _read_module_descriptor(self, module_name, application_descriptor_filename):
json_filename = path.join(self.application_dir, module_name, 'module.json')
if not path.exists(json_filename):
bail_error('Module descriptor %s referenced in %s is missing' % (json_filename, application_descriptor_filename))
module_json = load_and_parse_json(json_filename)
module_json['name'] = module_name
return module_json
|
gpl-3.0
|
ibotty/ansible
|
lib/ansible/executor/playbook_executor.py
|
21
|
11974
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import locale
import os
import signal
import sys
from ansible import constants as C
from ansible.errors import *
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.color import colorize, hostcolor
from ansible.utils.debug import debug
from ansible.utils.encrypt import do_encrypt
from ansible.utils.unicode import to_unicode
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._display = display
self._options = options
self.passwords = passwords
if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
else:
self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords)
def run(self):
'''
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
'''
signal.signal(signal.SIGINT, self._cleanup)
result = 0
entrylist = []
entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
i = 1
plays = pb.get_plays()
self._display.vv('%d plays in %s' % (len(plays), playbook_path))
for play in plays:
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
if play.vars_prompt:
for var in play.vars_prompt:
if 'name' not in var:
raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds)
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in play.vars:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
if self._options.syntax:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(new_play)
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
# we are actually running plays
for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
break
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
if new_play.any_errors_fatal and failed_hosts_count > 0:
break
elif new_play.max_fail_percentage is not None and \
int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
break
elif len(batch) == failed_hosts_count:
break
# clear the failed hosts dictionaries in the TQM for the next batch
self._tqm.clear_failed_hosts()
# if the last result wasn't zero, break out of the serial batch loop
if result != 0:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._cleanup()
if self._options.syntax:
self._display.display("No issues encountered")
return result
# FIXME: this stat summary stuff should be cleaned up and moved
# to a new method, if it even belongs here...
self._display.banner("PLAY RECAP")
hosts = sorted(self._tqm._stats.processed.keys())
for h in hosts:
t = self._tqm._stats.summarize(h)
self._display.display("%s : %s %s %s %s" % (
hostcolor(h, t),
colorize('ok', t['ok'], 'green'),
colorize('changed', t['changed'], 'yellow'),
colorize('unreachable', t['unreachable'], 'red'),
colorize('failed', t['failures'], 'red')),
screen_only=True
)
self._display.display("%s : %s %s %s %s" % (
hostcolor(h, t, False),
colorize('ok', t['ok'], None),
colorize('changed', t['changed'], None),
colorize('unreachable', t['unreachable'], None),
colorize('failed', t['failures'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# END STATS STUFF
return result
def _cleanup(self, signum=None, framenum=None):
return self._tqm.cleanup()
def _get_serialized_batches(self, play):
'''
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
'''
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts)
# check to see if the serial number was specified as a percentage,
# and convert it to an integer value based on the number of hosts
if isinstance(play.serial, basestring) and play.serial.endswith('%'):
serial_pct = int(play.serial.replace("%",""))
serial = int((serial_pct/100.0) * len(all_hosts))
else:
serial = int(play.serial)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise split the list of hosts into chunks
# which are based on the serial size
if serial <= 0:
return [all_hosts]
else:
serialized_batches = []
while len(all_hosts) > 0:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
return serialized_batches
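# Worked example of the batching above (hypothetical inventory): with 10
# matched hosts and play.serial set to "30%", serial_pct is 30, so
# serial = int(0.3 * 10) = 3 and the hosts are split into batches of sizes
# [3, 3, 3, 1]; with serial left at 0 a single batch containing all 10
# hosts is returned.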
def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
if sys.__stdin__.isatty():
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
def do_prompt(prompt, private):
if sys.stdout.encoding:
msg = prompt.encode(sys.stdout.encoding)
else:
# when piping the output, or at other times when stdout
# may not be the standard file descriptor, the stdout
# encoding may not be set, so default to something sane
msg = prompt.encode(locale.getpreferredencoding())
if private:
return getpass.getpass(msg)
return raw_input(msg)
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self._display.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self._display.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_unicode(result, errors='strict')
return result
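# Illustrative playbook snippet (variable name and prompt text are made up)
# exercising the vars_prompt handling implemented above:
#
#   vars_prompt:
#     - name: "vault_pass"
#       prompt: "Enter the vault password"
#       private: yes
#       confirm: yes
#       encrypt: "sha512_crypt"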
|
gpl-3.0
|
BorgERP/borg-erp-6of3
|
addons/point_of_sale/wizard/__init__.py
|
9
|
1354
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_confirm
import pos_discount
import pos_open_statement
import pos_close_statement
import pos_box_entries
import pos_box_out
import pos_details
import pos_sales_user
import pos_sales_user_today
import pos_receipt
import pos_payment_report_user
import pos_payment_report
import pos_payment
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
lz1988/company-site
|
build/lib/django/db/models/loading.py
|
104
|
10633
|
"Utilities for loading models and the modules that contain them."
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
from django.utils import six
import imp
import sys
import os
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
'load_app', 'app_cache_ready')
class AppCache(object):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
"""
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
# Keys of app_store are the model modules for each application.
app_store=SortedDict(),
# Mapping of installed app_labels to model modules for that app.
app_labels={},
# Mapping of app_labels to a dictionary of model names to model code.
# May contain apps that are not installed.
app_models=SortedDict(),
# Mapping of app_labels to errors raised when trying to import the app.
app_errors={},
# -- Everything below here is only used when populating the cache --
loaded=False,
handled={},
postponed=[],
nesting_level=0,
_get_models_cache={},
)
def __init__(self):
self.__dict__ = self.__shared_state
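# Illustration of the Borg pattern used above (not part of Django itself):
# every AppCache() instance rebinds its __dict__ to the shared class-level
# state, so all instances see the same data, e.g.
#   a, b = AppCache(), AppCache()
#   a.loaded = True
#   assert b.loaded is True   # both names share the same underlying dict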
def _populate(self):
"""
Fill in all the cache information. This method is threadsafe, in the
sense that every caller will see the same state upon return, and if the
cache is already initialised, it does no work.
"""
if self.loaded:
return
# Note that we want to use the import lock here - the app loading is
# in many cases initiated implicitly by importing, and thus it is
# possible to end up in deadlock when one thread initiates loading
# without holding the importer lock and another thread then tries to
# import something which also launches the app loading. For details of
# this situation see #18251.
imp.acquire_lock()
try:
if self.loaded:
return
for app_name in settings.INSTALLED_APPS:
if app_name in self.handled:
continue
self.load_app(app_name, True)
if not self.nesting_level:
for app_name in self.postponed:
self.load_app(app_name)
self.loaded = True
finally:
imp.release_lock()
def _label_for(self, app_mod):
"""
Return app_label for given models module.
"""
return app_mod.__name__.split('.')[-2]
def load_app(self, app_name, can_postpone=False):
"""
Loads the app with the provided fully qualified name, and returns the
model module.
"""
self.handled[app_name] = None
self.nesting_level += 1
app_module = import_module(app_name)
try:
models = import_module('.models', app_name)
except ImportError:
self.nesting_level -= 1
# If the app doesn't have a models module, we can just ignore the
# ImportError and return no models for it.
if not module_has_submodule(app_module, 'models'):
return None
# But if the app does have a models module, we need to figure out
# whether to suppress or propagate the error. If can_postpone is
# True then it may be that the package is still being imported by
# Python and the models module isn't available yet. So we add the
# app to the postponed list and we'll try it again after all the
# recursion has finished (in populate). If can_postpone is False
# then it's time to raise the ImportError.
else:
if can_postpone:
self.postponed.append(app_name)
return None
else:
raise
self.nesting_level -= 1
if models not in self.app_store:
self.app_store[models] = len(self.app_store)
self.app_labels[self._label_for(models)] = models
return models
def app_cache_ready(self):
"""
Returns true if the model cache is fully populated.
Useful for code that wants to cache the results of get_models() for
themselves once it is safe to do so.
"""
return self.loaded
def get_apps(self):
"Returns a list of all installed modules that contain models."
self._populate()
# Ensure the returned list is always in the same order (with new apps
# added at the end). This avoids unstable ordering on the admin app
# list page, for example.
apps = [(v, k) for k, v in self.app_store.items()]
apps.sort()
return [elt[1] for elt in apps]
def get_app(self, app_label, emptyOK=False):
"""
Returns the module containing the models for the given app_label. If
the app has no models in it and 'emptyOK' is True, returns None.
"""
self._populate()
imp.acquire_lock()
try:
for app_name in settings.INSTALLED_APPS:
if app_label == app_name.split('.')[-1]:
mod = self.load_app(app_name, False)
if mod is None:
if emptyOK:
return None
raise ImproperlyConfigured("App with label %s is missing a models.py module." % app_label)
else:
return mod
raise ImproperlyConfigured("App with label %s could not be found" % app_label)
finally:
imp.release_lock()
def get_app_errors(self):
"Returns the map of known problems with the INSTALLED_APPS."
self._populate()
return self.app_errors
def get_models(self, app_mod=None,
include_auto_created=False, include_deferred=False,
only_installed=True, include_swapped=False):
"""
Given a module containing models, returns a list of the models.
Otherwise returns a list of all installed models.
By default, auto-created models (i.e., m2m models without an
explicit intermediate table) are not included. However, if you
specify include_auto_created=True, they will be.
By default, models created to satisfy deferred attribute
queries are *not* included in the list of models. However, if
you specify include_deferred, they will be.
By default, models that aren't part of installed apps will *not*
be included in the list of models. However, if you specify
only_installed=False, they will be.
By default, models that have been swapped out will *not* be
included in the list of models. However, if you specify
include_swapped, they will be.
"""
cache_key = (app_mod, include_auto_created, include_deferred, only_installed, include_swapped)
try:
return self._get_models_cache[cache_key]
except KeyError:
pass
self._populate()
if app_mod:
if app_mod in self.app_store:
app_list = [self.app_models.get(self._label_for(app_mod),
SortedDict())]
else:
app_list = []
else:
if only_installed:
app_list = [self.app_models.get(app_label, SortedDict())
for app_label in six.iterkeys(self.app_labels)]
else:
app_list = six.itervalues(self.app_models)
model_list = []
for app in app_list:
model_list.extend(
model for model in app.values()
if ((not model._deferred or include_deferred) and
(not model._meta.auto_created or include_auto_created) and
(not model._meta.swapped or include_swapped))
)
self._get_models_cache[cache_key] = model_list
return model_list
def get_model(self, app_label, model_name,
seed_cache=True, only_installed=True):
"""
Returns the model matching the given app_label and case-insensitive
model_name.
Returns None if no model is found.
"""
if seed_cache:
self._populate()
if only_installed and app_label not in self.app_labels:
return None
return self.app_models.get(app_label, SortedDict()).get(model_name.lower())
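# Usage sketch (assumes a standard Django project with django.contrib.auth
# installed): get_model does a case-insensitive lookup of an installed model
# and returns None when the app or model is unknown, e.g.
#   from django.db.models.loading import get_model
#   User = get_model('auth', 'User')       # same as get_model('auth', 'user')
#   assert get_model('auth', 'nosuchmodel') is None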
def register_models(self, app_label, *models):
"""
Register a set of models as belonging to an app.
"""
for model in models:
# Store as 'name: model' pair in a dictionary
# in the app_models dictionary
model_name = model._meta.object_name.lower()
model_dict = self.app_models.setdefault(app_label, SortedDict())
if model_name in model_dict:
# The same model may be imported via different paths (e.g.
# appname.models and project.appname.models). We use the source
# filename as a means to detect identity.
fname1 = os.path.abspath(upath(sys.modules[model.__module__].__file__))
fname2 = os.path.abspath(upath(sys.modules[model_dict[model_name].__module__].__file__))
# Since the filename extension could be .py the first time and
# .pyc or .pyo the second time, ignore the extension when
# comparing.
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
model_dict[model_name] = model
self._get_models_cache.clear()
cache = AppCache()
# These methods were always module level, so are kept that way for backwards
# compatibility.
get_apps = cache.get_apps
get_app = cache.get_app
get_app_errors = cache.get_app_errors
get_models = cache.get_models
get_model = cache.get_model
register_models = cache.register_models
load_app = cache.load_app
app_cache_ready = cache.app_cache_ready
|
bsd-3-clause
|
yongwen/teletraan
|
deploy-board/deploy_board/webapp/helpers/agents_helper.py
|
8
|
2479
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Collection of all agents related calls
"""
from deploy_board.webapp.helpers.deployclient import DeployClient
deployclient = DeployClient()
def get_agents(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s/agents" % (env_name, stage_name),
request.teletraan_user_id.token)
def reset_failed_agents(request, env_name, stage_name, deploy_id):
return deployclient.put("/envs/%s/%s/agents/reset_failed_agents/%s" % (env_name, stage_name,
deploy_id),
request.teletraan_user_id.token)
def get_agent_error(request, env_name, stage_name, host_name):
return deployclient.get("/envs/%s/%s/agents/errors/%s" % (env_name, stage_name, host_name),
request.teletraan_user_id.token)['errorMessage']
def retry_deploy(request, env_name, stage_name, host_id):
return deployclient.put("/envs/%s/%s/agents/%s" % (env_name, stage_name, host_id),
request.teletraan_user_id.token, data={"state": "RESET"})
def pause_deploy(request, env_name, stage_name, host_id):
return deployclient.put("/envs/%s/%s/agents/%s" % (env_name, stage_name, host_id),
request.teletraan_user_id.token, data={"state": "PAUSED_BY_USER"})
def resume_deploy(request, env_name, stage_name, host_id):
return deployclient.put("/envs/%s/%s/agents/%s" % (env_name, stage_name, host_id),
request.teletraan_user_id.token, data={"state": "NORMAL"})
def get_agents_by_host(request, host_name):
return deployclient.get("/agents/%s" % host_name, request.teletraan_user_id.token)
def get_agents_total_by_env(request, env_id):
return deployclient.get("/agents/env/%s/total" % env_id, request.teletraan_user_id.token)
|
apache-2.0
|
wareash/linux-xylon
|
scripts/gdb/linux/modules.py
|
182
|
2953
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
module_type = utils.CachedType("struct module")
def module_list():
global module_type
module_ptr_type = module_type.get_type().pointer()
modules = gdb.parse_and_eval("modules")
entry = modules['next']
end_of_list = modules.address
while entry != end_of_list:
yield utils.container_of(entry, module_ptr_type, "list")
entry = entry['next']
def find_module_by_name(name):
for module in module_list():
if module['name'].string() == name:
return module
return None
class LxModule(gdb.Function):
"""Find module by name and return the module variable.
$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""
def __init__(self):
super(LxModule, self).__init__("lx_module")
def invoke(self, mod_name):
mod_name = mod_name.string()
module = find_module_by_name(mod_name)
if module:
return module.dereference()
else:
raise gdb.GdbError("Unable to find MODULE " + mod_name)
LxModule()
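# Usage sketch inside a gdb session attached to a kernel (the module name is
# illustrative):
#   (gdb) p $lx_module("ext4")
#   (gdb) p $lx_module("ext4").name
# An unknown name raises "Unable to find MODULE <name>".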
class LxLsmod(gdb.Command):
"""List currently loaded modules."""
_module_use_type = utils.CachedType("struct module_use")
def __init__(self):
super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
gdb.write(
"Address{0} Module Size Used by\n".format(
" " if utils.get_long_type().sizeof == 8 else ""))
for module in module_list():
ref = 0
module_refptr = module['refptr']
for cpu in cpus.cpu_list("cpu_possible_mask"):
refptr = cpus.per_cpu(module_refptr, cpu)
ref += refptr['incs']
ref -= refptr['decs']
gdb.write("{address} {name:<19} {size:>8} {ref}".format(
address=str(module['module_core']).split()[0],
name=module['name'].string(),
size=str(module['core_size']),
ref=str(ref)))
source_list = module['source_list']
t = self._module_use_type.get_type().pointer()
entry = source_list['next']
first = True
while entry != source_list.address:
use = utils.container_of(entry, t, "source_list")
gdb.write("{separator}{name}".format(
separator=" " if first else ",",
name=use['source']['name'].string()))
first = False
entry = entry['next']
gdb.write("\n")
LxLsmod()
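# Usage sketch: after sourcing this script in a kernel debug session,
#   (gdb) lx-lsmod
# prints one line per loaded module (address, name, size, reference count and
# the modules that use it), similar to the output of lsmod / /proc/modules.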
|
gpl-2.0
|