code | repo_name | path | language | license | size
---|---|---|---|---|---
# (c) 2018 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
connection: httpapi
short_description: Use httpapi to run commands on network appliances
description:
- This connection plugin provides a connection to remote devices over a
HTTP(S)-based api.
version_added: "2.6"
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the HTTP(S)
connection to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections
when establishing the HTTP(S) connection.
- When unspecified, will pick 80 or 443 based on the value of use_ssl.
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_httpapi_port
network_os:
description:
- Configures the device platform network operating system. This value is
used to load the correct httpapi plugin to communicate with the remote
device
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the API
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when needed for the device API.
vars:
- name: ansible_password
- name: ansible_httpapi_pass
use_ssl:
type: boolean
description:
- Whether to connect using SSL (HTTPS) or not (HTTP).
default: False
vars:
- name: ansible_httpapi_use_ssl
validate_certs:
type: boolean
version_added: '2.7'
description:
- Whether to validate SSL certificates
default: True
vars:
- name: ansible_httpapi_validate_certs
timeout:
type: int
description:
    - Sets the connection timeout, in seconds, for communicating with the
      remote device. This timeout is also used as the default timeout when
      issuing a command to the network CLI. If the command does not return
      within this time, an error is generated.
default: 120
become:
type: boolean
description:
- The become option will instruct the CLI session to attempt privilege
escalation on platforms that support it. Normally this means
transitioning from user mode to C(enable) mode in the CLI session.
If become is set to True and the remote device does not support
privilege escalation or the privilege has already been elevated, then
this option is silently ignored.
- Can be configured from the CLI via the C(--become) or C(-b) options.
default: False
ini:
- section: privilege_escalation
key: become
env:
- name: ANSIBLE_BECOME
vars:
- name: ansible_become
become_method:
description:
    - This option allows the become method to be specified for handling
      privilege escalation. Typically the become_method value is set to
      C(enable), but it could be defined as other values.
default: sudo
ini:
- section: privilege_escalation
key: become_method
env:
- name: ANSIBLE_BECOME_METHOD
vars:
- name: ansible_become_method
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail.
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to
return from the remote device. If this timer is exceeded before the
command returns, the connection plugin will raise an exception and
close.
default: 10
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
"""
from io import BytesIO
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import cPickle
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import httpapi_loader
from ansible.plugins.connection import NetworkConnectionBase
from ansible.utils.display import Display
display = Display()
class Connection(NetworkConnectionBase):
'''Network API connection'''
transport = 'httpapi'
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._url = None
self._auth = None
if self._network_os:
self.httpapi = httpapi_loader.get(self._network_os, self)
if self.httpapi:
self._sub_plugin = {'type': 'httpapi', 'name': self._network_os, 'obj': self.httpapi}
display.vvvv('loaded API plugin for network_os %s' % self._network_os)
else:
raise AnsibleConnectionFailure('unable to load API plugin for network_os %s' % self._network_os)
else:
raise AnsibleConnectionFailure(
'Unable to automatically determine host network os. Please '
'manually configure ansible_network_os value for this host'
)
display.display('network_os is set to %s' % self._network_os, log_only=True)
def update_play_context(self, pc_data):
"""Updates the play context information for the connection"""
pc_data = to_bytes(pc_data)
if PY3:
pc_data = cPickle.loads(pc_data, encoding='bytes')
else:
pc_data = cPickle.loads(pc_data)
play_context = PlayContext()
play_context.deserialize(pc_data)
messages = ['updating play_context for connection']
if self._play_context.become ^ play_context.become:
self.set_become(play_context)
if play_context.become is True:
messages.append('authorizing connection')
else:
messages.append('deauthorizing connection')
self._play_context = play_context
return messages
def _connect(self):
if not self.connected:
protocol = 'https' if self.get_option('use_ssl') else 'http'
host = self.get_option('host')
port = self.get_option('port') or (443 if protocol == 'https' else 80)
self._url = '%s://%s:%s' % (protocol, host, port)
self.httpapi.set_become(self._play_context)
self.httpapi.login(self.get_option('remote_user'), self.get_option('password'))
self._connected = True
def close(self):
'''
Close the active session to the device
'''
        # Only close the connection if it's connected.
if self._connected:
display.vvvv("closing http(s) connection to device", host=self._play_context.remote_addr)
self.logout()
super(Connection, self).close()
def send(self, path, data, **kwargs):
'''
Sends the command to the device over api
'''
url_kwargs = dict(
timeout=self.get_option('timeout'), validate_certs=self.get_option('validate_certs'),
headers={},
)
url_kwargs.update(kwargs)
if self._auth:
# Avoid modifying passed-in headers
headers = dict(kwargs.get('headers', {}))
headers.update(self._auth)
url_kwargs['headers'] = headers
else:
url_kwargs['url_username'] = self.get_option('remote_user')
url_kwargs['url_password'] = self.get_option('password')
try:
response = open_url(self._url + path, data=data, **url_kwargs)
except HTTPError as exc:
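            # handle_httperror() is provided by the loaded httpapi sub-plugin (see
            # _sub_plugin above); it may return True (error handled, e.g. after
            # re-authentication, so retry the request), False (treat as a
            # connection failure), or anything else to re-raise the original error.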
is_handled = self.handle_httperror(exc)
if is_handled is True:
return self.send(path, data, **kwargs)
elif is_handled is False:
raise AnsibleConnectionFailure('Could not connect to {0}: {1}'.format(self._url + path, exc.reason))
else:
raise
except URLError as exc:
raise AnsibleConnectionFailure('Could not connect to {0}: {1}'.format(self._url + path, exc.reason))
response_buffer = BytesIO()
response_buffer.write(response.read())
# Try to assign a new auth token if one is given
self._auth = self.update_auth(response, response_buffer) or self._auth
return response, response_buffer
| veger/ansible | lib/ansible/plugins/connection/httpapi.py | Python | gpl-3.0 | 9,657 |
#!/usr/bin/env python
#
# Output classes for ETL.
#
# Author: Just van den Broecke
#
import subprocess
import os
import re
import shutil
from stetl.component import Config
from stetl.output import Output
from stetl.util import Util, gdal, ogr, osr
from stetl.packet import FORMAT
log = Util.get_log('ogroutput')
class OgrOutput(Output):
"""
Direct GDAL OGR output via Python OGR wrapper. Via the Python API http://gdal.org/python
OGR Features are written.
This output can write almost any geospatial, OGR-defined, dataformat.
consumes=FORMAT.ogr_feature or FORMAT.ogr_feature_array
"""
# Start attribute config meta
# Applying Decorator pattern with the Config class to provide
# read-only config values from the configured properties.
@Config(ptype=bool, default=False, required=False)
def append(self):
"""
        Add to the destination if it exists (ogr2ogr -append option).
"""
pass
@Config(ptype=str, default=None, required=True)
def dest_data_source(self):
"""
String denoting the OGR data destination. Usually a path to a file like "path/rivers.shp" or connection string
to PostgreSQL like "PG: host=localhost dbname='rivers' user='postgres'".
"""
pass
@Config(ptype=str, default=None, required=False)
def dest_format(self):
"""
        Instructs GDAL to use the driver with that name to open the data destination. Not required
        for many standard formats that are self-describing, like ESRI Shapefile.
        Examples: 'PostgreSQL', 'GeoJSON', etc.
"""
pass
@Config(ptype=list, default=[], required=False)
def dest_create_options(self):
"""
Creation options.
Examples: ..
"""
pass
@Config(ptype=dict, default=None, required=False)
def dest_options(self):
"""
Custom data destination-specific options. Used in gdal.SetConfigOption().
"""
pass
@Config(ptype=list, default=[], required=False)
def layer_create_options(self):
"""
Options for newly created layer (-lco).
"""
pass
@Config(ptype=str, default=None, required=True)
def new_layer_name(self):
"""
        Layer name for the layer created in the destination data source.
"""
pass
@Config(ptype=bool, default=False, required=False)
def overwrite(self):
"""
        Overwrite the destination if it exists (ogr2ogr -overwrite option).
"""
pass
@Config(ptype=str, default=None, required=False)
def target_srs(self):
"""
SRS (projection) for the target.
"""
pass
@Config(ptype=str, default=None, required=False)
def sql(self):
"""
String with SQL query. Mandatory for PostgreSQL OGR dest.
"""
pass
@Config(ptype=bool, default=False, required=False)
def always_apply_lco(self):
"""
Flag to indicate whether the layer creation options should be applied
to all runs.
"""
pass
# End attribute config meta
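    # A minimal, illustrative Stetl config section for this component (the paths,
    # layer name and SRS are hypothetical, and the exact module path of the class
    # may differ per deployment; the keys map to the Config attributes above):
    #
    #   [output_ogr_shape]
    #   class = outputs.ogroutput.OgrOutput
    #   dest_data_source = output/rivers.shp
    #   dest_format = ESRI Shapefile
    #   new_layer_name = rivers
    #   overwrite = True
    #   target_srs = EPSG:4326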
# Constructor
def __init__(self, configdict, section):
Output.__init__(self, configdict, section, consumes=[FORMAT.ogr_feature, FORMAT.ogr_feature_array])
def init(self):
self.ogr = ogr
# http://trac.osgeo.org/gdal/wiki/PythonGotchas
self.gdal = gdal
self.gdal.UseExceptions()
log.info("Using GDAL/OGR version: %d" % int(gdal.VersionInfo('VERSION_NUM')))
# GDAL error handler function
# http://pcjericks.github.io/py-gdalogr-cookbook/gdal_general.html
def gdal_error_handler(err_class, err_num, err_msg):
err_type = {
gdal.CE_None: 'None',
gdal.CE_Debug: 'Debug',
gdal.CE_Warning: 'Warning',
gdal.CE_Failure: 'Failure',
gdal.CE_Fatal: 'Fatal'
}
err_msg = err_msg.replace('\n', ' ')
err_class = err_type.get(err_class, 'None')
log.error('Error Number: %s, Type: %s, Msg: %s' % (err_num, err_class, err_msg))
# install error handler
self.gdal.PushErrorHandler(gdal_error_handler)
# Raise a dummy error for testing
# self.gdal.Error(1, 2, 'test error')
self.update = self.overwrite or self.append
if self.dest_options:
for k in self.dest_options:
self.gdal.SetConfigOption(k, self.dest_options[k])
self.dest_driver = None
self.dest_fd = None
# Loosely based on https://github.com/OSGeo/gdal/blob/trunk/gdal/swig/python/samples/ogr2ogr.py
# /* -------------------------------------------------------------------- */
# /* Try opening the output data source as an existing, writable */
# /* -------------------------------------------------------------------- */
if self.update:
# Try opening in update mode
self.dest_fd = ogr.Open(self.dest_data_source, True)
if self.dest_fd is not None:
if len(self.dest_create_options) > 0:
                    log.warn("Datasource creation options are ignored since an existing datasource is being updated.")
self.dest_driver = self.dest_fd.GetDriver()
if self.overwrite:
self.dest_driver.DeleteDataSource(self.dest_data_source)
self.dest_fd = None
self.dest_driver = None
self.update = False
# /* -------------------------------------------------------------------- */
# /* Find the output driver. */
# /* -------------------------------------------------------------------- */
if self.dest_driver is None:
# Open OGR data dest in write-only mode.
self.dest_driver = ogr.GetDriverByName(self.dest_format)
# Report failure if failed
if self.dest_driver is None:
                log.error("Cannot open OGR data destination: %s. Available drivers are:" % self.dest_data_source)
for iDriver in range(self.ogr.GetDriverCount()):
log.info(" -> " + self.ogr.GetDriver(iDriver).GetName())
raise Exception()
if self.dest_driver.TestCapability(ogr.ODrCCreateDataSource) is False:
log.error("%s driver does not support data source creation." % self.dest_format)
raise Exception()
# /* -------------------------------------------------------------------- */
# /* Create the output data source. */
# /* -------------------------------------------------------------------- */
if self.dest_fd is None:
self.dest_fd = self.dest_driver.CreateDataSource(self.dest_data_source, options=self.dest_create_options)
if self.dest_fd is None:
log.error("%s driver failed to create %s" % (self.dest_format, Util.safe_string_value(self.dest_data_source)))
raise Exception()
# /* -------------------------------------------------------------------- */
# /* Parse the output SRS definition if possible. */
# /* -------------------------------------------------------------------- */
output_srs_ref = None
if self.target_srs is not None:
output_srs_ref = osr.SpatialReference()
if output_srs_ref.SetFromUserInput(self.target_srs) != 0:
log.error("Failed to process SRS definition: %s" % self.target_srs)
raise Exception()
self.layer = self.dest_fd.CreateLayer(self.new_layer_name, output_srs_ref, ogr.wkbUnknown,
self.layer_create_options)
self.feature_def = None
log.info("Opened OGR dest ok: %s " % Util.safe_string_value(self.dest_data_source))
def write(self, packet):
# Are we all done?
if packet.data is None or self.dest_fd is None:
self.write_end(packet)
return packet
if self.layer is None:
log.info("No Layer, end writing to: %s" % Util.safe_string_value(self.dest_data_source))
return packet
# Assume ogr_feature_array input, otherwise convert ogr_feature to list
if type(packet.data) is list:
# Write feature collection to OGR Layer output
for feature in packet.data:
self.write_feature(feature)
self.write_end(packet)
else:
# Write single feature to OGR Layer output
if packet.end_of_stream or packet.end_of_doc:
self.write_end(packet)
return packet
self.write_feature(packet.data)
return packet
def write_feature(self, feature):
# Set the Layer Feature Definition once, using the first feature received
if self.feature_def is None:
self.feature_def = feature.GetDefnRef()
field_count = self.feature_def.GetFieldCount()
for i in range(0, field_count):
field_def = self.feature_def.GetFieldDefn(i)
self.layer.CreateField(field_def)
# Write Feature to the Layer
self.layer.CreateFeature(feature)
# Dispose memory
feature.Destroy()
def write_end(self, packet):
# Destroy not required anymore: http://trac.osgeo.org/gdal/wiki/PythonGotchas
# self.dest_fd.Destroy()
log.info("End writing to: %s" % Util.safe_string_value(self.dest_data_source))
self.dest_fd = None
self.layer = None
return packet
class Ogr2OgrOutput(Output):
"""
Output from GML etree doc to any OGR2OGR output using the GDAL/OGR ogr2ogr command
consumes=FORMAT.etree_doc
"""
def __init__(self, configdict, section):
Output.__init__(self, configdict, section, consumes=FORMAT.etree_doc)
self.temp_file = self.cfg.get('temp_file')
self.regex_vsi_filter = re.compile("^/vsi[a-z0-9_]+/.*", re.I)
# For creating tables the GFS file needs to be newer than
# the .gml file. -lco GML_GFS_TEMPLATE somehow does not work
# so we copy the .gfs file each time with the .gml file with
# the same base name
self.gfs_file = self.cfg.get('gfs_file')
self.lco = self.cfg.get('lco')
self.spatial_extent = self.cfg.get('spatial_extent')
self.ogr2ogr_cmd = self.cfg.get('ogr2ogr_cmd').replace('\\\n', ' ').replace('\n', ' ')
if self.spatial_extent:
self.ogr2ogr_cmd += ' -spat ' + self.spatial_extent
self.first_run = True
def save_doc(self, packet, file_path):
if packet.data is None:
return packet
log.info('writing to file %s' % file_path)
out_file = open(file_path, 'w')
out_file.writelines(packet.to_string())
out_file.close()
# Copy the .gfs file if required, use the same base name
# so ogr2ogr will pick it up.
# Note that for now using a GFS file is not supported with a VSI filter.
if self.gfs_file and not self.regex_vsi_filter.match(file_path):
file_ext = os.path.splitext(file_path)
shutil.copy(self.gfs_file, file_ext[0] + '.gfs')
log.info("written to %s" % file_path)
return packet
def execute_cmd(self, cmd):
use_shell = True
if os.name == 'nt':
use_shell = False
log.info("executing cmd=%s" % cmd)
subprocess.call(cmd, shell=use_shell)
log.info("execute done")
def write(self, packet):
if packet.data is None:
return packet
# Save the doc to a temp file
self.save_doc(packet, self.temp_file)
# Execute ogr2ogr
ogr2ogr_cmd = self.ogr2ogr_cmd
if self.lco and (self.first_run or self.always_apply_lco):
ogr2ogr_cmd += ' ' + self.lco
self.first_run = False
self.execute_cmd(ogr2ogr_cmd)
return packet
| fsteggink/stetl | stetl/outputs/ogroutput.py | Python | gpl-3.0 | 12,260 |
from rest_framework import serializers
from rest_framework.pagination import PaginationSerializer
from rest_framework.serializers import HyperlinkedModelSerializerOptions
from oclapi.fields import HyperlinkedVersionedResourceIdentityField, HyperlinkedResourceVersionIdentityField
from django.conf import settings
class HeaderPaginationSerializer(PaginationSerializer):
def __init__(self, *args, **kwargs):
self._headers_and_data = None
super(HeaderPaginationSerializer, self).__init__(*args, **kwargs)
@property
def data(self):
if self._headers_and_data is None:
self._populate_headers_and_data()
return self._headers_and_data['data']
@property
def headers(self):
if self._headers_and_data is None:
self._populate_headers_and_data()
return self._headers_and_data['headers']
def _populate_headers_and_data(self):
self._headers_and_data = {}
obj = self.object
page_number = obj.number
page_size = obj.paginator.per_page
offset = (page_number - 1) * page_size
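        # For example, page_number=3 with page_size=20 yields offset=40; this is
        # exposed to clients through the 'offset' header assembled below.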
page_fields = self.to_native(obj)
results = page_fields['results']
self._headers_and_data['headers'] = {}
self._headers_and_data['headers']['num_found'] = page_fields['count']
self._headers_and_data['headers']['num_returned'] = len(results)
self._headers_and_data['headers']['offset'] = offset
self.fix_base_url(page_fields, 'next')
self._headers_and_data['headers']['next'] = page_fields['next']
self.fix_base_url(page_fields, 'previous')
self._headers_and_data['headers']['previous'] = page_fields['previous']
self._headers_and_data['data'] = results
def fix_base_url(self, page_fields, field):
if not page_fields[field]:
return
url = page_fields[field].split('/')
del url[0:3]
page_fields[field] = settings.BASE_URL + '/' + '/'.join(url)
class ResourceVersionSerializerOptions(HyperlinkedModelSerializerOptions):
def __init__(self, meta):
super(ResourceVersionSerializerOptions, self).__init__(meta)
self.versioned_object_view_name = getattr(meta, 'versioned_object_view_name', None)
self.versioned_object_field_name = getattr(meta, 'versioned_object_field_name', None)
class ResourceVersionSerializer(serializers.Serializer):
"""
A ResourceVersionSerializer generates a URL for a particular version of a resource,
and another URL for the resource that is versioned.
It does not extend HyperlinkedResourceSerializer, because its URL-generation strategy is different.
"""
_options_class = ResourceVersionSerializerOptions
_default_view_name = '%(model_name)s-detail'
def get_locale(self, obj):
if hasattr(obj, 'names'):
            names = [n for n in obj.names if n.type == 'ISO 639-1']
if names:
return names[0].name
else:
return None
def get_default_fields(self):
fields = super(ResourceVersionSerializer, self).get_default_fields()
if self.opts.view_name is None:
self.opts.view_name = self._get_default_view_name(self.opts.model)
if self.opts.versioned_object_view_name is None:
object = self.object[0] if self.many and len(self.object) > 0 else self.object
if object:
versioned_object_model = object.versioned_object_type.model_class()
self.opts.versioned_object_view_name = self._get_default_view_name(versioned_object_model)
ret = self._dict_class()
if 'version_url' not in fields:
url_field = HyperlinkedResourceVersionIdentityField(
view_name=self.opts.view_name,
)
ret['version_url'] = url_field
versioned_object_field_name = self.opts.versioned_object_field_name or 'versioned_object_url'
if versioned_object_field_name not in fields:
url_field = HyperlinkedVersionedResourceIdentityField(
view_name=self.opts.versioned_object_view_name,
)
ret[versioned_object_field_name] = url_field
ret.update(fields)
fields = ret
return fields
def _get_default_view_name(self, model):
model_meta = model._meta
format_kwargs = {
'app_label': model_meta.app_label,
'model_name': model_meta.object_name.lower()
}
return self._default_view_name % format_kwargs
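# A minimal, illustrative subclass sketch (the model and view names are
# hypothetical; the Meta options are the ones read by
# ResourceVersionSerializerOptions above):
#
#   class ConceptVersionListSerializer(ResourceVersionSerializer):
#       id = serializers.CharField(source='mnemonic')
#
#       class Meta:
#           model = ConceptVersion
#           versioned_object_view_name = 'concept-detail'
#           versioned_object_field_name = 'url'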
| snyaggarwal/oclapi | ocl/oclapi/serializers.py | Python | mpl-2.0 | 4,544 |
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Amazon Elastic Cloud Computing API"""
import collections
import datetime
import logging
import ssl
import sys
import time
logger = logging.getLogger('laniakea')
try:
import boto.ec2
import boto.exception
except ImportError as msg:
logger.error(msg)
sys.exit(-1)
class EC2ManagerException(Exception):
    """Exception class for the EC2 manager."""
class EC2Manager:
"""
Amazon Elastic Cloud Computing manager class.
"""
def __init__(self, images):
self.ec2 = None
self.images = images
self.remote_images = {}
def retry_on_ec2_error(self, func, *args, **kwargs):
"""
Call the given method with the given arguments, retrying if the call
        failed due to an EC2ResponseError or SSL error. This method makes up to
        six attempts, sleeping five seconds between retries; if the final attempt
        also fails, the error is propagated.
:param func: Function to call
:type func: function
"""
exception_retry_count = 6
while True:
try:
return func(*args, **kwargs)
except (boto.exception.EC2ResponseError, ssl.SSLError) as msg:
exception_retry_count -= 1
if exception_retry_count <= 0:
raise msg
time.sleep(5)
def connect(self, region, **kw_params):
"""Connect to a EC2.
:param region: The name of the region to connect to.
:type region: str
:param kw_params:
:type kw_params: dict
"""
self.ec2 = boto.ec2.connect_to_region(region, **kw_params)
if not self.ec2:
raise EC2ManagerException('Unable to connect to region "%s"' % region)
self.remote_images.clear()
if self.images and any(('image_name' in img and 'image_id' not in img) for img in self.images.values()):
for img in self.images.values():
if 'image_name' in img and 'image_id' not in img:
img['image_id'] = self.resolve_image_name(img.pop('image_name'))
def resolve_image_name(self, image_name):
"""Look up an AMI for the connected region based on an image name.
:param image_name: The name of the image to resolve.
:type image_name: str
:return: The AMI for the given image.
:rtype: str
"""
# look at each scope in order of size
scopes = ['self', 'amazon', 'aws-marketplace', None]
if image_name in self.remote_images:
return self.remote_images[image_name]
for scope in scopes:
logger.info('Retrieving available AMIs owned by %s...', scope)
if scope is not None:
remote_images = self.ec2.get_all_images(owners=[scope], filters={'name': image_name})
else:
remote_images = self.ec2.get_all_images(filters={'name': image_name})
self.remote_images.update({ri.name: ri.id for ri in remote_images})
if image_name in self.remote_images:
return self.remote_images[image_name]
raise EC2ManagerException('Failed to resolve AMI name "%s" to an AMI' % image_name)
def create_on_demand(self,
instance_type='default',
tags=None,
root_device_type='ebs',
size='default',
vol_type='gp2',
delete_on_termination=False):
"""Create one or more EC2 on-demand instances.
:param size: Size of root device
:type size: int
:param delete_on_termination:
:type delete_on_termination: boolean
:param vol_type:
:type vol_type: str
:param root_device_type: The type of the root device.
:type root_device_type: str
:param instance_type: A section name in amazon.json
:type instance_type: str
:param tags:
:type tags: dict
:return: List of instances created
:rtype: list
"""
name, size = self._get_default_name_size(instance_type, size)
if root_device_type == 'ebs':
self.images[instance_type]['block_device_map'] = \
self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
reservation = self.ec2.run_instances(**self.images[instance_type])
logger.info('Creating requested tags...')
for i in reservation.instances:
self.retry_on_ec2_error(self.ec2.create_tags, [i.id], tags or {})
instances = []
logger.info('Waiting for instances to become ready...')
while len(reservation.instances): # pylint: disable=len-as-condition
for i in reservation.instances:
if i.state == 'running':
instances.append(i)
reservation.instances.pop(reservation.instances.index(i))
logger.info('%s is %s at %s (%s)',
i.id,
i.state,
i.public_dns_name,
i.ip_address)
else:
self.retry_on_ec2_error(i.update)
return instances
def create_spot_requests(self,
price,
instance_type='default',
root_device_type='ebs',
size='default',
vol_type='gp2',
delete_on_termination=False,
timeout=None):
"""Request creation of one or more EC2 spot instances.
:param size:
:param vol_type:
:param delete_on_termination:
:param root_device_type: The type of the root device.
:type root_device_type: str
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param timeout: Seconds to keep the request open (cancelled if not fulfilled).
:type timeout: int
:return: List of requests created
:rtype: list
"""
name, size = self._get_default_name_size(instance_type, size)
if root_device_type == 'ebs':
self.images[instance_type]['block_device_map'] = \
self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
valid_until = None
if timeout is not None:
valid_until = (datetime.datetime.now() + datetime.timedelta(seconds=timeout)).isoformat()
requests = self.ec2.request_spot_instances(price, valid_until=valid_until, **self.images[instance_type])
return [r.id for r in requests]
def check_spot_requests(self, requests, tags=None):
"""Check status of one or more EC2 spot instance requests.
:param requests: List of EC2 spot instance request IDs.
:type requests: list
:param tags:
:type tags: dict
:return: List of boto.ec2.instance.Instance's created, order corresponding to requests param (None if request
still open, boto.ec2.instance.Reservation if request is no longer open)
:rtype: list
"""
instances = [None] * len(requests)
ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
successes_by_id = collections.OrderedDict()
for req in ec2_requests:
logger.info('Request %s is %s and %s.',
req.id,
req.status.code,
req.state)
if req.instance_id:
successes_by_id[req.instance_id] = req.id
elif req.state != "open":
# return the request so we don't try again
instances[requests.index(req.id)] = req
if successes_by_id:
ec2_instances = self.retry_on_ec2_error(self.ec2.get_only_instances, list(successes_by_id.keys()))
if not ec2_instances:
raise EC2ManagerException('Failed to get instances [%s] for requests [%s]'
% (', '.join(successes_by_id.keys()),
', '.join(successes_by_id.values())))
if tags:
self.retry_on_ec2_error(self.ec2.create_tags, [instance.id for instance in ec2_instances], tags)
for req_id, instance in zip(successes_by_id.values(), ec2_instances):
instances[requests.index(req_id)] = instance
logger.info('%s is %s at %s (%s)',
instance.id,
instance.state,
instance.public_dns_name,
instance.ip_address)
return instances
def cancel_spot_requests(self, requests):
"""Cancel one or more EC2 spot instance requests.
:param requests: List of EC2 spot instance request IDs.
:type requests: list
"""
ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
for req in ec2_requests:
req.cancel()
def create_spot(self,
price,
instance_type='default',
tags=None,
root_device_type='ebs',
size='default',
vol_type='gp2',
delete_on_termination=False,
timeout=None):
"""Create one or more EC2 spot instances.
:param root_device_type:
:param size:
:param vol_type:
:param delete_on_termination:
:param timeout:
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param tags:
:type tags: dict
:return: List of instances created
:rtype: list
"""
request_ids = self.create_spot_requests(price,
instance_type=instance_type,
root_device_type=root_device_type,
size=size,
vol_type=vol_type,
delete_on_termination=delete_on_termination)
instances = []
logger.info('Waiting on fulfillment of requested spot instances.')
poll_resolution = 5.0
time_exceeded = False
while request_ids:
time.sleep(poll_resolution)
new_instances = self.check_spot_requests(request_ids, tags=tags)
if timeout is not None:
timeout -= poll_resolution
time_exceeded = timeout <= 0
fulfilled = []
            for idx, instance in enumerate(new_instances):
                if instance is None:
                    # Request still open; keep polling it.
                    continue
                # Requests that closed without an instance come back as request
                # objects; cancel the ones that failed due to bad parameters.
                if not isinstance(instance, boto.ec2.instance.Instance) \
                        and instance.status.code == "bad-parameters":
                    logger.error('Spot request for "%s" failed due to bad parameters.', instance.id)
                    self.cancel_spot_requests([instance.id])
                fulfilled.append(idx)
                if isinstance(instance, boto.ec2.instance.Instance):
                    instances.append(instance)
for idx in reversed(fulfilled):
request_ids.pop(idx)
if request_ids and time_exceeded:
self.cancel_spot_requests(request_ids)
break
return instances
def _scale_down(self, instances, count):
"""Return a list of |count| last created instances by launch time.
:param instances: A list of instances.
:type instances: list
:param count: Number of instances to scale down.
:type count: integer
:return: List of instances to be scaled down.
:rtype: list
"""
i = sorted(instances, key=lambda i: i.launch_time, reverse=True)
if not i:
return []
running = len(i)
logger.info('%d instance/s are running.', running)
logger.info('Scaling down %d instances of those.', count)
if count > running:
logger.info('Scale-down value is > than running instance/s - using maximum of %d!', running)
count = running
return i[:count]
def _get_default_name_size(self, instance_type, size):
"""Checks if root device name/size were specified in the image definition.
:param instance_type: A section name in amazon.json.
:type instance_type: str
:param size:
:type size: int
:return: Root device name and size
:rtype: tuple(str, int)
"""
if 'root_size' in self.images[instance_type]:
size = self.images[instance_type].pop('root_size')
if 'root_device' in self.images[instance_type]:
name = self.images[instance_type].pop('root_device')
else:
name = '/dev/sda1'
return name, size
def _configure_ebs_volume(self, vol_type, name, size, delete_on_termination):
"""Sets the desired root EBS size, otherwise the default EC2 value is used.
:param vol_type: Type of EBS storage - gp2 (SSD), io1 or standard (magnetic)
:type vol_type: str
:param size: Desired root EBS size.
:type size: int
:param delete_on_termination: Toggle this flag to delete EBS volume on termination.
:type delete_on_termination: bool
:return: A BlockDeviceMapping object.
:rtype: object
"""
# From GitHub boto docs: http://git.io/veyDv
root_dev = boto.ec2.blockdevicemapping.BlockDeviceType()
root_dev.delete_on_termination = delete_on_termination
root_dev.volume_type = vol_type
if size != 'default':
root_dev.size = size # change root volume to desired size
bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
bdm[name] = root_dev
return bdm
def stop(self, instances, count=0):
"""Stop each provided running instance.
:param count:
:param instances: A list of instances.
:type instances: list
"""
if not instances:
return
if count > 0:
instances = self._scale_down(instances, count)
self.ec2.stop_instances([i.id for i in instances])
def terminate(self, instances, count=0):
"""Terminate each provided running or stopped instance.
:param count:
:param instances: A list of instances.
:type instances: list
"""
if not instances:
return
if count > 0:
instances = self._scale_down(instances, count)
self.ec2.terminate_instances([i.id for i in instances])
def find(self, instance_ids=None, filters=None):
"""Flatten list of reservations to a list of instances.
:param instance_ids: A list of instance ids to filter by
:type instance_ids: list
:param filters: A dict of Filter.N values defined in http://goo.gl/jYNej9
:type filters: dict
:return: A flattened list of filtered instances.
:rtype: list
"""
instances = []
reservations = self.retry_on_ec2_error(self.ec2.get_all_instances, instance_ids=instance_ids, filters=filters)
for reservation in reservations:
instances.extend(reservation.instances)
return instances
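# A minimal, illustrative usage sketch (the region, credentials, AMI name pattern,
# instance type and tags are hypothetical; 'default' must be a section of the
# images dict, mirroring amazon.json):
#
#   images = {'default': {'image_name': 'fuzzing-worker*', 'instance_type': 't2.micro'}}
#   cluster = EC2Manager(images)
#   cluster.connect('us-east-1', aws_access_key_id='...', aws_secret_access_key='...')
#   instances = cluster.create_on_demand(tags={'Name': 'fuzzing-worker'})
#   cluster.terminate(instances)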
| MozillaSecurity/laniakea | laniakea/core/providers/ec2/manager.py | Python | mpl-2.0 | 15,959 |
# -*- coding: utf-8 -*-
# Read raw data
# Author: Jimmy Royer
# [email protected]
# June 20, 2016
import pandas as pd
# Training Sample -- All the Mutations
data = pd.read_csv("./input/str.csv")
# Create target variable
data['y'] = (data['dr'] == "r") * 1
data.drop('dr', axis=1, inplace=True)
# List of Features to Keep in the Analysis
features = [var for var in data.columns if var != "y"]
# List subset of Features
features_small = ["SNP_CN_781687_A128G_K43R_rpsL", "SNP_N_1472359_A514C_rrs", "SNP_CN_781822_A263C_K88T_rpsL", "SNP_N_1473246_A1401G_rrs",
"SNP_CN_781822_A263G_K88R_rpsL", "SNP_CN_4407809_C394A_D132Y_gid", "SNP_CN_4408156_A47C_L16R_gid", "SNP_N_1472358_C513T_rrs", "SNP_CN_4407927_T276G_E92D_gid",
"SNP_N_1472751_A906G_rrs", "SNP_CN_4407934_A269C_L90R_gid", "SNP_N_1472362_C517T_rrs", "SNP_N_1472753_A908C_rrs", "SNP_CN_781822_A263T_K88M_rpsL",
"SNP_CN_4407832_A371G_V124A_gid", "SNP_CN_4408091_G112T_P38T_gid", "SNP_I_1473637_A21G_inter_rrs_rrl", "SNP_CN_4408094_C109T_G37R_gid", "DEL_CF_4407640_d563A_188_gid",
"SNP_N_1473109_T1264G_rrs", "SNP_CN_4407967_A236C_L79W_gid", "SNP_CN_4407967_A236G_L79S_gid", "SNP_CN_4407768_C435A_L145F_gid", "SNP_CN_4407995_T208G_S70R_gid",
"DEL_CF_4407852_d351C_117_gid", "SNP_N_1473167_T1322G_rrs", "DEL_CF_4408023_d180T_60_gid", "DEL_CF_4408116_d87G_29_gid", "SNP_CN_4408060_T143G_H48P_gid",
"SNP_CN_4408138_T65C_Y22C_gid", "SNP_CN_4408064_G139A_R47W_gid", "SNP_CN_4408148_C55G_A19P_gid", "SNP_CN_4407947_G256A_L86F_gid", "SNP_CN_4407916_C287A_R96L_gid",
"SNP_CN_4407748_A455G_L152S_gid", "SNP_N_1473343_G1498T_rrs", "SNP_N_1472337_C492T_rrs", "SNP_CN_4407985_C218G_G73A_gid", "SNP_CN_4408102_C101T_G34E_gid"]
| IQSS/gentb-site | R/Neural_Network/program/Load_Data_STR.py | Python | agpl-3.0 | 1,728 |
from rest_framework import serializers
from api.generics.serializers import DynamicFieldsModelSerializer
from iati.models import Activity
from traceability import models as chain_models
class SimpleActivitySerializer(serializers.ModelSerializer):
class Meta:
model = Activity
fields = (
'id',
'iati_identifier',
)
class ChainNodeSerializer(DynamicFieldsModelSerializer):
activity = SimpleActivitySerializer()
class Meta:
model = chain_models.ChainNode
fields = (
'id',
'chain',
'activity',
'activity_oipa_id',
'activity_iati_id',
'tier',
'bol',
'eol'
)
class ChainSerializer(DynamicFieldsModelSerializer):
links = serializers.HyperlinkedIdentityField(
read_only=True,
view_name='chains:chain-link-list',
)
errors = serializers.HyperlinkedIdentityField(
read_only=True,
view_name='chains:chain-error-list',
)
activities = serializers.HyperlinkedIdentityField(
read_only=True,
view_name='chains:chain-activity-list',
)
transactions = serializers.HyperlinkedIdentityField(
read_only=True,
view_name='chains:chain-transaction-list',
)
nodes = serializers.HyperlinkedIdentityField(
read_only=True,
view_name='chains:chain-node-list',
)
url = serializers.HyperlinkedIdentityField(
view_name='chains:chain-detail', read_only=True
)
class Meta:
model = chain_models.Chain
fields = (
'id',
'url',
'name',
'last_updated',
'links',
'errors',
'activities',
'transactions',
'nodes'
)
class ChainLinkRelationSerializer(serializers.ModelSerializer):
class Meta:
model = chain_models.ChainLinkRelation
fields = (
'relation',
'from_node',
'related_id'
)
class ChainLinkSerializer(DynamicFieldsModelSerializer):
start_node = ChainNodeSerializer(
fields=(
'id',
'activity_oipa_id',
'activity_iati_id',
'tier',
'eol',
'bol'))
end_node = ChainNodeSerializer(
fields=(
'id',
'activity_oipa_id',
'activity_iati_id',
'tier',
'eol',
'bol'))
relations = ChainLinkRelationSerializer(many=True)
class Meta:
model = chain_models.ChainLink
fields = (
'id',
'start_node',
'end_node',
'relations'
)
class ChainNodeErrorSerializer(DynamicFieldsModelSerializer):
class Meta:
model = chain_models.ChainNodeError
fields = (
'chain_node',
'error_type',
'mentioned_activity_or_org',
'related_id',
'warning_level'
)
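# A minimal, illustrative usage sketch (the `node` instance is hypothetical; the
# `fields=` keyword is the DynamicFieldsModelSerializer behaviour relied on above):
#
#   serializer = ChainNodeSerializer(node, fields=('id', 'tier', 'activity_oipa_id'))
#   serializer.data  # only the requested fields are rendered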
| openaid-IATI/OIPA | OIPA/api/chain/serializers.py | Python | agpl-3.0 | 3,050 |
from django.db import models
import pandas as pd
from qPRC.lib.parser import parse
from qPRC.lib.fitter import fit as _fit
from qPRC.lib.fitter import slice as _slice
class Dataset(models.Model):
file = models.FileField(upload_to='files/%Y/%m/%d')
def __str__(self):
return str(self.file)
def data(self):
"""return the parsed data from the Dataset file"""
try:
return self._data
except AttributeError:
self._data = parse(self.file.url)
return self._data
def fit(self, well, dye):
"""return the fitted model of the concentrations"""
params = _fit(self.data(), well, dye)
return params
# TODO: method to return the time series of fitted values
    def fitted_values(self, well, dye):
        """Return the time series of values predicted by the fitted model."""
        params = self.fit(well, dye)
        concentrations = _slice(self.data().loc[:, (well, dye)])
        times = list(range(1, len(concentrations) + 1))
        # NOTE: `f` is assumed to be the model function used by the fitter and
        # still needs to be imported from qPRC.lib.fitter.
        fitted_vals = f(times, *params)
        return pd.Series(fitted_vals, times)
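# A minimal, illustrative usage sketch (the primary key, well and dye identifiers
# are hypothetical and depend on the uploaded file):
#
#   dataset = Dataset.objects.get(pk=1)
#   params = dataset.fit('A1', 'FAM')
#   series = dataset.fitted_values('A1', 'FAM')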
| dpelc/qPRC | qPRC/apps/datasets/models.py | Python | agpl-3.0 | 1,059 |
# -*- coding: utf-8 -*-
# (c) 2017 Diagram Software S.L.
# Copyright 2017 Ignacio Ibeas <[email protected]>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import aeat_certificate_password
| factorlibre/l10n-spain | l10n_es_aeat_certificate/wizards/__init__.py | Python | agpl-3.0 | 215 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-07 19:06
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("wizard_builder", "0020_choice_extra_info_text")]
operations = [
migrations.CreateModel(
name="ChoiceOption",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
(
"question",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="wizard_builder.Choice",
),
),
],
)
]
| project-callisto/callisto-core | callisto_core/wizard_builder/migrations/0021_choiceoption.py | Python | agpl-3.0 | 1,029 |
from queue import Queue
from bears.python.PyStringConcatBear import PyStringConcatBear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.results.Result import Result
from coalib.settings.Section import Section
valid_string = """
string = 'foo'
'bar'
"""
concat_non_string = """
list = [] +
[]
"""
invalid_single_quote_string = """
string = 'foo' +
'bar'
"""
invalid_double_quote_string = """
string = "foo" +
"bar"
"""
invalid_acute_string = """
string = `foo` +
`bar`
"""
class PyStringConcatBearTest(LocalBearTestHelper):
def setUp(self):
self.uut = PyStringConcatBear(Section('name'), Queue())
def test_valid(self):
self.check_validity(self.uut, valid_string.splitlines())
def test_non_string(self):
self.check_validity(self.uut, concat_non_string.splitlines())
def test_single_quote_string_invalid(self):
self.check_results(
self.uut,
invalid_single_quote_string.splitlines(),
[Result.from_values(
'PyStringConcatBear',
'Use of explicit string concatenation with `+` '
'should be avoided.',
line=2, column=16, end_line=3, end_column=17, file='default')
],
filename='default'
)
def test_double_quote_string_invalid(self):
self.check_results(
self.uut,
invalid_double_quote_string.splitlines(),
[Result.from_values(
'PyStringConcatBear',
'Use of explicit string concatenation with `+` '
'should be avoided.',
line=2, column=16, end_line=3, end_column=17, file='default')
],
filename='default'
)
def test_acute_string_invalid(self):
self.check_results(
self.uut,
invalid_acute_string.splitlines(),
[Result.from_values(
'PyStringConcatBear',
'Use of explicit string concatenation with `+` '
'should be avoided.',
line=2, column=16, end_line=3, end_column=17, file='default')
],
filename='default'
)
| coala/coala-bears | tests/python/PyStringConcatBearTest.py | Python | agpl-3.0 | 2,269 |
from .models import ExtendedRegisterForm
| yewsiang/botmother | app/auth/__init__.py | Python | agpl-3.0 | 41 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from django.apps import apps
from django.db.models import signals
def connect_userstories_signals():
from taiga.projects.tagging import signals as tagging_handlers
from . import signals as handlers
    # When deleting user stories we must disable task signals while deleting and
    # re-enable them at the end
signals.pre_delete.connect(handlers.disable_task_signals,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid='disable_task_signals')
signals.post_delete.connect(handlers.enable_tasks_signals,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid='enable_tasks_signals')
# Cached prev object version
signals.pre_save.connect(handlers.cached_prev_us,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="cached_prev_us")
# Role Points
signals.post_save.connect(handlers.update_role_points_when_create_or_edit_us,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="update_role_points_when_create_or_edit_us")
# Tasks
signals.post_save.connect(handlers.update_milestone_of_tasks_when_edit_us,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="update_milestone_of_tasks_when_edit_us")
# Open/Close US and Milestone
signals.post_save.connect(handlers.try_to_close_or_open_us_and_milestone_when_create_or_edit_us,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="try_to_close_or_open_us_and_milestone_when_create_or_edit_us")
signals.post_delete.connect(handlers.try_to_close_milestone_when_delete_us,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="try_to_close_milestone_when_delete_us")
# Tags
signals.pre_save.connect(tagging_handlers.tags_normalization,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="tags_normalization_user_story")
def connect_userstories_custom_attributes_signals():
from taiga.projects.custom_attributes import signals as custom_attributes_handlers
signals.post_save.connect(custom_attributes_handlers.create_custom_attribute_value_when_create_user_story,
sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="create_custom_attribute_value_when_create_user_story")
def connect_all_userstories_signals():
connect_userstories_signals()
connect_userstories_custom_attributes_signals()
def disconnect_userstories_signals():
signals.pre_save.disconnect(sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="cached_prev_us")
signals.post_save.disconnect(sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="update_role_points_when_create_or_edit_us")
signals.post_save.disconnect(sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="update_milestone_of_tasks_when_edit_us")
signals.post_save.disconnect(sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="try_to_close_or_open_us_and_milestone_when_create_or_edit_us")
signals.post_delete.disconnect(sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="try_to_close_milestone_when_delete_us")
signals.pre_save.disconnect(sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="tags_normalization_user_story")
def disconnect_userstories_custom_attributes_signals():
signals.post_save.disconnect(sender=apps.get_model("userstories", "UserStory"),
dispatch_uid="create_custom_attribute_value_when_create_user_story")
def disconnect_all_userstories_signals():
disconnect_userstories_signals()
disconnect_userstories_custom_attributes_signals()
class UserStoriesAppConfig(AppConfig):
name = "taiga.projects.userstories"
verbose_name = "User Stories"
def ready(self):
connect_all_userstories_signals()
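# A minimal, illustrative sketch of how these helpers can be paired around a bulk
# operation so the per-save handlers are skipped (the bulk import call itself is
# hypothetical):
#
#   disconnect_all_userstories_signals()
#   try:
#       bulk_import_user_stories(project, data)
#   finally:
#       connect_all_userstories_signals()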
| xdevelsistemas/taiga-back-community | taiga/projects/userstories/apps.py | Python | agpl-3.0 | 5,467 |
# -*- coding: utf-8 -*-
# Kuulemma
# Copyright (C) 2014, Fast Monkeys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from flask import abort, Blueprint, jsonify, request
from flask.ext.login import current_user
from kuulemma.extensions import db
from kuulemma.models import Comment, Hearing
from kuulemma.models.comment import COMMENTABLE_TYPES
from kuulemma.schemas import CommentSchema
comment = Blueprint(
name='comment',
import_name=__name__,
url_prefix='/hearings/<int:hearing_id>/links/comments'
)
@comment.route('')
def index(hearing_id):
hearing = Hearing.query.get_or_404(hearing_id)
# Fetching comments.
comments = (
hearing
.all_comments
.options(db.joinedload(Comment.comment))
.options(db.joinedload(Comment.image))
.options(db.joinedload(Comment.alternative))
.options(db.joinedload(Comment.section))
.options(db.joinedload(Comment.question))
)
if not (
current_user.is_authenticated() and
(current_user.is_official or current_user.is_admin)
):
comments = comments.filter_by(is_hidden=False)
# Pagination.
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 20, type=int)
order_by = request.args.get('order_by', 'created_at')
if order_by == 'like_count':
comments = (
comments
.filter(Comment.like_count > 0)
.order_by(
db.desc(Comment.like_count),
db.desc(Comment.id)
)
)
else:
comments = comments.order_by(db.desc(Comment.created_at))
pagination = comments.paginate(page, per_page)
# Serialization
serialized = CommentSchema(
pagination.items,
exclude=('object_type', 'object_id'),
many=True
)
return jsonify({
'comments': serialized.data,
'page': page,
'per_page': per_page
}), 200
@comment.route('', methods=['POST'])
def create(hearing_id):
hearing = Hearing.query.get_or_404(hearing_id)
schema = CommentSchema()
data, errors = schema.load(request.get_json())
if errors:
return jsonify({'error': errors}), 400
if not hearing.is_open:
return jsonify({'error': 'The hearing is no longer open.'}), 400
if is_spam(request.get_json()):
abort(400)
commented_object = (
COMMENTABLE_TYPES[data['object_type']].query
.get(int(data['object_id']))
)
if not commented_object:
return jsonify(
{'error': 'The target of this comment was not found.'}
), 400
# TODO: Check that the commented object belongs to the hearing.
comment = Comment(
title=data['title'],
body=data['body'],
username=data['username']
)
setattr(comment, data['object_type'], commented_object)
db.session.add(comment)
db.session.commit()
return jsonify({'comments': CommentSchema(comment).data}), 201
@comment.route('/<int:comment_id>', methods=['PUT'])
def update(hearing_id, comment_id):
if not (
current_user.is_authenticated() and
(current_user.is_official or current_user.is_admin)
):
abort(401)
Hearing.query.get_or_404(hearing_id)
comment = Comment.query.get_or_404(comment_id)
if not request.get_json() or is_spam(request.get_json()):
abort(400)
schema = CommentSchema(
only=('title', 'body', 'username', 'is_hidden')
)
data, errors = schema.load(request.get_json())
if errors:
return jsonify({'error': errors}), 400
comment.title = data['title']
comment.body = data['body']
comment.username = data['username']
comment.is_hidden = data['is_hidden']
comment.updated_at = datetime.utcnow()
db.session.commit()
serialized = CommentSchema(
comment,
exclude=('object_type', 'object_id')
)
return jsonify({'comment': serialized.data}), 200
def is_spam(json):
return json.get('hp') is not None
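# A minimal, illustrative JSON body for the create endpoint above (the values are
# hypothetical; object_type must be a key of COMMENTABLE_TYPES, and the honeypot
# field 'hp' must be absent or null, otherwise is_spam() rejects the request):
#
#   POST /hearings/1/links/comments
#   {
#       "title": "Parking",
#       "body": "Please keep the bike lanes.",
#       "username": "Anna",
#       "object_type": "section",
#       "object_id": 12
#   }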
| fastmonkeys/kuulemma | kuulemma/views/comment.py | Python | agpl-3.0 | 4,693 |
"""
Content Type Gating Configuration Models
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from course_modes.models import CourseMode
from lms.djangoapps.courseware.masquerade import (
get_course_masquerade,
get_masquerading_user_group,
is_masquerading_as_specific_student,
)
from openedx.core.djangoapps.config_model_utils.models import StackedConfigurationModel
from openedx.core.djangoapps.config_model_utils.utils import is_in_holdback
from openedx.features.content_type_gating.helpers import FULL_ACCESS, LIMITED_ACCESS
from openedx.features.course_duration_limits.config import (
CONTENT_TYPE_GATING_FLAG,
FEATURE_BASED_ENROLLMENT_GLOBAL_KILL_FLAG,
)
from student.models import CourseEnrollment
from student.role_helpers import has_staff_roles
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID
@python_2_unicode_compatible
class ContentTypeGatingConfig(StackedConfigurationModel):
"""
A ConfigurationModel used to manage configuration for Content Type Gating (Feature Based Enrollments).
"""
STACKABLE_FIELDS = ('enabled', 'enabled_as_of', 'studio_override_enabled')
enabled_as_of = models.DateTimeField(
default=None,
null=True,
verbose_name=_('Enabled As Of'),
blank=True,
help_text=_(
'If the configuration is Enabled, then all enrollments '
'created after this date and time (UTC) will be affected.'
)
)
studio_override_enabled = models.NullBooleanField(
default=None,
verbose_name=_('Studio Override Enabled'),
blank=True,
help_text=_(
'Allow Feature Based Enrollment visibility to be overriden '
'on a per-component basis in Studio.'
)
)
@classmethod
def has_full_access_role_in_masquerade(cls, user, course_key, course_masquerade, student_masquerade,
user_partition):
"""
The roles of the masquerade user are used to determine whether the content gate displays.
The gate will not appear if the masquerade user has any of the following roles:
Staff, Instructor, Beta Tester, Forum Community TA, Forum Group Moderator, Forum Moderator, Forum Administrator
"""
if student_masquerade:
# If a request is masquerading as a specific user, the user variable will represent the correct user.
if user and user.id and has_staff_roles(user, course_key):
return True
elif user_partition:
# If the current user is masquerading as a generic student in a specific group,
# then return the value based on that group.
masquerade_group = get_masquerading_user_group(course_key, user, user_partition)
if masquerade_group is None:
audit_mode_id = settings.COURSE_ENROLLMENT_MODES.get(CourseMode.AUDIT, {}).get('id')
# We are checking the user partition id here because currently content
# cannot have both the enrollment track partition and content gating partition
# configured simultaneously. We may change this in the future and allow
# configuring both partitions on content and selecting both partitions in masquerade.
if course_masquerade.user_partition_id == ENROLLMENT_TRACK_PARTITION_ID:
return course_masquerade.group_id != audit_mode_id
elif masquerade_group is FULL_ACCESS:
return True
elif masquerade_group is LIMITED_ACCESS:
return False
@classmethod
def enabled_for_enrollment(cls, enrollment=None, user=None, course_key=None, user_partition=None):
"""
Return whether Content Type Gating is enabled for this enrollment.
Content Type Gating is enabled for an enrollment if it is enabled for
the course being enrolled in (either specifically, or via a containing context,
such as the org, site, or globally), and if the configuration is specified to be
``enabled_as_of`` before the enrollment was created.
Only one of enrollment and (user, course_key) may be specified at a time.
Arguments:
enrollment: The enrollment being queried.
user: The user being queried.
course_key: The CourseKey of the course being queried.
"""
if FEATURE_BASED_ENROLLMENT_GLOBAL_KILL_FLAG.is_enabled():
return False
if CONTENT_TYPE_GATING_FLAG.is_enabled():
return True
if enrollment is not None and (user is not None or course_key is not None):
raise ValueError('Specify enrollment or user/course_key, but not both')
if enrollment is None and (user is None or course_key is None):
raise ValueError('Both user and course_key must be specified if no enrollment is provided')
if enrollment is None and user is None and course_key is None:
raise ValueError('At least one of enrollment or user and course_key must be specified')
if course_key is None:
course_key = enrollment.course_id
if enrollment is None:
enrollment = CourseEnrollment.get_enrollment(user, course_key)
if user is None and enrollment is not None:
user = enrollment.user
course_masquerade = get_course_masquerade(user, course_key)
no_masquerade = course_masquerade is None
student_masquerade = is_masquerading_as_specific_student(user, course_key)
user_variable_represents_correct_user = (no_masquerade or student_masquerade)
if course_masquerade:
if cls.has_full_access_role_in_masquerade(user, course_key, course_masquerade, student_masquerade,
user_partition):
return False
# When a request is not in a masquerade state the user variable represents the correct user.
elif user and user.id and has_staff_roles(user, course_key):
return False
# check if user is in holdback
if user_variable_represents_correct_user and is_in_holdback(user):
return False
# enrollment might be None if the user isn't enrolled. In that case,
# return enablement as if the user enrolled today
# Also, ignore enrollment creation date if the user is masquerading.
if enrollment is None or course_masquerade:
return cls.enabled_for_course(course_key=course_key, target_datetime=timezone.now())
else:
current_config = cls.current(course_key=enrollment.course_id)
return current_config.enabled_as_of_datetime(target_datetime=enrollment.created)
@classmethod
def enabled_for_course(cls, course_key, target_datetime=None):
"""
Return whether Content Type Gating is enabled for this course as of a particular date.
Content Type Gating is enabled for a course on a date if it is enabled either specifically,
or via a containing context, such as the org, site, or globally, and if the configuration
is specified to be ``enabled_as_of`` before ``target_datetime``.
        Arguments:
            course_key: The CourseKey of the course being queried.
            target_datetime: The datetime to check enablement as of. Defaults to the current date and time.
"""
if FEATURE_BASED_ENROLLMENT_GLOBAL_KILL_FLAG.is_enabled():
return False
if CONTENT_TYPE_GATING_FLAG.is_enabled():
return True
if target_datetime is None:
target_datetime = timezone.now()
current_config = cls.current(course_key=course_key)
return current_config.enabled_as_of_datetime(target_datetime=target_datetime)
def clean(self):
if self.enabled and self.enabled_as_of is None:
raise ValidationError({'enabled_as_of': _('enabled_as_of must be set when enabled is True')})
def enabled_as_of_datetime(self, target_datetime):
"""
Return whether this Content Type Gating configuration context is enabled as of a date and time.
Arguments:
target_datetime (:class:`datetime.datetime`): The datetime that ``enabled_as_of`` must be equal to or before
"""
if FEATURE_BASED_ENROLLMENT_GLOBAL_KILL_FLAG.is_enabled():
return False
if CONTENT_TYPE_GATING_FLAG.is_enabled():
return True
# Explicitly cast this to bool, so that when self.enabled is None the method doesn't return None
return bool(self.enabled and self.enabled_as_of <= target_datetime)
def __str__(self):
return "ContentTypeGatingConfig(enabled={!r}, enabled_as_of={!r}, studio_override_enabled={!r})".format(
self.enabled,
self.enabled_as_of,
self.studio_override_enabled,
)
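# Hedged usage sketch (added for illustration, not part of the original module):
# the classmethods above are the typical entry points; `user` and `course_key`
# below are hypothetical stand-ins for a real user object and CourseKey.
#
# if ContentTypeGatingConfig.enabled_for_enrollment(user=user, course_key=course_key):
#     ...  # gate graded content for this learner
# if ContentTypeGatingConfig.enabled_for_course(course_key=course_key):
#     ...  # gating applies to anyone enrolling in this course right now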
| a-parhom/edx-platform | openedx/features/content_type_gating/models.py | Python | agpl-3.0 | 9,344 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Unit tests for Extended Temporal Memory.
"""
import tempfile
import unittest
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from htmresearch.algorithms.extended_temporal_memory import ExtendedTemporalMemory
# No serialization for now, skip corresponding tests
capnp = None
class ExtendedTemporalMemoryTest(unittest.TestCase):
def setUp(self):
self.tm = ExtendedTemporalMemory(learnOnOneCell=False)
def testInitInvalidParams(self):
# Invalid columnDimensions
kwargs = {"columnDimensions": [], "cellsPerColumn": 32}
self.assertRaises(ValueError, ExtendedTemporalMemory, **kwargs)
# Invalid cellsPerColumn
kwargs = {"columnDimensions": [2048], "cellsPerColumn": 0}
self.assertRaises(ValueError, ExtendedTemporalMemory, **kwargs)
kwargs = {"columnDimensions": [2048], "cellsPerColumn": -10}
self.assertRaises(ValueError, ExtendedTemporalMemory, **kwargs)
def testlearnOnOneCellParam(self):
tm = self.tm
self.assertFalse(tm.learnOnOneCell)
tm = ExtendedTemporalMemory(learnOnOneCell=True)
self.assertTrue(tm.learnOnOneCell)
def testActivateCorrectlyPredictiveCells(self):
tm = self.tm
prevPredictiveCells = set([0, 237, 1026, 26337, 26339, 55536])
activeColumns = set([32, 47, 823])
prevMatchingCells = set()
(activeCells,
winnerCells,
predictedColumns,
predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells,
prevMatchingCells,
activeColumns)
self.assertEqual(activeCells, set([1026, 26337, 26339]))
self.assertEqual(winnerCells, set([1026, 26337, 26339]))
self.assertEqual(predictedColumns, set([32, 823]))
self.assertEqual(predictedInactiveCells, set())
def testActivateCorrectlyPredictiveCellsEmpty(self):
tm = self.tm
# No previous predictive cells, no active columns
prevPredictiveCells = set()
activeColumns = set()
prevMatchingCells = set()
(activeCells,
winnerCells,
predictedColumns,
predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells,
prevMatchingCells,
activeColumns)
self.assertEqual(activeCells, set())
self.assertEqual(winnerCells, set())
self.assertEqual(predictedColumns, set())
self.assertEqual(predictedInactiveCells, set())
# No previous predictive cells, with active columns
prevPredictiveCells = set()
activeColumns = set([32, 47, 823])
prevMatchingCells = set()
(activeCells,
winnerCells,
predictedColumns,
predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells,
prevMatchingCells,
activeColumns)
self.assertEqual(activeCells, set())
self.assertEqual(winnerCells, set())
self.assertEqual(predictedColumns, set())
self.assertEqual(predictedInactiveCells, set())
# No active columns, with previously predictive cells
prevPredictiveCells = set([0, 237, 1026, 26337, 26339, 55536])
activeColumns = set()
prevMatchingCells = set()
(activeCells,
winnerCells,
predictedColumns,
predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells,
prevMatchingCells,
activeColumns)
self.assertEqual(activeCells, set())
self.assertEqual(winnerCells, set())
self.assertEqual(predictedColumns, set())
self.assertEqual(predictedInactiveCells, set())
def testActivateCorrectlyPredictiveCellsOrphan(self):
tm = self.tm
tm.predictedSegmentDecrement = 0.001
prevPredictiveCells = set([])
activeColumns = set([32, 47, 823])
prevMatchingCells = set([32, 47])
(activeCells,
winnerCells,
predictedColumns,
predictedInactiveCells) = tm.activateCorrectlyPredictiveCells(prevPredictiveCells,
prevMatchingCells,
activeColumns)
self.assertEqual(activeCells, set([]))
self.assertEqual(winnerCells, set([]))
self.assertEqual(predictedColumns, set([]))
self.assertEqual(predictedInactiveCells, set([32,47]))
def testBurstColumns(self):
tm = ExtendedTemporalMemory(
cellsPerColumn=4,
connectedPermanence=0.50,
minThreshold=1,
seed=42
)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.6)
connections.createSynapse(0, 37, 0.4)
connections.createSynapse(0, 477, 0.9)
connections.createSegment(0)
connections.createSynapse(1, 49, 0.9)
connections.createSynapse(1, 3, 0.8)
connections.createSegment(1)
connections.createSynapse(2, 733, 0.7)
connections.createSegment(108)
connections.createSynapse(3, 486, 0.9)
activeColumns = set([0, 1, 26])
predictedColumns = set([26])
prevActiveCells = set([23, 37, 49, 733])
prevWinnerCells = set([23, 37, 49, 733])
prevActiveApicalCells = set()
learnOnOneCell = False
chosenCellForColumn = {}
(activeCells,
winnerCells,
learningSegments,
apicalLearningSegments,
chosenCellForColumn) = tm.burstColumns(activeColumns,
predictedColumns,
prevActiveCells,
prevActiveApicalCells,
prevWinnerCells,
learnOnOneCell,
chosenCellForColumn,
connections,
tm.apicalConnections)
self.assertEqual(activeCells, set([0, 1, 2, 3, 4, 5, 6, 7]))
    randomWinner = 4  # with seed=42, cell 4 is expected to be the randomly chosen winner
    self.assertEqual(winnerCells, set([0, randomWinner]))
    self.assertEqual(learningSegments, set([0, 4]))  # segment 4 is the newly created segment
    # Check that the new segment was added to the winner cell in column 1
self.assertEqual(connections.segmentsForCell(randomWinner), set([4]))
def testBurstColumnsEmpty(self):
tm = self.tm
activeColumns = set()
predictedColumns = set()
prevActiveCells = set()
prevWinnerCells = set()
connections = tm.connections
prevActiveApicalCells = set()
learnOnOneCell = False
chosenCellForColumn = {}
(activeCells,
winnerCells,
learningSegments,
apicalLearningSegments,
chosenCellForColumn) = tm.burstColumns(activeColumns,
predictedColumns,
prevActiveCells,
prevActiveApicalCells,
prevWinnerCells,
learnOnOneCell,
chosenCellForColumn,
connections,
tm.apicalConnections)
self.assertEqual(activeCells, set())
self.assertEqual(winnerCells, set())
self.assertEqual(learningSegments, set())
self.assertEqual(apicalLearningSegments, set())
def testLearnOnSegments(self):
tm = ExtendedTemporalMemory(maxNewSynapseCount=2)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.6)
connections.createSynapse(0, 37, 0.4)
connections.createSynapse(0, 477, 0.9)
connections.createSegment(1)
connections.createSynapse(1, 733, 0.7)
connections.createSegment(8)
connections.createSynapse(2, 486, 0.9)
connections.createSegment(100)
prevActiveSegments = set([0, 2])
learningSegments = set([1, 3])
prevActiveCells = set([23, 37, 733])
winnerCells = set([0])
prevWinnerCells = set([10, 11, 12, 13, 14])
predictedInactiveCells = set()
prevMatchingSegments = set()
tm.learnOnSegments(prevActiveSegments,
learningSegments,
prevActiveCells,
winnerCells,
prevWinnerCells,
connections,
predictedInactiveCells,
prevMatchingSegments)
# Check segment 0
synapseData = connections.dataForSynapse(0)
self.assertAlmostEqual(synapseData.permanence, 0.7)
synapseData = connections.dataForSynapse(1)
self.assertAlmostEqual(synapseData.permanence, 0.5)
synapseData = connections.dataForSynapse(2)
self.assertAlmostEqual(synapseData.permanence, 0.8)
# Check segment 1
synapseData = connections.dataForSynapse(3)
self.assertAlmostEqual(synapseData.permanence, 0.8)
self.assertEqual(len(connections.synapsesForSegment(1)), 2)
# Check segment 2
synapseData = connections.dataForSynapse(4)
self.assertAlmostEqual(synapseData.permanence, 0.9)
self.assertEqual(len(connections.synapsesForSegment(2)), 1)
# Check segment 3
self.assertEqual(len(connections.synapsesForSegment(3)), 2)
def testComputePredictiveCells(self):
tm = ExtendedTemporalMemory(
activationThreshold=2,
minThreshold=2,
predictedSegmentDecrement=0.004
)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.6)
connections.createSynapse(0, 37, 0.5)
connections.createSynapse(0, 477, 0.9)
connections.createSegment(1)
connections.createSynapse(1, 733, 0.7)
connections.createSynapse(1, 733, 0.4)
connections.createSegment(1)
connections.createSynapse(2, 974, 0.9)
connections.createSegment(8)
connections.createSynapse(3, 486, 0.9)
connections.createSegment(100)
activeCells = set([23, 37, 733, 974])
(activeSegments,
predictiveCells,
matchingSegments,
matchingCells) = tm.computePredictiveCells(activeCells, connections)
self.assertEqual(activeSegments, set([0]))
self.assertEqual(predictiveCells, set([0]))
self.assertEqual(matchingSegments, set([0,1]))
self.assertEqual(matchingCells, set([0,1]))
def testBestMatchingCell(self):
tm = ExtendedTemporalMemory(
connectedPermanence=0.50,
minThreshold=1,
seed=42
)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.6)
connections.createSynapse(0, 37, 0.4)
connections.createSynapse(0, 477, 0.9)
connections.createSegment(0)
connections.createSynapse(1, 49, 0.9)
connections.createSynapse(1, 3, 0.8)
connections.createSegment(1)
connections.createSynapse(2, 733, 0.7)
connections.createSegment(108)
connections.createSynapse(3, 486, 0.9)
activeCells = set([23, 37, 49, 733])
activeApicalCells = set()
self.assertEqual(tm.bestMatchingCell(tm.cellsForColumn(0),
activeCells,
activeApicalCells,
connections,
tm.apicalConnections),
(0, 0, None))
self.assertEqual(tm.bestMatchingCell(tm.cellsForColumn(3),
activeCells,
activeApicalCells,
connections,
tm.apicalConnections),
(103, None, None)) # Random cell from column
self.assertEqual(tm.bestMatchingCell(tm.cellsForColumn(999),
activeCells,
activeApicalCells,
connections,
tm.apicalConnections),
(31979, None, None)) # Random cell from column
def testBestMatchingCellFewestSegments(self):
tm = ExtendedTemporalMemory(
columnDimensions=[2],
cellsPerColumn=2,
connectedPermanence=0.50,
minThreshold=1,
seed=42
)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 3, 0.3)
activeSynapsesForSegment = set([])
activeApicalCells = set()
for _ in range(100):
# Never pick cell 0, always pick cell 1
(cell, _, _) = tm.bestMatchingCell(tm.cellsForColumn(0),
activeSynapsesForSegment,
activeApicalCells,
connections,
tm.apicalConnections)
self.assertEqual(cell, 1)
def testBestMatchingSegment(self):
tm = ExtendedTemporalMemory(
connectedPermanence=0.50,
minThreshold=1
)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.6)
connections.createSynapse(0, 37, 0.4)
connections.createSynapse(0, 477, 0.9)
connections.createSegment(0)
connections.createSynapse(1, 49, 0.9)
connections.createSynapse(1, 3, 0.8)
connections.createSegment(1)
connections.createSynapse(2, 733, 0.7)
connections.createSegment(8)
connections.createSynapse(3, 486, 0.9)
activeCells = set([23, 37, 49, 733])
self.assertEqual(tm.bestMatchingSegment(0,
activeCells,
connections),
(0, 2))
self.assertEqual(tm.bestMatchingSegment(1,
activeCells,
connections),
(2, 1))
self.assertEqual(tm.bestMatchingSegment(8,
activeCells,
connections),
(None, None))
self.assertEqual(tm.bestMatchingSegment(100,
activeCells,
connections),
(None, None))
def testLeastUsedCell(self):
tm = ExtendedTemporalMemory(
columnDimensions=[2],
cellsPerColumn=2,
seed=42
)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 3, 0.3)
for _ in range(100):
# Never pick cell 0, always pick cell 1
self.assertEqual(tm.leastUsedCell(tm.cellsForColumn(0),
connections),
1)
def testAdaptSegment(self):
tm = self.tm
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.6)
connections.createSynapse(0, 37, 0.4)
connections.createSynapse(0, 477, 0.9)
tm.adaptSegment(0, set([0, 1]), connections,
tm.permanenceIncrement,
tm.permanenceDecrement)
synapseData = connections.dataForSynapse(0)
self.assertAlmostEqual(synapseData.permanence, 0.7)
synapseData = connections.dataForSynapse(1)
self.assertAlmostEqual(synapseData.permanence, 0.5)
synapseData = connections.dataForSynapse(2)
self.assertAlmostEqual(synapseData.permanence, 0.8)
def testAdaptSegmentToMax(self):
tm = self.tm
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.9)
tm.adaptSegment(0, set([0]), connections,
tm.permanenceIncrement,
tm.permanenceDecrement)
synapseData = connections.dataForSynapse(0)
self.assertAlmostEqual(synapseData.permanence, 1.0)
# Now permanence should be at max
tm.adaptSegment(0, set([0]), connections,
tm.permanenceIncrement,
tm.permanenceDecrement)
synapseData = connections.dataForSynapse(0)
self.assertAlmostEqual(synapseData.permanence, 1.0)
def testAdaptSegmentToMin(self):
tm = self.tm
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.1)
tm.adaptSegment(0, set(), connections,
tm.permanenceIncrement,
tm.permanenceDecrement)
synapses = connections.synapsesForSegment(0)
self.assertFalse(0 in synapses)
def testPickCellsToLearnOn(self):
tm = ExtendedTemporalMemory(seed=42)
connections = tm.connections
connections.createSegment(0)
winnerCells = set([4, 47, 58, 93])
self.assertEqual(tm.pickCellsToLearnOn(2, 0, winnerCells, connections),
set([4, 93])) # randomly picked
self.assertEqual(tm.pickCellsToLearnOn(100, 0, winnerCells, connections),
set([4, 47, 58, 93]))
self.assertEqual(tm.pickCellsToLearnOn(0, 0, winnerCells, connections),
set())
def testPickCellsToLearnOnAvoidDuplicates(self):
tm = ExtendedTemporalMemory(seed=42)
connections = tm.connections
connections.createSegment(0)
connections.createSynapse(0, 23, 0.6)
winnerCells = set([23])
# Ensure that no additional (duplicate) cells were picked
self.assertEqual(tm.pickCellsToLearnOn(2, 0, winnerCells, connections),
set())
def testColumnForCell1D(self):
tm = ExtendedTemporalMemory(
columnDimensions=[2048],
cellsPerColumn=5
)
self.assertEqual(tm.columnForCell(0), 0)
self.assertEqual(tm.columnForCell(4), 0)
self.assertEqual(tm.columnForCell(5), 1)
self.assertEqual(tm.columnForCell(10239), 2047)
def testColumnForCell2D(self):
tm = ExtendedTemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
self.assertEqual(tm.columnForCell(0), 0)
self.assertEqual(tm.columnForCell(3), 0)
self.assertEqual(tm.columnForCell(4), 1)
self.assertEqual(tm.columnForCell(16383), 4095)
def testColumnForCellInvalidCell(self):
tm = ExtendedTemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
try:
tm.columnForCell(16383)
except IndexError:
self.fail("IndexError raised unexpectedly")
args = [16384]
self.assertRaises(IndexError, tm.columnForCell, *args)
args = [-1]
self.assertRaises(IndexError, tm.columnForCell, *args)
def testCellsForColumn1D(self):
tm = ExtendedTemporalMemory(
columnDimensions=[2048],
cellsPerColumn=5
)
expectedCells = set([5, 6, 7, 8, 9])
self.assertEqual(tm.cellsForColumn(1), expectedCells)
def testCellsForColumn2D(self):
tm = ExtendedTemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
expectedCells = set([256, 257, 258, 259])
self.assertEqual(tm.cellsForColumn(64), expectedCells)
def testCellsForColumnInvalidColumn(self):
tm = ExtendedTemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=4
)
try:
tm.cellsForColumn(4095)
except IndexError:
self.fail("IndexError raised unexpectedly")
args = [4096]
self.assertRaises(IndexError, tm.cellsForColumn, *args)
args = [-1]
self.assertRaises(IndexError, tm.cellsForColumn, *args)
def testNumberOfColumns(self):
tm = ExtendedTemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=32
)
self.assertEqual(tm.numberOfColumns(), 64 * 64)
def testNumberOfCells(self):
tm = ExtendedTemporalMemory(
columnDimensions=[64, 64],
cellsPerColumn=32
)
self.assertEqual(tm.numberOfCells(), 64 * 64 * 32)
def testMapCellsToColumns(self):
tm = ExtendedTemporalMemory(
columnDimensions=[100],
cellsPerColumn=4
)
columnsForCells = tm.mapCellsToColumns(set([0, 1, 2, 5, 399]))
self.assertEqual(columnsForCells[0], set([0, 1, 2]))
self.assertEqual(columnsForCells[1], set([5]))
self.assertEqual(columnsForCells[99], set([399]))
def testCalculatePredictiveCells(self):
tm = ExtendedTemporalMemory(
columnDimensions=[4],
cellsPerColumn=5
)
predictiveDistalCells = set([2, 3, 5, 8, 10, 12, 13, 14])
predictiveApicalCells = set([1, 5, 7, 11, 14, 15, 17])
self.assertEqual(
tm.calculatePredictiveCells(predictiveDistalCells, predictiveApicalCells),
set([2, 3, 5, 14])
)
def testCompute(self):
tm = ExtendedTemporalMemory(
columnDimensions=[4],
cellsPerColumn=10,
learnOnOneCell=False,
initialPermanence=0.2,
connectedPermanence=0.7,
activationThreshold=1
)
seg1 = tm.connections.createSegment(0)
seg2 = tm.connections.createSegment(20)
seg3 = tm.connections.createSegment(25)
try:
tm.connections.createSynapse(seg1, 15, 0.9)
tm.connections.createSynapse(seg2, 35, 0.9)
tm.connections.createSynapse(seg2, 45, 0.9) # external cell
tm.connections.createSynapse(seg3, 35, 0.9)
tm.connections.createSynapse(seg3, 50, 0.9) # external cell
except IndexError:
self.fail("IndexError raised unexpectedly for distal segments")
aSeg1 = tm.apicalConnections.createSegment(1)
aSeg2 = tm.apicalConnections.createSegment(25)
try:
tm.apicalConnections.createSynapse(aSeg1, 3, 0.9)
tm.apicalConnections.createSynapse(aSeg2, 1, 0.9)
except IndexError:
self.fail("IndexError raised unexpectedly for apical segments")
activeColumns = set([1, 3])
activeExternalCells = set([5, 10, 15])
activeApicalCells = set([1, 2, 3, 4])
tm.compute(
activeColumns,
activeExternalCells=activeExternalCells,
activeApicalCells=activeApicalCells,
learn=False
)
activeColumns = set([0, 2])
tm.compute(
activeColumns,
activeExternalCells=set(),
activeApicalCells=set()
)
self.assertEqual(tm.activeCells, set([0, 20, 25]))
def testLearning(self):
tm = ExtendedTemporalMemory(
columnDimensions=[4],
cellsPerColumn=10,
learnOnOneCell=False,
initialPermanence=0.5,
connectedPermanence=0.6,
activationThreshold=1,
minThreshold=1,
maxNewSynapseCount=2,
permanenceDecrement=0.05,
permanenceIncrement=0.2
)
seg1 = tm.connections.createSegment(0)
seg2 = tm.connections.createSegment(10)
seg3 = tm.connections.createSegment(20)
seg4 = tm.connections.createSegment(30)
try:
tm.connections.createSynapse(seg1, 10, 0.9)
tm.connections.createSynapse(seg2, 20, 0.9)
tm.connections.createSynapse(seg3, 30, 0.9)
tm.connections.createSynapse(seg3, 41, 0.9)
tm.connections.createSynapse(seg3, 25, 0.9)
tm.connections.createSynapse(seg4, 0, 0.9)
except IndexError:
self.fail("IndexError raised unexpectedly for distal segments")
aSeg1 = tm.apicalConnections.createSegment(0)
aSeg2 = tm.apicalConnections.createSegment(20)
try:
tm.apicalConnections.createSynapse(aSeg1, 42, 0.8)
tm.apicalConnections.createSynapse(aSeg2, 43, 0.8)
except IndexError:
self.fail("IndexError raised unexpectedly for apical segments")
activeColumns = set([1, 3])
activeExternalCells = set([1]) # will be re-indexed to 41
activeApicalCells = set([2, 3]) # will be re-indexed to 42, 43
tm.compute(
activeColumns,
activeExternalCells=activeExternalCells,
activeApicalCells=activeApicalCells,
learn=False
)
activeColumns = set([0, 2])
tm.compute(
activeColumns,
activeExternalCells=None,
activeApicalCells=None,
learn=True
)
self.assertEqual(tm.activeCells, set([0, 20]))
# distal learning
synapse = list(tm.connections.synapsesForSegment(seg1))[0]
self.assertEqual(tm.connections.dataForSynapse(synapse).permanence, 1.0)
synapse = list(tm.connections.synapsesForSegment(seg2))[0]
self.assertEqual(tm.connections.dataForSynapse(synapse).permanence, 0.9)
synapse = list(tm.connections.synapsesForSegment(seg3))[0]
self.assertEqual(tm.connections.dataForSynapse(synapse).permanence, 1.0)
synapse = list(tm.connections.synapsesForSegment(seg3))[1]
self.assertEqual(tm.connections.dataForSynapse(synapse).permanence, 1.0)
synapse = list(tm.connections.synapsesForSegment(seg3))[2]
self.assertEqual(tm.connections.dataForSynapse(synapse).permanence, 0.85)
synapse = list(tm.connections.synapsesForSegment(seg4))[0]
self.assertEqual(tm.connections.dataForSynapse(synapse).permanence, 0.9)
# apical learning
synapse = list(tm.apicalConnections.synapsesForSegment(aSeg1))[0]
self.assertEqual(tm.apicalConnections.dataForSynapse(synapse).permanence,
1.0)
synapse = list(tm.apicalConnections.synapsesForSegment(aSeg2))[0]
self.assertEqual(tm.apicalConnections.dataForSynapse(synapse).permanence,
1.0)
@unittest.skipUnless(capnp is not None, "No serialization available for ETM")
def testWriteRead(self):
tm1 = ExtendedTemporalMemory(
columnDimensions=[100],
cellsPerColumn=4,
activationThreshold=7,
initialPermanence=0.37,
connectedPermanence=0.58,
minThreshold=4,
maxNewSynapseCount=18,
permanenceIncrement=0.23,
permanenceDecrement=0.08,
seed=91
)
# Run some data through before serializing
self.patternMachine = PatternMachine(100, 4)
self.sequenceMachine = SequenceMachine(self.patternMachine)
sequence = self.sequenceMachine.generateFromNumbers(range(5))
for _ in range(3):
for pattern in sequence:
tm1.compute(pattern)
    # TemporalMemoryProto_capnp is only importable when capnp serialization is
    # available; this test is skipped above when capnp is None.
    proto1 = TemporalMemoryProto_capnp.TemporalMemoryProto.new_message()
tm1.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = TemporalMemoryProto_capnp.TemporalMemoryProto.read(f)
# Load the deserialized proto
tm2 = ExtendedTemporalMemory.read(proto2)
# Check that the two temporal memory objects have the same attributes
self.assertEqual(tm1, tm2)
# Run a couple records through after deserializing and check results match
tm1.compute(self.patternMachine.get(0))
tm2.compute(self.patternMachine.get(0))
self.assertEqual(set(tm1.getActiveCells()), set(tm2.getActiveCells()))
self.assertEqual(set(tm1.getPredictiveCells()),
set(tm2.getPredictiveCells()))
self.assertEqual(set(tm1.getWinnerCells()), set(tm2.getWinnerCells()))
self.assertEqual(tm1.connections, tm2.connections)
tm1.compute(self.patternMachine.get(3))
tm2.compute(self.patternMachine.get(3))
self.assertEqual(set(tm1.getActiveCells()), set(tm2.getActiveCells()))
self.assertEqual(set(tm1.getPredictiveCells()),
set(tm2.getPredictiveCells()))
self.assertEqual(set(tm1.getWinnerCells()), set(tm2.getWinnerCells()))
self.assertEqual(tm1.connections, tm2.connections)
if __name__ == '__main__':
unittest.main()
| cogmission/nupic.research | tests/extended_temporal_memory/etm_unit_test.py | Python | agpl-3.0 | 28,519 |
# easy_thumbnails face cropping processor
# Much of the below taken from http://stackoverflow.com/a/13243712/669631
try:
import cv
faceCascade = cv.Load('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
except Exception:
    # OpenCV (cv) is unavailable or the cascade file is missing; disable face detection
    faceCascade = False
# Select one of the haarcascade files:
# haarcascade_frontalface_alt.xml <-- Best one?
# haarcascade_frontalface_alt2.xml
# haarcascade_frontalface_alt_tree.xml
# haarcascade_frontalface_default.xml
# haarcascade_profileface.xml
def detectFaces(im):
# This function takes a PIL image and finds the patterns defined in the
    # haarcascade. Modified from: http://www.lucaamore.com/?p=638
# Convert a PIL image to a greyscale cv image
# from: http://pythonpath.wordpress.com/2012/05/08/pil-to-opencv-image/
im = im.convert('L')
cv_im = cv.CreateImageHeader(im.size, cv.IPL_DEPTH_8U, 1)
cv.SetData(cv_im, im.tostring(), im.size[0])
# variables
min_size = (20, 20)
haar_scale = 1.1
min_neighbors = 3
haar_flags = 0
# Equalize the histogram
cv.EqualizeHist(cv_im, cv_im)
# Detect the faces
faces = cv.HaarDetectObjects(
cv_im, faceCascade, cv.CreateMemStorage(0),
haar_scale, min_neighbors, haar_flags, min_size
)
return faces
def face_crop(im, size, face=False, **kwargs):
if not face or not faceCascade:
return im
source_x, source_y = [int(v) for v in im.size]
faces = detectFaces(im)
if faces:
cropBox = [0, 0, 0, 0]
for face, n in faces:
if face[2] > cropBox[2] or face[3] > cropBox[3]:
cropBox = face
xDelta = int(max(cropBox[2] * 0.25, 0))
yDelta = int(max(cropBox[3] * 0.25, 0))
# Convert cv box to PIL box [left, upper, right, lower]
box = [
max(cropBox[0] - xDelta, 0),
max(cropBox[1] - yDelta, 0),
min(cropBox[0] + cropBox[2] + xDelta, source_x - 1),
min(cropBox[1] + cropBox[3] + yDelta, source_y - 1)
]
im = im.crop(box)
return im
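# Hedged usage sketch (added for illustration, not part of the original module).
# Direct invocation on a PIL image; 'photo.jpg' and the target size are made up:
#
# from PIL import Image
# cropped = face_crop(Image.open('photo.jpg'), (100, 100), face=True)
#
# In a Django project, a processor like this is normally registered through
# easy_thumbnails' THUMBNAIL_PROCESSORS setting (assumed standard hook), e.g.:
#
# THUMBNAIL_PROCESSORS = (
#     'easy_thumbnails.processors.colorspace',
#     'easy_thumbnails.processors.autocrop',
#     'speeches.thumbnail_processors.face_crop',
#     'easy_thumbnails.processors.scale_and_crop',
#     'easy_thumbnails.processors.filters',
# )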
| opencorato/sayit | speeches/thumbnail_processors.py | Python | agpl-3.0 | 2,093 |
from datetime import datetime
from luigi.date_interval import Date
from edx.analytics.tasks.tests.acceptance import AcceptanceTestCase, as_list_param, when_geolocation_data_available
class LocationByCourseAcceptanceTest(AcceptanceTestCase):
INPUT_FILE = 'location_by_course_tracking.log'
COURSE_ID = u'edX/Open_DemoX/edx_demo_course'
COURSE_ID2 = u'course-v1:edX+Open_DemoX+edx_demo_course2'
DATE_INTERVAL = Date(2014, 7, 21)
START_DATE = DATE_INTERVAL.date_a
END_DATE = DATE_INTERVAL.date_b
SQL_FIXTURES = [
'load_student_courseenrollment_for_location_by_course.sql',
'load_auth_user_for_location_by_course.sql'
]
@when_geolocation_data_available
def test_location_by_course(self):
self.upload_tracking_log(self.INPUT_FILE, self.START_DATE)
for fixture_file_name in self.SQL_FIXTURES:
self.execute_sql_fixture_file(fixture_file_name)
self.task.launch([
'InsertToMysqlLastCountryPerCourseTask',
'--source', as_list_param(self.test_src),
'--interval', self.DATE_INTERVAL.to_string(),
'--n-reduce-tasks', str(self.NUM_REDUCERS),
])
self.maxDiff = None
with self.export_db.cursor() as cursor:
cursor.execute('SELECT * FROM course_enrollment_location_current ORDER BY country_code, course_id')
results = cursor.fetchall()
# TODO: what happens if the test starts near the UTC day boundary. The task sees that today is day "X", yet this
# code sees the following day since the day boundary was crossed between then and now.
today = datetime.utcnow().date()
self.assertItemsEqual([
row[1:6] for row in results
], [
(today, self.COURSE_ID, None, 1, 1),
(today, self.COURSE_ID, 'UNKNOWN', 0, 1),
(today, self.COURSE_ID, 'IE', 1, 1),
(today, self.COURSE_ID2, 'TH', 1, 1),
(today, self.COURSE_ID, 'TH', 1, 1),
])
| edx/edx-analytics-pipeline | edx/analytics/tasks/tests/acceptance/test_location_per_course.py | Python | agpl-3.0 | 2,023 |
from django.contrib import admin
from django.db import models
from models import APIMetric
class APIMetricAdmin(admin.ModelAdmin):
list_display = ('apicall', 'user', 'created_at')
list_filter = ('apicall',)
admin.site.register(APIMetric, APIMetricAdmin)
| rossjones/ScraperWikiX | web/api/admin.py | Python | agpl-3.0 | 278 |
import re
from functools import partial, update_wrapper
from schemagic.core import validate_against_schema
from schemagic.utils import merge
def predicate_validator(predicate, name=None, coercer=None, message=None, data=None):
"""Builds new validator function that tests, and optionally coerces, data against the supplied predicate and coercer
:param predicate: function that accepts one argument, the data, returns true if data is good, false otherwise.
:param name: name of the supplied predicate. useful when building validators from anonymous functions.
:param coercer: a function that accepts the data and returns a modification of that data. If no coercer is provided,
the data will still be subject to any coercions that occur within the validation. This is to allow for additional
flexibility, for instance, you may want to convert a datetime string into a datatime object before validating it.
:param message: A message that described the problem with the data if it wasn't validated correctly.
This message will be automatically suffixed with a printout of the data recieved by the validator.
If message is not provided, a default message is used that references the predicate by name.
:param data: the data to be validated
:return: if data is not supplied, returns a copy of the predicate validator function with all the other
values "filled in". i.e. it returns a curried function.
If the data is supplied, returns the, possibly transformed, data if it is valid, else throws an error.
"""
predicate.__name__ = name or predicate.__name__
if data is None:
return update_wrapper(partial(predicate_validator, predicate, name, coercer, message), predicate)
data = coercer(data) if coercer else data
message = (message or "data did not meet requirements of the predicate {0}".format(predicate.__name__)) + "\n value: {0}".format(data)
if not predicate(data):
raise ValueError(message)
return data
formatted_string = lambda str_format, **kwargs: predicate_validator(
lambda data: re.match(str_format, data),
**merge(dict(name="formatted_string: {0}".format(str_format),
coercer=str,
message="string not of expected format: expected: {0}".format(format)),
kwargs))
"""Stringifies the data, then matches it against the supplied regex string. Valid if match is returned"""
#: ``formatted_string(r'\d+\-\d+\-\d+')``: checks to see if the data is of the type returned by stringifying a datetime.date object
date_string = formatted_string(r'\d+\-\d+\-\d+')
#: ``formatted_string(r'\d+\-\d+\-\d+ \d+:\d+:\d+\.\d+')``: checks to see if the data is of the type returned by stringifying a datetime.datetime object
datetime_string = formatted_string(r'\d+\-\d+\-\d+ \d+:\d+:\d+\.\d+')
#: ``predicate_validator``: Usually composed with or_, checks to see if the data is the value None
null = predicate_validator(lambda val: val is None, name="null")
or_ = lambda *schemata: predicate_validator(
lambda val: any(validate_against_schema(schema, val) for schema in schemata),
name="any of schema's {0}".format(schemata),
)
"""checks to see if the data is valid with any of the given data definitions"""
enum = lambda *possible_vals: predicate_validator(
lambda val: val in possible_vals,
name="enumeration of allowable values: {0}".format(possible_vals),
)
"""checks to see if the data is one of the provided values"""
| Mechrophile/schemagic | schemagic/validators.py | Python | lgpl-2.1 | 3,519 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
Tests for Spack's built-in parallel make support.
This just tests whether the right args are getting passed to make.
"""
import os
import shutil
import tempfile
import unittest
from llnl.util.filesystem import *
from spack.build_environment import MakeExecutable
from spack.util.environment import path_put_first
class MakeExecutableTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
make_exe = join_path(self.tmpdir, 'make')
with open(make_exe, 'w') as f:
f.write('#!/bin/sh\n')
f.write('echo "$@"')
os.chmod(make_exe, 0o700)
path_put_first('PATH', [self.tmpdir])
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_make_normal(self):
make = MakeExecutable('make', 8)
self.assertEqual(make(output=str).strip(), '-j8')
self.assertEqual(make('install', output=str).strip(), '-j8 install')
def test_make_explicit(self):
make = MakeExecutable('make', 8)
self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
self.assertEqual(make('install', parallel=True,
output=str).strip(), '-j8 install')
def test_make_one_job(self):
make = MakeExecutable('make', 1)
self.assertEqual(make(output=str).strip(), '')
self.assertEqual(make('install', output=str).strip(), 'install')
def test_make_parallel_false(self):
make = MakeExecutable('make', 8)
self.assertEqual(make(parallel=False, output=str).strip(), '')
self.assertEqual(make('install', parallel=False,
output=str).strip(), 'install')
def test_make_parallel_disabled(self):
make = MakeExecutable('make', 8)
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'true'
self.assertEqual(make(output=str).strip(), '')
self.assertEqual(make('install', output=str).strip(), 'install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = '1'
self.assertEqual(make(output=str).strip(), '')
self.assertEqual(make('install', output=str).strip(), 'install')
# These don't disable (false and random string)
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'false'
self.assertEqual(make(output=str).strip(), '-j8')
self.assertEqual(make('install', output=str).strip(), '-j8 install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'foobar'
self.assertEqual(make(output=str).strip(), '-j8')
self.assertEqual(make('install', output=str).strip(), '-j8 install')
del os.environ['SPACK_NO_PARALLEL_MAKE']
def test_make_parallel_precedence(self):
make = MakeExecutable('make', 8)
        # SPACK_NO_PARALLEL_MAKE should take precedence over parallel=True
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'true'
self.assertEqual(make(parallel=True, output=str).strip(), '')
self.assertEqual(make('install', parallel=True,
output=str).strip(), 'install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = '1'
self.assertEqual(make(parallel=True, output=str).strip(), '')
self.assertEqual(make('install', parallel=True,
output=str).strip(), 'install')
# These don't disable (false and random string)
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'false'
self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
self.assertEqual(make('install', parallel=True,
output=str).strip(), '-j8 install')
os.environ['SPACK_NO_PARALLEL_MAKE'] = 'foobar'
self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
self.assertEqual(make('install', parallel=True,
output=str).strip(), '-j8 install')
del os.environ['SPACK_NO_PARALLEL_MAKE']
| wscullin/spack | lib/spack/spack/test/make_executable.py | Python | lgpl-2.1 | 5,061 |
# -*- coding: utf-8 -*-
"""
ZUGBRUECKE
Calling routines in Windows DLLs from Python scripts running on unixlike systems
https://github.com/pleiszenburg/zugbruecke
src/zugbruecke/core/callback_server.py: Classes for managing callback routines
Required to run on platform / side: [UNIX, WINE]
Copyright (C) 2017-2019 Sebastian M. Ernst <[email protected]>
<LICENSE_BLOCK>
The contents of this file are subject to the GNU Lesser General Public License
Version 2.1 ("LGPL" or "License"). You may not use this file except in
compliance with the License. You may obtain a copy of the License at
https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt
https://github.com/pleiszenburg/zugbruecke/blob/master/LICENSE
Software distributed under the License is distributed on an "AS IS" basis,
WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the
specific language governing rights and limitations under the License.
</LICENSE_BLOCK>
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# IMPORT
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from pprint import pformat as pf
import traceback
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CALLBACK SERVER CLASS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class callback_translator_server_class:
def __init__(self, data, routine_name, routine_handler, argtypes_d, restype_d, memsync_d):
# Store my own name
self.name = routine_name
# Store handler
self.handler = routine_handler
# Store handle on data
self.data = data
# Get handle on log
self.log = self.data.log
# Store definition of argument types
self.argtypes_d = argtypes_d
# Store definition of return value type
self.restype_d = restype_d
# Store memsync definition
self.memsync_d = memsync_d
def __call__(self, *args):
# Log status
self.log.out('[callback-server] Trying to call callback routine "%s" ...' % self.name)
# Log status
self.log.out('[callback-server] ... parameters are "%r". Packing and pushing to client ...' % (args,))
try:
# Handle memory
mem_package_list = self.data.client_pack_memory_list(args, self.memsync_d)
except Exception as e:
# Log status
self.log.out('[callback-server] ... memory packing failed!')
# Push traceback to log
self.log.err(traceback.format_exc())
raise e
try:
# Pack arguments and call RPC callback function (packed arguments are shipped to Unix side)
return_dict = self.handler(self.data.arg_list_pack(args, self.argtypes_d), mem_package_list)
except Exception as e:
# Log status
self.log.out('[callback-server] ... call failed!')
# Push traceback to log
self.log.err(traceback.format_exc())
raise e
try:
# Log status
self.log.out('[callback-server] ... received feedback from client, unpacking ...')
# Unpack return dict (for pointers and structs)
self.data.arg_list_sync(
args,
self.data.arg_list_unpack(return_dict['args'], self.argtypes_d),
self.argtypes_d
)
# Unpack return value
return_value = self.data.return_msg_unpack(return_dict['return_value'], self.restype_d)
# Unpack memory (call may have failed partially only)
self.data.client_unpack_memory_list(args, return_value, return_dict['memory'], self.memsync_d)
except Exception as e:
# Log status
self.log.out('[callback-server] ... unpacking failed!')
# Push traceback to log
self.log.err(traceback.format_exc())
raise e
# Raise the original error if call was not a success
if not return_dict['success']:
self.log.out('[callback-server] ... call raised an error.')
raise return_dict['exception']
# Log status
self.log.out('[callback-server] ... unpacked, return.')
# Return data directly to DLL routine
return return_value
| pleiszenburg/zugbruecke | src/zugbruecke/core/callback_server.py | Python | lgpl-2.1 | 4,015 |
from boyle import define, File, Shell
a = define(
name='t1',
out=File('a'),
do=Shell('echo hello > {out}'))
b = define(
name='t2',
out=File('b'),
do=Shell('echo world > {out}'))
c = define(
name='t3',
out=File('jonatan.jpg'),
inp=[a, b],
do=Shell('cat {inp[0]} {inp[1]} > {out}'))
| boyleworkflow/boyle | notes/internal-python-dsl/simple_workflow.py | Python | lgpl-3.0 | 325 |
import cadquery as cq
# Set up the length, width, and thickness
(L, w, t) = (20.0, 6.0, 3.0)
s = cq.Workplane("XY")
# Draw half the profile of the bottle and extrude it
p = s.center(-L / 2.0, 0).vLine(w / 2.0) \
.threePointArc((L / 2.0, w / 2.0 + t), (L, w / 2.0)).vLine(-w / 2.0) \
.mirrorX().extrude(30.0, True)
# Make the neck
p.faces(">Z").workplane().circle(3.0).extrude(2.0, True)
# Make a shell
result = p.faces(">Z").shell(0.3)
# Displays the result of this script
show_object(result)
| jmwright/cadquery-freecad-module | Libs/cadquery/examples/FreeCAD/Ex022_Classic_OCC_Bottle.py | Python | lgpl-3.0 | 508 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from __future__ import with_statement
import re, codecs, os, StringIO, tempfile
from gravity.tae.tokenizer import Token
from gravity.tae.text import AnnotatedText
from gravity.tae.corpora.corpora import Corpora
from gravity.common.process import PipedProcess
# map not standard CoNLL language codes to standard
LANGS_MAP = { "nl":"ned", "es":"esp", "en":"eng", "de":"deu"}
class CoNLL(AnnotatedText):
class ConnlevalOutputParser(object):
def send(self, pipe): pass
def read(self, pipe):
class Results(dict):
def __init__(self, lines):
assert lines
self.lines = lines
def __str__(self): return ''.join(self.lines)
lines = pipe.readlines()
res = Results(lines)
m = re.compile(r"[^0-9]*([0-9]+)\s+(tokens)[^0-9]+([0-9]+)\s+(phrases)\;(.*)").match(lines[0].strip())
g = m.groups()
for i in range((len(g)-1)/2):
j = 2*i + 1
res[m.group(j+1)] = int(m.group(j))
m = re.compile(r"[^0-9]*([0-9]+)[^0-9]+([0-9]+)[^0-9]*").match(m.group(len(g)))
res['found_phrases'] = int(m.group(1))
res['correct_phrases'] = int(m.group(2))
m = re.compile(r"(accuracy)\s*\:\s*([0-9]+[.][0-9]+)\%\;\s*(precision)\s*\:\s*([0-9]+[.][0-9]+)\%\;\s*(recall)\s*\:\s*([0-9]+[.][0-9]+)\%\;\s*(FB1)\:\s*([0-9]+[.][0-9]+)").match(lines[1].strip())
for i in range(len(m.groups())/2):
j = 2*i + 1
res[m.group(j)] = float(m.group(j+1))
r = re.compile(r"\s*(\w+)\s*\:\s*(\w+)\s*\:\s*([0-9]+[.][0-9]+)\%\s*\;\s*(\w+)\s*\:\s*([0-9]+[.][0-9]+)\%\s*\;\s*(\w+)\s*\:\s*([0-9]+[.][0-9]+)\s*")
for i in range(len(lines) - 2):
l = lines[i + 2].strip()
if len(l) == 0: continue
m = r.match(l)
g = m.groups()
p = g[0]
for k in range((len(g) - 1)/2):
j = 2*k + 2
res[p + '_' + m.group(j)] = float(m.group(j+1))
return res
def __init__(self, path, line_parser_re, lang):
assert path and line_parser_re
self.path = path
with codecs.open(path, encoding='iso-8859-1') as f:
AnnotatedText.__init__(self, f.read(), line_parser_re = line_parser_re, lang = lang)
@classmethod
def home(cls):
return os.path.join(Corpora.corpora_home(), cls.__name__)
@classmethod
def path(cls, name, lang):
p = os.path.join(cls.home(), 'data', '%s.%s' % (LANGS_MAP[lang], name))
if not os.path.exists(p) or os.path.isdir(p): raise BaseException("Wrong path '%s'" % p)
return p
@classmethod
def testa(cls, lang):
return cls(path = cls.path('testa', lang), lang = lang)
@classmethod
def testb(cls, lang):
return cls(path = cls.path('testb', lang), lang = lang)
@classmethod
def train(cls, lang):
return cls(path = cls.path('train', lang), lang = lang)
def _clone_token(self, t):
return (t[0], t[1], t[2], t[3])
def pos_tokens(self, type_filter = None):
for e in self.iob_tokens('POS', type_filter): yield e
def ne_tokens(self, type_filter = None):
for e in self.iob_tokens('NE', type_filter): yield e
def syn_tokens(self, type_filter = None):
for e in self.iob_tokens('SYN', type_filter): yield e
def build_token(self, tag_name, text, offset, length, tag):
return (text, offset, length, self.tags_map[tag_name][tag])
def iob_tokens(self, tag_name = None, type_filter = None):
if type_filter == None:
for t in super(CoNLL, self).iob_tokens(tag_name): yield t
else:
for t in super(CoNLL, self).iob_tokens(tag_name):
if (t[3] & type_filter) > 0: yield t
def tokens(self, tag_name = None, type_filter = None):
if type_filter == None:
for t in super(CoNLL, self).tokens(tag_name): yield t
else:
for t in super(CoNLL, self).tokens(tag_name):
if (t[3] & type_filter) > 0: yield t
    # Convert a CoNLL-formatted file into TSV (Stanford NER, for instance, consumes this format).
    # Each line is:
    #   <entity>\t<ne_type>\n
def to_tsv(self, output_path, delim = "\t", tag_map = { 'PER':'PERSON', 'LOC':'LOCATION', 'MISC':'MISC', 'ORG':'ORGANIZATION', 'O':'O'}):
assert output_path and delim and tag_map
with codecs.open(output_path, mode='w', encoding='utf-8') as o:
with codecs.open(self.path, encoding='iso-8859-1') as f:
for line in f.readlines():
line = line.strip()
if len(line) > 0 and line.find('-DOCSTART-') < 0:
t = line.split(' ')
tp = t[-1]
if tp != 'O': tp = tp[tp.index('-') + 1:]
o.write(u"%s%s%s\n" % (t[0], delim, tag_map[tp]))
o.write(u'\n')
def _eval_fields (self, tokens, tag_name, types_map):
tag_name_index = self.tags_names.index(tag_name)
def find_next_match_token(i, tokens, t):
t_s, t_e = t[1], t[1] + t[2] - 1
if i < 0: i = 0
for j in range(i, len(tokens), 1):
rt_s, rt_e = tokens[j][1], tokens[j][1] + tokens[j][2] - 1
if rt_s < 0 or rt_e < t_s:
continue
if t_s >= rt_s and t_e <= rt_e:
prefix = 'B-' if t_s == rt_s else 'I-'
t_t = tokens[j][3] #((tokens[j][3] & t_bits) | b_bit) ^ b_bit
return (j, prefix, types_map[t_t], t_t)
#elif (t_s < rt_s and t_e >= rt_s) or (t_s <= rt_e and t_e > rt_e):
# print "Entity location inconsistency '%s'(ner) = [%d, %d], '%s'(corpus) = [%d, %d]" % (tokens[j][0].encode('utf-8'), rt_s, rt_e, t[0].encode('utf-8'), t_s, t_e)
# raise BaseException("Entity location inconsistency '%s'(ner) = [%d, %d], '%s'(corpus) = [%d, %d]" % (tokens[j][0], rt_s, rt_e, t[0], t_s, t_e) )
return (j, '', 'O', 0)
return (len(tokens), '', 'O', 0)
li = 0
ner, pt_type, i = None, 'O', -1
for l in codecs.open(self.path, encoding='iso-8859-1').readlines():
if self._tokens[li][2] == 0:
pt_type = 'O'
yield ''
else:
w = self._tokens[li][0]
l = l.strip()
lt = l.split(' ')[-1].strip()
ner = find_next_match_token(i, tokens, self._tokens[li])
if lt == 'O':
yield w + ' O ' + ner[1] + ner[2]
pt_type = 'O'
else:
prefix, suffix = lt[0:2], lt[2:len(lt)]
if prefix != 'B-' and prefix != 'I-': raise BaseException("Wrong prefix in '%s'" % l)
if suffix != ner[2] or prefix == 'B-': yield w + ' ' + lt + ' ' + ner[1] + ner[2]
elif prefix == 'I-':
if ner[1] == 'B-' and pt_type != suffix: yield w + ' ' + lt + ' I-' + ner[2]
else: yield w + ' ' + lt + ' ' + ner[1] + ner[2]
else:
raise BaseException("Unknown tag name %s" % lt)
pt_type = suffix
i = ner[0]
li += 1
def conlleval(self, tokens, tag_name = 'SYN', eval_script_name = 'conlleval.txt'):
def reverse_map(m):
mm = {}
for k in m:
v = m[k]
if v in mm: raise BaseException(str(v) + " is in map " + str(m))
mm[v] = k
return mm
f = tempfile.mkstemp(text = True)
ff = None
try:
ff = open(f[1], 'w')
for l in self._eval_fields(tokens, tag_name, reverse_map(self.tags_map[tag_name])):
ff.write(l.encode("iso-8859-1") + "\n")
ff.flush()
ff.close()
return PipedProcess(os.path.join(self.home(), "bin", eval_script_name) + "< %s" % f[1])(CoNLL.ConnlevalOutputParser())
finally:
if os.path.exists(f[1]): os.remove(f[1])
def baseline(self, baseline_script_name = 'baseline.txt', eval_script_name = 'conlleval.txt'):
class BaselineOutput(object):
def read(self2, pipe):
tmp = tempfile.mkstemp('a','b', self.home(), text = True)
with open(tmp[1], 'w') as f:
for line in pipe.readlines(): f.write(line)
return tmp[1]
def send(self, pipe): pass
p = PipedProcess(os.path.join(self.home(), "bin", baseline_script_name) + " " + self.__class__.path('train', self.lang) + " " + self.path)
fn = p(BaselineOutput())
try:
return PipedProcess(os.path.join(self.home(), "bin", eval_script_name) + "< %s" % fn)(CoNLL.ConnlevalOutputParser())
finally:
if fn != None: os.remove(fn)
class CoNLL2000(CoNLL):
# CC - Coordinating conjunction
# CD - Cardinal number
# DT - Determiner
# EX - Existential there
# FW - Foreign word
# IN - Preposition or subordinating conjunction
# JJ - Adjective
# JJR - Adjective, comparative
# JJS - Adjective, superlative
# LS - List item marker
# MD - Modal
# NN - Noun, singular or mass
# NNS - Noun, plural
# NNP - Proper noun, singular
# NNPS - Proper noun, plural
# PDT - Predeterminer
# POS - Possessive ending
# PRP - Personal pronoun
# PRP$ - Possessive pronoun
# RB - Adverb
# RBR - Adverb, comparative
# RBS - Adverb, superlative
# RP - Particle
# SYM - Symbol
# TO - to
# UH - Interjection
# VB - Verb, base form
# VBD - Verb, past tense
# VBG - Verb, gerund or present participle
# VBN - Verb, past participle
# VBP - Verb, non-3rd person singular present
# VBZ - Verb, 3rd person singular present
# WDT - Wh-determiner
# WP - Wh-pronoun
# WP$ - Possessive wh-pronoun
# WRB - Wh-adver
POS_TAG_MAP = { 'NNP': Token.POS_NOUN,
'NNS': Token.POS_NOUN,
'NN': Token.POS_NOUN,
'NNPS': Token.POS_NOUN,
'VB': Token.POS_VERB,
'VBD': Token.POS_VERB,
'VBG': Token.POS_VERB,
'VBN': Token.POS_VERB,
'VBP': Token.POS_VERB,
'VBZ': Token.POS_VERB,
'MD' : Token.POS_VERB,
'PRP': Token.POS_PRONOUN,
'PRP$':Token.POS_PRONOUN,
'JJ': Token.POS_ADJ,
'JJS': Token.POS_ADJ,
'JJR': Token.POS_ADJ,
'IN': Token.POS_PREP,
'CD': Token.POS_NUM,
'CC' : Token.POS_CONJ,
'RB': Token.POS_ADVERB,
'RBR': Token.POS_ADVERB,
'RBS': Token.POS_ADVERB,
'WDT':Token.POS_UNKNOWN,
'WRB':Token.POS_UNKNOWN,
'POS':Token.POS_UNKNOWN,
'DT':Token.POS_UNKNOWN,
'WP':Token.POS_UNKNOWN,
'WP$' :Token.POS_UNKNOWN,
'TO' :Token.POS_UNKNOWN,
'RP' :Token.POS_UNKNOWN,
'LS' :Token.POS_UNKNOWN,
'SYM' :Token.POS_UNKNOWN,
'FW' :Token.POS_UNKNOWN,
'EX' :Token.POS_UNKNOWN,
'PDT' :Token.POS_UNKNOWN,
'UH' :Token.POS_UNKNOWN,
':' : Token.POS_PUNCT,
"''" : Token.POS_PUNCT,
"\'" : Token.POS_PUNCT,
"[" : Token.POS_PUNCT,
"]" : Token.POS_PUNCT,
"$" : Token.POS_PUNCT,
"@" : Token.POS_PUNCT,
"#" : Token.POS_PUNCT,
"%" : Token.POS_PUNCT,
"(" : Token.POS_PUNCT,
")" : Token.POS_PUNCT,
"-" : Token.POS_PUNCT,
"``" : Token.POS_PUNCT,
"!" : Token.POS_PUNCT,
"?" : Token.POS_PUNCT,
"\"" : Token.POS_PUNCT,
"\"\"" : Token.POS_PUNCT,
',' : Token.POS_PUNCT,
'.' : Token.POS_PUNCT,
                    'NN|SYM': Token.POS_UNKNOWN } # !!! very strange case, not clear whether it follows IOB standard !!!
# CoNLL 2000 declares 11 different chunks:
# { ADJP, ADVP, CONJP, INTJ, LST, NP, PP, PRT, SBAR, VP, UCP }.
# despite the large number of chunk types, the NP, VP and PP types account
# for 95% of all chunk occurrences.
SYN_CHUNK_MAP = {
'VP' : Token.SYN_CHUNK_VP,
'NP' : Token.SYN_CHUNK_NP,
'PP' : Token.SYN_CHUNK_PP,
'ADJP' : Token.SYN_CHUNK_ADJP,
'ADVP' : Token.SYN_CHUNK_ADVP,
'CONJP': Token.SYN_CHUNK_CONJP,
'INTJ' : Token.SYN_CHUNK_INTJ,
'LST' : Token.SYN_CHUNK_LST,
'PRT' : Token.SYN_CHUNK_PRT,
'SBAR' : Token.SYN_CHUNK_SBAR,
'UCP' : Token.SYN_CHUNK_UCP
}
def __init__(self, path, lang = 'en'):
if lang != 'en': raise BaseException("Unsupported language %s" % lang)
self.tags_map = { 'POS': CoNLL2000.POS_TAG_MAP, 'SYN':CoNLL2000.SYN_CHUNK_MAP }
CoNLL.__init__(self, path = path, line_parser_re = re.compile(r"([^ ]+) (?P<POS>[^ ]+) (?P<SYN>[^ ]+)"), lang = lang)
@classmethod
def home(cls): return os.path.join(CoNLL.home(), '2000')
@classmethod
def testb(cls, lang):
# no testb data set is available for this corpus
raise NotImplementedError()
def baseline(self, baseline_script_name = 'baseline', eval_script_name = 'conlleval'):
# no baseline script is available for this corpus
raise NotImplementedError()
class CoNLL2002(CoNLL):
POS_TAG_MAP = { 'Punc' : Token.POS_PUNCT,
'V' : Token.POS_VERB,
'Adv' : Token.POS_ADVERB,
'Adj' : Token.POS_ADJ,
'Prep': Token.POS_PREP,
'Conj': Token.POS_CONJ,
'Art' : Token.POS_ART,
'N' : Token.POS_NOUN,
'Num' : Token.POS_NUM,
'Pron': Token.POS_PRONOUN,
'Misc': Token.POS_UNKNOWN,
'Int' : Token.POS_UNKNOWN }
NE_TAG_MAP = { 'LOC' : Token.NE_LOC,
'PER' : Token.NE_PER,
'ORG' : Token.NE_ORG,
'MISC' : Token.NE_MISC,
'O' : Token.NE_UNKNOWN }
def __init__(self, path, lang):
if lang != 'nl' and lang != 'es': raise BaseException("Unsupported language %s" % lang)
self.tags_map = { 'POS': CoNLL2002.POS_TAG_MAP , 'NE': CoNLL2002.NE_TAG_MAP }
if lang == 'nl':
CoNLL.__init__(self, path = path, line_parser_re = re.compile(r"([^ ]+) (?P<POS>[^ ]+) (?P<NE>[^ ]+)"), lang = lang)
else:
CoNLL.__init__(self, path = path, line_parser_re = re.compile(r"([^ ]+) (?P<NE>[^ ]+)"), lang = lang)
def conlleval(self, tokens, tag_name = 'NE'):
return super(CoNLL2002, self).conlleval(tokens, tag_name = tag_name)
@classmethod
def home(cls):
return os.path.join(CoNLL.home(), '2002')
class CoNLL2003(CoNLL):
def __init__(self, path, lang = 'en'):
if lang != 'en': raise BaseException("Unsupported language %s" % lang)
self.tags_map = { 'POS': CoNLL2000.POS_TAG_MAP , 'NE': CoNLL2002.NE_TAG_MAP, 'SYN':CoNLL2000.SYN_CHUNK_MAP }
CoNLL.__init__(self, path = path, line_parser_re = re.compile(r"([^ ]+) (?P<POS>[^ ]+) (?P<SYN>[^ ]+) (?P<NE>[^ ]+)"), lang = lang)
@classmethod
def home(cls):
return os.path.join(CoNLL.home(), '2003')
def conlleval(self, tokens, tag_name = 'NE', eval_script_name = 'conlleval'):
return super(CoNLL2003, self).conlleval(tokens, tag_name= tag_name, eval_script_name = eval_script_name)
def baseline(self, baseline_script_name = 'baseline', eval_script_name = 'conlleval'):
return super(CoNLL2003, self).baseline(baseline_script_name, eval_script_name)
# not completely settled method
def convert_to_2002(self):
with codecs.open(self.path + ".2002", mode='w', encoding='iso-8859-1') as f:
prev_ne_tag = None
for t in self._tokens:
if t[3] == None:
f.write(t[0])
prev_ne_tag = None
else:
ne, pos = t[3]['NE'], t[3]['POS']
if ne == 'O':
f.write(t[0] + " " + pos + " O")
prev_ne_tag = None
else:
prefix, tg = ne[0:1], ne[2:]
if prefix == 'B':
f.write(t[0] + " " + pos + " " + ne)
elif prev_ne_tag != tg:
f.write(t[0] + " " + pos + " B-" + tg)
else:
f.write(t[0] + " " + pos + " " + ne)
prev_ne_tag = tg
f.write("\n")
# a = CoNLL2003.testa("en")
# a.conlleval([t for t in a.ne_tokens()])
# t = CoNLL2002.train("nl")
# t.to_tsv(output_path="ned.train.tsv")
#
# t = CoNLL2002.testa("nl")
# t.to_tsv(output_path="ned.testa.tsv")
#
# t = CoNLL2002.testb("nl")
# t.to_tsv(output_path="ned.testb.tsv")
# b = CoNLL2003.testb("en")
# t = CoNLL2003.train("en")
# a.convert_to_2002()
# b.convert_to_2002()
# t.convert_to_2002()
| vfulco/scalpel | lib/gravity/tae/corpora/conll.py | Python | lgpl-3.0 | 19,418 |
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
import struct
from mongotor.errors import (DatabaseError,
InterfaceError, TimeoutError)
def _unpack_response(response, cursor_id=None, as_class=dict, tz_aware=False):
"""Unpack a response from the database.
Check the response for errors and unpack, returning a dictionary
containing the response data.
:Parameters:
- `response`: byte string as returned from the database
- `cursor_id` (optional): cursor_id we sent to get this response -
used for raising an informative exception when we get cursor id not
valid at server response
      - `as_class` (optional): class to use for resulting documents
      - `tz_aware` (optional): whether to decode BSON datetimes as timezone-aware
"""
response_flag = struct.unpack("<i", response[:4])[0]
if response_flag & 1:
# Shouldn't get this response if we aren't doing a getMore
assert cursor_id is not None
raise InterfaceError("cursor id '%s' not valid at server" %
cursor_id)
elif response_flag & 2:
error_object = bson.BSON(response[20:]).decode()
if error_object["$err"] == "not master":
raise DatabaseError("master has changed")
raise DatabaseError("database error: %s" %
error_object["$err"])
result = {}
result["cursor_id"] = struct.unpack("<q", response[4:12])[0]
result["starting_from"] = struct.unpack("<i", response[12:16])[0]
result["number_returned"] = struct.unpack("<i", response[16:20])[0]
result["data"] = bson.decode_all(response[20:], as_class, tz_aware)
assert len(result["data"]) == result["number_returned"]
return result
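# Illustrative sketch (not part of mongotor, the helper name is hypothetical):
# hand-build a minimal OP_REPLY body and feed it through _unpack_response.
# "\x05\x00\x00\x00\x00" is the canonical encoding of an empty BSON document,
# so no extra encoding helpers are needed.
def _example_unpack_response():
    empty_doc = "\x05\x00\x00\x00\x00"
    fake = (struct.pack("<i", 0) +   # response flags: no error bits set
            struct.pack("<q", 0) +   # cursor id
            struct.pack("<i", 0) +   # starting offset
            struct.pack("<i", 1) +   # number of documents returned
            empty_doc)
    return _unpack_response(fake)    # -> {'cursor_id': 0, ..., 'data': [{}]}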
def _check_command_response(response, msg="%s", allowable_errors=[]):
if not response["ok"]:
if "wtimeout" in response and response["wtimeout"]:
raise TimeoutError(msg % response["errmsg"])
details = response
# Mongos returns the error details in a 'raw' object
# for some errors.
if "raw" in response:
for shard in response["raw"].itervalues():
if not shard.get("ok"):
# Just grab the first error...
details = shard
break
if not details["errmsg"] in allowable_errors:
if details["errmsg"] == "db assertion failure":
ex_msg = ("db assertion failure, assertion: '%s'" %
details.get("assertion", ""))
if "assertionCode" in details:
ex_msg += (", assertionCode: %d" %
(details["assertionCode"],))
raise DatabaseError(ex_msg, details.get("assertionCode"))
raise DatabaseError(msg % details["errmsg"])
def _fields_list_to_dict(fields):
"""Takes a list of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
"""
as_dict = {}
for field in fields:
if not isinstance(field, basestring):
raise TypeError("fields must be a list of key names, "
"each an instance of %s" % (basestring.__name__,))
as_dict[field] = 1
return as_dict
| marcelnicolay/mongotor | mongotor/helpers.py | Python | lgpl-3.0 | 3,825 |
#Asteroid class
import pyglet, random
import resources, physicalobject
class Asteroid(physicalobject.PhysicalObject):
def __init__(self, *args, **kwargs):
super(Asteroid, self).__init__(
resources.asteroid_image, *args, **kwargs)
self.rotate_speed = random.random() * 100.0 - 50.0
def handle_collision_with(self,other_object):
super(Asteroid, self).handle_collision_with(other_object)
if self.dead and self.scale > 0.25:
num_asteroids = random.randint(2,3)
for i in xrange(num_asteroids):
new_asteroid = Asteroid(
x=self.x, y=self.y, batch=self.batch)
new_asteroid.rotation = random.randint(0,360)
new_asteroid.velocity_x = (
random.random() * 70 + self.velocity_x)
new_asteroid.velocity_y = (
random.random() * 70 + self.velocity_y)
new_asteroid.scale = self.scale * 0.5
self.new_objects.append(new_asteroid)
def update(self,dt):
super(Asteroid,self).update(dt)
self.rotation += self.rotate_speed * dt | CyanCorsair/asteroids | version_2/game/asteroid.py | Python | unlicense | 979 |
# Copyright 2016-2021 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This manages the detection and auto-configuration of nodes.
# Discovery sources may implement scans and may be passive or may provide
# both.
# The phases and actions:
# - Detect - Notice the existence of a potentially supported target
# - Potentially apply a secure replacement for default credential
# (perhaps using some key identifier combined with some string
# denoting temporary use, and use confluent master integrity key
# to generate a password in a formulaic way?)
# - Do some universal reconfiguration if applicable (e.g. if something is
# part of an enclosure with an optionally enabled enclosure manager,
#   check and request enclosure manager enablement)
# - Throughout all of this, at this phase no sensitive data is divulged,
# only using credentials that are factory default or equivalent to
# factory default
# - Request transition to Locate
# - Locate - Use available cues to ascertain the physical location. This may
# be mac address lookup through switch or correlated by a server
# enclosure manager. If the location data suggests a node identity,
# then proceed to the 'verify' state
# - Verify - Given the current information and candidate upstream verifier,
#            verify the authenticity of the server's claim in an automated way
# if possible. A few things may happen at this juncture
# - Verification outright fails (confirmed negative response)
# - Audit log entry created, element is not *allowed* to
# proceed
#        - Verification not possible (neither good nor bad)
# - If security policy is set to low, proceed to 'Manage'
# - Otherwise, log the detection event and stop (user
#              would then manually bless the endpoint if applicable)
# - Verification succeeds
# - If security policy is set to strict (or manual, whichever
#              word works best), note the successful verification, but
# do not manage
# - Otherwise, proceed to 'Manage'
# -Pre-configure - Given data up to this point, try to do some pre-config.
# For example, if located and X, then check for S, enable S
# This happens regardless of verify, as verify may depend on
# S
# - Manage
# - Create the node if autonode (Deferred)
# - If there is not a defined ip address, collect the current LLA and use
# that value.
# - If no username/password defined, generate a unique password, 20 bytes
# long, written to pass most complexity rules (15 random bytes, base64,
# retry until uppercase, lowercase, digit, and symbol all present)
# - Apply defined configuration to endpoint
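# Illustrative sketch only (the helper below is hypothetical and not used by
# confluent): one way to realize the password rule described above, i.e.
# 15 random bytes, base64 encoded to 20 characters, retried until uppercase,
# lowercase, digit, and symbol are all present.
def _example_generate_password():
    import base64 as _b64
    import os as _os
    while True:
        candidate = _b64.b64encode(_os.urandom(15)).decode('utf-8')
        if (any(c.isupper() for c in candidate) and
                any(c.islower() for c in candidate) and
                any(c.isdigit() for c in candidate) and
                any(not c.isalnum() for c in candidate)):
            return candidate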
import base64
import confluent.config.configmanager as cfm
import confluent.collective.manager as collective
import confluent.discovery.protocols.pxe as pxe
import confluent.discovery.protocols.ssdp as ssdp
import confluent.discovery.protocols.slp as slp
import confluent.discovery.handlers.imm as imm
import confluent.discovery.handlers.cpstorage as cpstorage
import confluent.discovery.handlers.tsm as tsm
import confluent.discovery.handlers.pxe as pxeh
import confluent.discovery.handlers.smm as smm
import confluent.discovery.handlers.xcc as xcc
import confluent.exceptions as exc
import confluent.log as log
import confluent.messages as msg
import confluent.networking.macmap as macmap
import confluent.noderange as noderange
import confluent.util as util
import eventlet
import traceback
import socket as nsocket
webclient = eventlet.import_patched('pyghmi.util.webclient')
import eventlet
import eventlet.greenpool
import eventlet.semaphore
autosensors = set()
scanner = None
try:
unicode
except NameError:
unicode = str
class nesteddict(dict):
def __missing__(self, key):
v = self[key] = nesteddict()
return v
nodehandlers = {
'service:lenovo-smm': smm,
'service:lenovo-smm2': smm,
'lenovo-xcc': xcc,
'service:management-hardware.IBM:integrated-management-module2': imm,
'pxe-client': pxeh,
'onie-switch': None,
'cumulus-switch': None,
'service:io-device.Lenovo:management-module': None,
'service:thinkagile-storage': cpstorage,
'service:lenovo-tsm': tsm,
}
servicenames = {
'pxe-client': 'pxe-client',
'onie-switch': 'onie-switch',
'cumulus-switch': 'cumulus-switch',
'service:lenovo-smm': 'lenovo-smm',
'service:lenovo-smm2': 'lenovo-smm2',
'lenovo-xcc': 'lenovo-xcc',
'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2',
'service:io-device.Lenovo:management-module': 'lenovo-switch',
'service:thinkagile-storage': 'thinkagile-storagebmc',
'service:lenovo-tsm': 'lenovo-tsm',
}
servicebyname = {
'pxe-client': 'pxe-client',
'onie-switch': 'onie-switch',
'cumulus-switch': 'cumulus-switch',
'lenovo-smm': 'service:lenovo-smm',
'lenovo-smm2': 'service:lenovo-smm2',
'lenovo-xcc': 'lenovo-xcc',
'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2',
'lenovo-switch': 'service:io-device.Lenovo:management-module',
'thinkagile-storage': 'service:thinkagile-storagebmc',
'lenovo-tsm': 'service:lenovo-tsm',
}
discopool = eventlet.greenpool.GreenPool(500)
runningevals = {}
# Passive-only auto-detection protocols:
# PXE
# Both passive and active
# SLP (passive mode listens for SLP DA and unicast interrogation of the system)
# mDNS
# SSDP
# Also there are location providers
# Switch
# chassis
# chassis may in turn describe more chassis
# We normalize discovered node data to the following pieces of information:
# * Detected node name (if available, from switch discovery or similar) or
#   an auto-generated node name.
# * Model number
# * Model name
# * Serial number
# * System UUID (in x86 space, specifically whichever UUID would be in DMI)
# * Network interfaces and addresses
# * Switch connectivity information
# * enclosure information
# * Management TLS fingerprint if validated (switch publication or enclosure)
# * System TLS fingerprint if validated (switch publication or system manager)
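# Illustrative example only (all values hypothetical): a normalized entry in
# known_info, keyed by mac address, looks roughly like
#   {'hwaddr': '00:11:22:33:44:55',
#    'addresses': [('fe80::aa:bb', 443, 0, 2)],
#    'services': ['lenovo-xcc'],
#    'uuid': '01234567-89ab-cdef-0123-456789abcdef',
#    'serialnumber': 'J300ABCD',
#    'modelnumber': '7X02CTO1WW',
#    'handler': xcc,
#    'discostatus': 'unidentified'}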
#TODO: by serial, by uuid, by node
known_info = {}
known_services = {}
known_serials = {}
known_uuids = nesteddict()
known_nodes = nesteddict()
unknown_info = {}
pending_nodes = {}
pending_by_uuid = {}
def enrich_pxe_info(info):
sn = None
mn = None
nodename = info.get('nodename', None)
uuid = info.get('uuid', '')
if not uuid_is_valid(uuid):
return info
for mac in known_uuids.get(uuid, {}):
if not sn and 'serialnumber' in known_uuids[uuid][mac]:
info['serialnumber'] = known_uuids[uuid][mac]['serialnumber']
if not mn and 'modelnumber' in known_uuids[uuid][mac]:
info['modelnumber'] = known_uuids[uuid][mac]['modelnumber']
if nodename is None and 'nodename' in known_uuids[uuid][mac]:
info['nodename'] = known_uuids[uuid][mac]['nodename']
def uuid_is_valid(uuid):
if not uuid:
return False
return uuid.lower() not in ('00000000-0000-0000-0000-000000000000',
'ffffffff-ffff-ffff-ffff-ffffffffffff',
'00112233-4455-6677-8899-aabbccddeeff',
'03000200-0400-0500-0006-000700080009',
'20202020-2020-2020-2020-202020202020')
def _printable_ip(sa):
return nsocket.getnameinfo(
sa, nsocket.NI_NUMERICHOST|nsocket.NI_NUMERICSERV)[0]
def send_discovery_datum(info):
addresses = info.get('addresses', [])
if info['handler'] == pxeh:
enrich_pxe_info(info)
yield msg.KeyValueData({'nodename': info.get('nodename', '')})
yield msg.KeyValueData({'ipaddrs': [_printable_ip(x) for x in addresses]})
sn = info.get('serialnumber', '')
mn = info.get('modelnumber', '')
uuid = info.get('uuid', '')
if uuid:
relatedmacs = []
for mac in known_uuids.get(uuid, {}):
if mac and mac != info.get('hwaddr', ''):
relatedmacs.append(mac)
if relatedmacs:
yield msg.KeyValueData({'relatedmacs': relatedmacs})
yield msg.KeyValueData({'serialnumber': sn})
yield msg.KeyValueData({'modelnumber': mn})
yield msg.KeyValueData({'uuid': uuid})
if 'enclosure.uuid' in info:
yield msg.KeyValueData({'enclosure_uuid': info['enclosure.uuid']})
if 'enclosure.bay' in info:
yield msg.KeyValueData({'bay': int(info['enclosure.bay'])})
yield msg.KeyValueData({'macs': [info.get('hwaddr', '')]})
types = []
for infotype in info.get('services', []):
if infotype in servicenames:
types.append(servicenames[infotype])
yield msg.KeyValueData({'types': types})
if 'otheraddresses' in info:
yield msg.KeyValueData({'otheripaddrs': list(info['otheraddresses'])})
if 'location' in info:
yield msg.KeyValueData({'location': info['location']})
if 'room' in info:
yield msg.KeyValueData({'room': info['room']})
if 'rack' in info:
yield msg.KeyValueData({'rack': info['rack']})
if 'u' in info:
yield msg.KeyValueData({'lowest_u': info['u']})
if 'hostname' in info:
yield msg.KeyValueData({'hostname': info['hostname']})
if 'modelname' in info:
yield msg.KeyValueData({'modelname': info['modelname']})
def _info_matches(info, criteria):
model = criteria.get('by-model', None)
devtype = criteria.get('by-type', None)
node = criteria.get('by-node', None)
serial = criteria.get('by-serial', None)
status = criteria.get('by-state', None)
uuid = criteria.get('by-uuid', None)
if model and info.get('modelnumber', None) != model:
return False
if devtype and devtype not in info.get('services', []):
return False
if node and info.get('nodename', None) != node:
return False
if serial and info.get('serialnumber', None) != serial:
return False
if status and info.get('discostatus', None) != status:
return False
if uuid and info.get('uuid', None) != uuid:
return False
return True
def list_matching_nodes(criteria):
retnodes = []
for node in known_nodes:
for mac in known_nodes[node]:
info = known_info[mac]
if _info_matches(info, criteria):
retnodes.append(node)
break
retnodes.sort(key=noderange.humanify_nodename)
return [msg.ChildCollection(node + '/') for node in retnodes]
def list_matching_serials(criteria):
for serial in sorted(list(known_serials)):
info = known_serials[serial]
if _info_matches(info, criteria):
yield msg.ChildCollection(serial + '/')
def list_matching_uuids(criteria):
for uuid in sorted(list(known_uuids)):
for mac in known_uuids[uuid]:
info = known_uuids[uuid][mac]
if _info_matches(info, criteria):
yield msg.ChildCollection(uuid + '/')
break
def list_matching_states(criteria):
return [msg.ChildCollection(x) for x in ('discovered/', 'identified/',
'unidentified/')]
def list_matching_macs(criteria):
for mac in sorted(list(known_info)):
info = known_info[mac]
if _info_matches(info, criteria):
yield msg.ChildCollection(mac.replace(':', '-'))
def list_matching_types(criteria):
rettypes = []
for infotype in known_services:
typename = servicenames[infotype]
if ('by-model' not in criteria or
criteria['by-model'] in known_services[infotype]):
rettypes.append(typename)
return [msg.ChildCollection(typename + '/')
for typename in sorted(rettypes)]
def list_matching_models(criteria):
for model in sorted(list(detected_models())):
if ('by-type' not in criteria or
model in known_services[criteria['by-type']]):
yield msg.ChildCollection(model + '/')
def show_info(mac):
mac = mac.replace('-', ':')
if mac not in known_info:
raise exc.NotFoundException(mac + ' not a known mac address')
for i in send_discovery_datum(known_info[mac]):
yield i
list_info = {
'by-node': list_matching_nodes,
'by-serial': list_matching_serials,
'by-type': list_matching_types,
'by-model': list_matching_models,
'by-mac': list_matching_macs,
'by-state': list_matching_states,
'by-uuid': list_matching_uuids,
}
multi_selectors = set([
'by-type',
'by-model',
'by-state',
'by-uuid',
])
node_selectors = set([
'by-node',
'by-serial',
])
single_selectors = set([
'by-mac',
])
def _parameterize_path(pathcomponents):
listrequested = False
childcoll = True
if len(pathcomponents) % 2 == 1:
listrequested = pathcomponents[-1]
pathcomponents = pathcomponents[:-1]
pathit = iter(pathcomponents)
keyparams = {}
validselectors = multi_selectors | node_selectors | single_selectors
for key, val in zip(pathit, pathit):
if key not in validselectors:
raise exc.NotFoundException('{0} is not valid here'.format(key))
if key == 'by-type':
keyparams[key] = servicebyname.get(val, '!!!!invalid-type')
else:
keyparams[key] = val
validselectors.discard(key)
if key in single_selectors:
childcoll = False
validselectors = set([])
elif key in node_selectors:
validselectors = single_selectors | set([])
return validselectors, keyparams, listrequested, childcoll
def handle_autosense_config(operation, inputdata):
autosense = cfm.get_global('discovery.autosense')
autosense = autosense or autosense is None
if operation == 'retrieve':
yield msg.KeyValueData({'enabled': autosense})
elif operation == 'update':
enabled = inputdata['enabled']
if type(enabled) in (unicode, bytes):
enabled = enabled.lower() in ('true', '1', 'y', 'yes', 'enable',
'enabled')
if autosense == enabled:
return
cfm.set_global('discovery.autosense', enabled)
if enabled:
start_autosense()
else:
stop_autosense()
def handle_api_request(configmanager, inputdata, operation, pathcomponents):
if pathcomponents == ['discovery', 'autosense']:
return handle_autosense_config(operation, inputdata)
if operation == 'retrieve':
return handle_read_api_request(pathcomponents)
elif (operation in ('update', 'create') and
pathcomponents == ['discovery', 'rescan']):
if inputdata != {'rescan': 'start'}:
raise exc.InvalidArgumentException()
rescan()
return (msg.KeyValueData({'rescan': 'started'}),)
elif operation in ('update', 'create'):
if 'node' not in inputdata:
raise exc.InvalidArgumentException('Missing node name in input')
mac = _get_mac_from_query(pathcomponents)
info = known_info[mac]
if info['handler'] is None:
raise exc.NotImplementedException(
'Unable to {0} to {1}'.format(operation,
'/'.join(pathcomponents)))
handler = info['handler'].NodeHandler(info, configmanager)
try:
eval_node(configmanager, handler, info, inputdata['node'],
manual=True)
except Exception as e:
            # or... incorrect password provided...
if 'Incorrect password' in str(e) or 'Unauthorized name' in str(e):
return [msg.ConfluentTargetInvalidCredentials(
inputdata['node'])]
raise
return [msg.AssignedResource(inputdata['node'])]
elif operation == 'delete':
mac = _get_mac_from_query(pathcomponents)
del known_info[mac]
return [msg.DeletedResource(mac)]
raise exc.NotImplementedException(
'Unable to {0} to {1}'.format(operation, '/'.join(pathcomponents)))
def _get_mac_from_query(pathcomponents):
_, queryparms, _, _ = _parameterize_path(pathcomponents[1:])
if 'by-mac' not in queryparms:
raise exc.InvalidArgumentException('Must target using "by-mac"')
mac = queryparms['by-mac'].replace('-', ':')
if mac not in known_info:
raise exc.NotFoundException('{0} not found'.format(mac))
return mac
def handle_read_api_request(pathcomponents):
# TODO(jjohnson2): This should be more generalized...
# odd indexes into components are 'by-'*, even indexes
# starting at 2 are parameters to previous index
if pathcomponents == ['discovery', 'rescan']:
return (msg.KeyValueData({'scanning': bool(scanner)}),)
subcats, queryparms, indexof, coll = _parameterize_path(pathcomponents[1:])
if len(pathcomponents) == 1:
dirlist = [msg.ChildCollection(x + '/') for x in sorted(list(subcats))]
dirlist.append(msg.ChildCollection('rescan'))
dirlist.append(msg.ChildCollection('autosense'))
return dirlist
if not coll:
return show_info(queryparms['by-mac'])
if not indexof:
return [msg.ChildCollection(x + '/') for x in sorted(list(subcats))]
if indexof not in list_info:
raise exc.NotFoundException('{0} is not found'.format(indexof))
return list_info[indexof](queryparms)
def detected_services():
for srv in known_services:
yield servicenames[srv]
def detected_models():
knownmodels = set([])
for info in known_info:
info = known_info[info]
if 'modelnumber' in info and info['modelnumber'] not in knownmodels:
knownmodels.add(info['modelnumber'])
yield info['modelnumber']
def _recheck_nodes(nodeattribs, configmanager):
if rechecklock.locked():
# if already in progress, don't run again
# it may make sense to schedule a repeat, but will try the easier and less redundant way first
return
with rechecklock:
return _recheck_nodes_backend(nodeattribs, configmanager)
def _recheck_nodes_backend(nodeattribs, configmanager):
global rechecker
_map_unique_ids(nodeattribs)
# for the nodes whose attributes have changed, consider them as potential
# strangers
if nodeattribs:
        macmap.vintage = 0  # expire current mac map data, in case the
                            # changed attributes impacted the result
for node in nodeattribs:
if node in known_nodes:
for somemac in known_nodes[node]:
unknown_info[somemac] = known_nodes[node][somemac]
unknown_info[somemac]['discostatus'] = 'unidentified'
# Now we go through ones we did not find earlier
for mac in list(unknown_info):
try:
_recheck_single_unknown(configmanager, mac)
except Exception:
traceback.print_exc()
continue
# now we go through ones that were identified, but could not pass
# policy or hadn't been able to verify key
for nodename in pending_nodes:
info = pending_nodes[nodename]
try:
if info['handler'] is None:
                continue
handler = info['handler'].NodeHandler(info, configmanager)
discopool.spawn_n(eval_node, configmanager, handler, info, nodename)
except Exception:
traceback.print_exc()
log.log({'error': 'Unexpected error during discovery of {0}, check debug '
'logs'.format(nodename)})
def _recheck_single_unknown(configmanager, mac):
info = unknown_info.get(mac, None)
_recheck_single_unknown_info(configmanager, info)
def _recheck_single_unknown_info(configmanager, info):
global rechecker
global rechecktime
if not info or info['handler'] is None:
return
if info['handler'] != pxeh and not info.get('addresses', None):
#log.log({'info': 'Missing address information in ' + repr(info)})
return
handler = info['handler'].NodeHandler(info, configmanager)
if handler.https_supported and not handler.https_cert:
if handler.cert_fail_reason == 'unreachable':
log.log(
{
'info': '{0} with hwaddr {1} is not reachable at {2}'
''.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
# addresses data is bad, delete the offending ip
info['addresses'] = [x for x in info.get('addresses', []) if x != handler.ipaddr]
# TODO(jjohnson2): rescan due to bad peer addr data?
# not just wait around for the next announce
return
log.log(
{
'info': '{0} with hwaddr {1} at address {2} is not yet running '
'https, will examine later'.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
if rechecker is not None and rechecktime > util.monotonic_time() + 300:
rechecker.cancel()
# if cancel did not result in dead, then we are in progress
if rechecker is None or rechecker.dead:
rechecktime = util.monotonic_time() + 300
rechecker = eventlet.spawn_after(300, _periodic_recheck,
configmanager)
return
nodename, info['maccount'] = get_nodename(configmanager, handler, info)
if nodename:
if handler.https_supported:
dp = configmanager.get_node_attributes([nodename],
('pubkeys.tls_hardwaremanager',))
lastfp = dp.get(nodename, {}).get('pubkeys.tls_hardwaremanager',
{}).get('value', None)
if util.cert_matches(lastfp, handler.https_cert):
info['nodename'] = nodename
known_nodes[nodename][info['hwaddr']] = info
info['discostatus'] = 'discovered'
return # already known, no need for more
discopool.spawn_n(eval_node, configmanager, handler, info, nodename)
def safe_detected(info):
if 'hwaddr' not in info or not info['hwaddr']:
return
if info['hwaddr'] in runningevals:
# Do not evaluate the same mac multiple times at once
return
runningevals[info['hwaddr']] = discopool.spawn(eval_detected, info)
def eval_detected(info):
try:
detected(info)
except Exception as e:
traceback.print_exc()
del runningevals[info['hwaddr']]
def detected(info):
global rechecker
global rechecktime
# later, manual and CMM discovery may act on SN and/or UUID
for service in info['services']:
if service in nodehandlers:
if service not in known_services:
known_services[service] = set([])
handler = nodehandlers[service]
info['handler'] = handler
break
else: # no nodehandler, ignore for now
return
if (handler and not handler.NodeHandler.adequate(info) and
info.get('protocol', None)):
eventlet.spawn_after(10, info['protocol'].fix_info, info,
safe_detected)
return
if info['hwaddr'] in known_info and 'addresses' in info:
# we should tee these up for parsing when an enclosure comes up
# also when switch config parameters change, should discard
# and there's also if wiring is fixed...
# of course could periodically revisit known_nodes
# replace potentially stale address info
#TODO(jjohnson2): remove this
# temporary workaround for XCC not doing SLP DA over dedicated port
# bz 93219, fix submitted, but not in builds yet
        # strictly speaking, a device legitimately going ipv4-only is mistreated
        # here, but that should be an edge case
oldaddr = known_info[info['hwaddr']].get('addresses', [])
for addr in info['addresses']:
if addr[0].startswith('fe80::'):
break
else:
for addr in oldaddr:
if addr[0].startswith('fe80::'):
info['addresses'].append(addr)
if known_info[info['hwaddr']].get(
'addresses', []) == info['addresses']:
# if the ip addresses match, then assume no changes
# now something resetting to defaults could, in theory
# have the same address, but need to be reset
# in that case, however, a user can clear pubkeys to force a check
return
known_info[info['hwaddr']] = info
cfg = cfm.ConfigManager(None)
if handler:
handler = handler.NodeHandler(info, cfg)
handler.scan()
try:
if 'modelnumber' not in info:
info['modelnumber'] = info['attributes']['enclosure-machinetype-model'][0]
except (KeyError, IndexError):
pass
if 'modelnumber' in info:
known_services[service].add(info['modelnumber'])
try:
if 'serialnumber' not in info:
snum = info['attributes']['enclosure-serial-number'][0].strip()
if snum:
info['serialnumber'] = snum
except (KeyError, IndexError):
pass
if 'serialnumber' in info:
known_serials[info['serialnumber']] = info
uuid = info.get('uuid', None)
if uuid_is_valid(uuid):
known_uuids[uuid][info['hwaddr']] = info
info['otheraddresses'] = set([])
for i4addr in info.get('attributes', {}).get('ipv4-address', []):
info['otheraddresses'].add(i4addr)
if handler and handler.https_supported and not handler.https_cert:
if handler.cert_fail_reason == 'unreachable':
log.log(
{
'info': '{0} with hwaddr {1} is not reachable by https '
'at address {2}'.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
info['addresses'] = [x for x in info.get('addresses', []) if x != handler.ipaddr]
return
log.log(
{'info': '{0} with hwaddr {1} at address {2} is not yet running '
'https, will examine later'.format(
handler.devname, info['hwaddr'], handler.ipaddr
)})
if rechecker is not None and rechecktime > util.monotonic_time() + 300:
rechecker.cancel()
if rechecker is None or rechecker.dead:
rechecktime = util.monotonic_time() + 300
rechecker = eventlet.spawn_after(300, _periodic_recheck, cfg)
unknown_info[info['hwaddr']] = info
        info['discostatus'] = 'unidentified'
#TODO, eventlet spawn after to recheck sooner, or somehow else
# influence periodic recheck to shorten delay?
return
nodename, info['maccount'] = get_nodename(cfg, handler, info)
if nodename and handler and handler.https_supported:
dp = cfg.get_node_attributes([nodename],
('pubkeys.tls_hardwaremanager', 'id.uuid', 'discovery.policy'))
dp = dp.get(nodename, {})
lastfp = dp.get('pubkeys.tls_hardwaremanager',
{}).get('value', None)
if util.cert_matches(lastfp, handler.https_cert):
info['nodename'] = nodename
known_nodes[nodename][info['hwaddr']] = info
info['discostatus'] = 'discovered'
uuid = info.get('uuid', None)
if uuid:
storeuuid = dp.get('id.uuid', {}).get('value', None)
if not storeuuid:
discop = dp.get('discovery.policy', {}).get('value', '')
if discop:
policies = set(discop.split(','))
else:
policies = set([])
if policies & {'open', 'permissive'}:
cfg.set_node_attributes({nodename: {'id.uuid': info['uuid']}})
return # already known, no need for more
#TODO(jjohnson2): We might have to get UUID for certain searches...
#for now defer probe until inside eval_node. We might not have
#a nodename without probe in the future.
if nodename and handler:
eval_node(cfg, handler, info, nodename)
elif handler:
#log.log(
# {'info': 'Detected unknown {0} with hwaddr {1} at '
# 'address {2}'.format(
# handler.devname, info['hwaddr'], handler.ipaddr
# )})
info['discostatus'] = 'unidentified'
unknown_info[info['hwaddr']] = info
def b64tohex(b64str):
bd = base64.b64decode(b64str)
bd = bytearray(bd)
return ''.join(['{0:02x}'.format(x) for x in bd])
def get_enclosure_chain_head(nodename, cfg):
ne = True
members = [nodename]
while ne:
ne = cfg.get_node_attributes(
nodename, 'enclosure.extends').get(nodename, {}).get(
'enclosure.extends', {}).get('value', None)
if not ne:
return nodename
if ne in members:
raise exc.InvalidArgumentException(
'Circular chain that includes ' + nodename)
if not cfg.is_node(ne):
raise exc.InvalidArgumentException(
'{0} is chained to nonexistent node {1} '.format(
nodename, ne))
nodename = ne
members.append(nodename)
return nodename
def get_chained_smm_name(nodename, cfg, handler, nl=None, checkswitch=True):
# nodename is the head of the chain, cfg is a configmanager, handler
# is the handler of the current candidate, nl is optional indication
# of the next link in the chain, checkswitch can disable the switch
# search if not indicated by current situation
# returns the new name and whether it has been securely validated or not
# first we check to see if directly connected
mycert = handler.https_cert
if checkswitch:
fprints = macmap.get_node_fingerprints(nodename, cfg)
for fprint in fprints:
if util.cert_matches(fprint[0], mycert):
# ok we have a direct match, it is this node
return nodename, fprint[1]
# ok, unable to get it, need to traverse the chain from the beginning
if not nl:
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
while nl:
if len(nl) != 1:
raise exc.InvalidArgumentException('Multiple enclosures trying to '
'extend a single enclosure')
cd = cfg.get_node_attributes(nodename, ['hardwaremanagement.manager',
'pubkeys.tls_hardwaremanager'])
pkey = cd[nodename].get('pubkeys.tls_hardwaremanager', {}).get(
'value', None)
if not pkey:
# We cannot continue through a break in the chain
return None, False
smmaddr = cd.get(nodename, {}).get('hardwaremanagement.manager', {}).get('value', None)
if not smmaddr:
return None, False
if pkey:
cv = util.TLSCertVerifier(
cfg, nodename, 'pubkeys.tls_hardwaremanager').verify_cert
for fprint in get_smm_neighbor_fingerprints(smmaddr, cv):
if util.cert_matches(fprint, mycert):
# a trusted chain member vouched for the cert
# so it's validated
return nl[0], True
# advance down the chain by one and try again
nodename = nl[0]
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
return None, False
def get_smm_neighbor_fingerprints(smmaddr, cv):
if ':' in smmaddr:
smmaddr = '[{0}]'.format(smmaddr)
wc = webclient.SecureHTTPConnection(smmaddr, verifycallback=cv)
try:
neighs = wc.grab_json_response('/scripts/neighdata.json')
except Exception:
log.log({'error': 'Failure getting LLDP information from {}'.format(smmaddr)})
return
if not neighs:
return
for neigh in neighs:
if 'sha256' not in neigh:
continue
yield 'sha256$' + b64tohex(neigh['sha256'])
def get_nodename(cfg, handler, info):
nodename = None
maccount = None
info['verified'] = False
if not handler:
return None, None
if handler.https_supported:
currcert = handler.https_cert
if not currcert:
info['discofailure'] = 'nohttps'
return None, None
currprint = util.get_fingerprint(currcert, 'sha256')
nodename = nodes_by_fprint.get(currprint, None)
if not nodename:
# Try SHA512 as well
currprint = util.get_fingerprint(currcert)
nodename = nodes_by_fprint.get(currprint, None)
if not nodename:
curruuid = info.get('uuid', None)
if uuid_is_valid(curruuid):
nodename = nodes_by_uuid.get(curruuid, None)
if nodename is None:
_map_unique_ids()
nodename = nodes_by_uuid.get(curruuid, None)
if not nodename and info['handler'] == pxeh:
enrich_pxe_info(info)
nodename = info.get('nodename', None)
if not nodename:
# Ok, see if it is something with a chassis-uuid and discover by
# chassis
nodename = get_nodename_from_enclosures(cfg, info)
if not nodename and handler.devname == 'SMM':
nodename = get_nodename_from_chained_smms(cfg, handler, info)
if not nodename: # as a last resort, search switches for info
# This is the slowest potential operation, so we hope for the
# best to occur prior to this
nodename, macinfo = macmap.find_nodeinfo_by_mac(info['hwaddr'], cfg)
maccount = macinfo['maccount']
if nodename:
if handler.devname == 'SMM':
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
if nl:
# We found an SMM, and it's in a chain per configuration
# we need to ask the switch for the fingerprint to see
# if we have a match or not
newnodename, v = get_chained_smm_name(nodename, cfg,
handler, nl)
if newnodename:
# while this started by switch, it was disambiguated
info['verified'] = v
return newnodename, None
else:
errorstr = ('Attempt to discover SMM in chain but '
'unable to follow chain to the specific '
'SMM, it may be waiting on an upstream '
'SMM, chain starts with {0}'.format(
nodename))
log.log({'error': errorstr})
return None, None
if (nodename and
not handler.discoverable_by_switch(macinfo['maccount'])):
if handler.devname == 'SMM':
errorstr = 'Attempt to discover SMM by switch, but chained ' \
'topology or incorrect net attributes detected, ' \
'which is not compatible with switch discovery ' \
'of SMM, nodename would have been ' \
'{0}'.format(nodename)
log.log({'error': errorstr})
return None, None
return nodename, maccount
def get_nodename_from_chained_smms(cfg, handler, info):
nodename = None
for fprint in get_smm_neighbor_fingerprints(
handler.ipaddr, lambda x: True):
if fprint in nodes_by_fprint:
# need to chase the whole chain
# to support either direction
chead = get_enclosure_chain_head(nodes_by_fprint[fprint],
cfg)
newnodename, v = get_chained_smm_name(
chead, cfg, handler, checkswitch=False)
if newnodename:
info['verified'] = v
nodename = newnodename
return nodename
def get_node_guess_by_uuid(uuid):
for mac in known_uuids.get(uuid, {}):
nodename = known_uuids[uuid][mac].get('nodename', None)
if nodename:
return nodename
return None
def get_node_by_uuid_or_mac(uuidormac):
node = pxe.macmap.get(uuidormac, None)
if node is not None:
return node
return nodes_by_uuid.get(uuidormac, None)
def get_nodename_from_enclosures(cfg, info):
nodename = None
cuuid = info.get('attributes', {}).get('chassis-uuid', [None])[0]
if cuuid and cuuid in nodes_by_uuid:
encl = nodes_by_uuid[cuuid]
bay = info.get('enclosure.bay', None)
if bay:
tnl = cfg.filter_node_attributes('enclosure.manager=' + encl)
tnl = list(
cfg.filter_node_attributes('enclosure.bay={0}'.format(bay),
tnl))
if len(tnl) == 1:
# This is not a secure assurance, because it's by
# uuid instead of a key
nodename = tnl[0]
return nodename
def eval_node(cfg, handler, info, nodename, manual=False):
try:
handler.probe() # unicast interrogation as possible to get more data
# switch concurrently
# do some preconfig, for example, to bring a SMM online if applicable
handler.preconfig(nodename)
except Exception as e:
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
        errorstr = 'An error occurred during discovery, check the ' \
'trace and stderr logs, mac was {0} and ip was {1}' \
', the node or the containing enclosure was {2}' \
''.format(info['hwaddr'], handler.ipaddr, nodename)
traceback.print_exc()
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
return
    # first, if it had a bay, it was in an enclosure. If it was discovered by
# switch, it is probably the enclosure manager and not
# the node directly. switch is ambiguous and we should leave it alone
if 'enclosure.bay' in info and handler.is_enclosure:
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
log.log({'error': 'Something that is an enclosure reported a bay, '
'not possible'})
if manual:
raise exc.InvalidArgumentException()
return
nl = list(cfg.filter_node_attributes('enclosure.manager=' + nodename))
if not handler.is_enclosure and nl:
# The specified node is an enclosure (has nodes mapped to it), but
# what we are talking to is *not* an enclosure
# might be ambiguous, need to match chassis-uuid as well..
if 'enclosure.bay' not in info:
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
errorstr = '{2} with mac {0} is in {1}, but unable to ' \
'determine bay number'.format(info['hwaddr'],
nodename,
handler.ipaddr)
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
return
enl = list(cfg.filter_node_attributes('enclosure.extends=' + nodename))
if enl:
# ambiguous SMM situation according to the configuration, we need
# to match uuid
encuuid = info['attributes'].get('chassis-uuid', None)
if encuuid:
encuuid = encuuid[0]
enl = list(cfg.filter_node_attributes('id.uuid=' + encuuid))
if len(enl) != 1:
# errorstr = 'No SMM by given UUID known, *yet*'
# if manual:
# raise exc.InvalidArgumentException(errorstr)
# log.log({'error': errorstr})
if encuuid in pending_by_uuid:
pending_by_uuid[encuuid].append(info)
else:
pending_by_uuid[encuuid] = [info]
return
# We found the real smm, replace the list with the actual smm
# to continue
nl = list(cfg.filter_node_attributes(
'enclosure.manager=' + enl[0]))
else:
errorstr = 'Chained SMM configuration with older XCC, ' \
'unable to perform zero power discovery'
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
return
# search for nodes fitting our description using filters
# lead with the most specific to have a small second pass
nl = list(cfg.filter_node_attributes(
'enclosure.bay={0}'.format(info['enclosure.bay']), nl))
if len(nl) != 1:
info['discofailure'] = 'ambigconfig'
if len(nl):
errorstr = 'The following nodes have duplicate ' \
'enclosure attributes: ' + ','.join(nl)
else:
errorstr = 'The {0} in enclosure {1} bay {2} does not ' \
'seem to be a defined node ({3})'.format(
handler.devname, nodename,
info['enclosure.bay'],
handler.ipaddr,
)
if manual:
raise exc.InvalidArgumentException(errorstr)
log.log({'error': errorstr})
unknown_info[info['hwaddr']] = info
info['discostatus'] = 'unidentified'
return
nodename = nl[0]
if not discover_node(cfg, handler, info, nodename, manual):
# store it as pending, assuming blocked on enclosure
# assurance...
pending_nodes[nodename] = info
else:
# we can and did accurately discover by switch or in enclosure
# but... is this really ok? could be on an upstream port or
# erroneously put in the enclosure with no nodes yet
# so first, see if the candidate node is a chain host
if not manual:
if info.get('maccount', False):
# discovery happened through switch
nl = list(cfg.filter_node_attributes(
'enclosure.extends=' + nodename))
if nl:
# The candidate nodename is the head of a chain, we must
# validate the smm certificate by the switch
fprints = macmap.get_node_fingerprints(nodename, cfg)
for fprint in fprints:
if util.cert_matches(fprint[0], handler.https_cert):
if not discover_node(cfg, handler, info,
nodename, manual):
pending_nodes[nodename] = info
return
if (info.get('maccount', False) and
not handler.discoverable_by_switch(info['maccount'])):
errorstr = 'The detected node {0} was detected using switch, ' \
'however the relevant port has too many macs learned ' \
'for this type of device ({1}) to be discovered by ' \
'switch.'.format(nodename, handler.devname)
log.log({'error': errorstr})
return
if not discover_node(cfg, handler, info, nodename, manual):
pending_nodes[nodename] = info
def discover_node(cfg, handler, info, nodename, manual):
if manual:
if not cfg.is_node(nodename):
raise exc.InvalidArgumentException(
'{0} is not a defined node, must be defined before an '
'endpoint may be assigned to it'.format(nodename))
if handler.https_supported:
currcert = handler.https_cert
if currcert:
currprint = util.get_fingerprint(currcert, 'sha256')
prevnode = nodes_by_fprint.get(currprint, None)
if prevnode and prevnode != nodename:
raise exc.InvalidArgumentException(
'Attempt to assign {0} conflicts with existing node {1} '
'based on TLS certificate.'.format(nodename, prevnode))
known_nodes[nodename][info['hwaddr']] = info
if info['hwaddr'] in unknown_info:
del unknown_info[info['hwaddr']]
info['discostatus'] = 'identified'
dp = cfg.get_node_attributes(
[nodename], ('discovery.policy', 'id.uuid',
'pubkeys.tls_hardwaremanager'))
policy = dp.get(nodename, {}).get('discovery.policy', {}).get(
'value', None)
if policy is None:
policy = ''
policies = set(policy.split(','))
lastfp = dp.get(nodename, {}).get('pubkeys.tls_hardwaremanager',
{}).get('value', None)
# TODO(jjohnson2): permissive requires we guarantee storage of
# the pubkeys, which is deferred for a little bit
# Also, 'secure', when we have the needed infrastructure done
# in some product or another.
curruuid = info.get('uuid', False)
if 'pxe' in policies and info['handler'] == pxeh:
return do_pxe_discovery(cfg, handler, info, manual, nodename, policies)
elif ('permissive' in policies and handler.https_supported and lastfp and
not util.cert_matches(lastfp, handler.https_cert) and not manual):
info['discofailure'] = 'fingerprint'
log.log({'info': 'Detected replacement of {0} with existing '
'fingerprint and permissive discovery policy, not '
'doing discovery unless discovery.policy=open or '
'pubkeys.tls_hardwaremanager attribute is cleared '
'first'.format(nodename)})
return False # With a permissive policy, do not discover new
elif policies & set(('open', 'permissive')) or manual:
info['nodename'] = nodename
if info['handler'] == pxeh:
return do_pxe_discovery(cfg, handler, info, manual, nodename, policies)
elif manual or not util.cert_matches(lastfp, handler.https_cert):
# only 'discover' if it is not the same as last time
try:
handler.config(nodename)
except Exception as e:
info['discofailure'] = 'bug'
if manual:
raise
log.log(
{'error':
'Error encountered trying to set up {0}, {1}'.format(
nodename, str(e))})
traceback.print_exc()
return False
newnodeattribs = {}
if list(cfm.list_collective()):
# We are in a collective, check collective.manager
cmc = cfg.get_node_attributes(nodename, 'collective.manager')
cm = cmc.get(nodename, {}).get('collective.manager', {}).get('value', None)
if not cm:
# Node is being discovered in collective, but no collective.manager, default
# to the collective member actually able to execute the discovery
newnodeattribs['collective.manager'] = collective.get_myname()
if 'uuid' in info:
newnodeattribs['id.uuid'] = info['uuid']
if 'serialnumber' in info:
newnodeattribs['id.serial'] = info['serialnumber']
if 'modelnumber' in info:
newnodeattribs['id.model'] = info['modelnumber']
if handler.https_cert:
newnodeattribs['pubkeys.tls_hardwaremanager'] = \
util.get_fingerprint(handler.https_cert, 'sha256')
if newnodeattribs:
cfg.set_node_attributes({nodename: newnodeattribs})
log.log({'info': 'Discovered {0} ({1})'.format(nodename,
handler.devname)})
info['discostatus'] = 'discovered'
for i in pending_by_uuid.get(curruuid, []):
eventlet.spawn_n(_recheck_single_unknown_info, cfg, i)
try:
del pending_by_uuid[curruuid]
except KeyError:
pass
return True
if info['handler'] == pxeh:
olduuid = dp.get(nodename, {}).get('id.uuid', {}).get(
'value', '')
if olduuid.lower() != info['uuid']:
log.log({'info': 'Detected {0}, but discovery.policy is not set to a '
'value allowing discovery (open, permissive, or pxe)'.format(
nodename)})
info['discofailure'] = 'policy'
else:
log.log({'info': 'Detected {0}, but discovery.policy is not set to a '
'value allowing discovery (open or permissive)'.format(
nodename)})
info['discofailure'] = 'policy'
return False
def do_pxe_discovery(cfg, handler, info, manual, nodename, policies):
# use uuid based scheme in lieu of tls cert, ideally only
# for stateless 'discovery' targets like pxe, where data does not
# change
uuidinfo = cfg.get_node_attributes(nodename, ['id.uuid', 'id.serial', 'id.model', 'net*.hwaddr', 'net*.bootable'])
if manual or policies & set(('open', 'pxe')):
enrich_pxe_info(info)
attribs = {}
olduuid = uuidinfo.get(nodename, {}).get('id.uuid', None)
if isinstance(olduuid, dict):
olduuid = olduuid.get('value', None)
uuid = info.get('uuid', None)
if uuid and uuid != olduuid:
attribs['id.uuid'] = info['uuid']
sn = info.get('serialnumber', None)
mn = info.get('modelnumber', None)
if sn and sn != uuidinfo.get(nodename, {}).get('id.serial', None):
attribs['id.serial'] = sn
if mn and mn != uuidinfo.get(nodename, {}).get('id.model', None):
attribs['id.model'] = mn
for attrname in uuidinfo.get(nodename, {}):
if attrname.endswith('.bootable') and uuidinfo[nodename][attrname].get('value', None):
newattrname = attrname[:-8] + 'hwaddr'
oldhwaddr = uuidinfo.get(nodename, {}).get(newattrname, {}).get('value', None)
if info['hwaddr'] != oldhwaddr:
attribs[newattrname] = info['hwaddr']
if attribs:
cfg.set_node_attributes({nodename: attribs})
if info['uuid'] in known_pxe_uuids:
return True
if uuid_is_valid(info['uuid']):
known_pxe_uuids[info['uuid']] = nodename
#log.log({'info': 'Detected {0} ({1} with mac {2})'.format(
# nodename, handler.devname, info['hwaddr'])})
return True
attribwatcher = None
nodeaddhandler = None
needaddhandled = False
def _handle_nodelist_change(configmanager):
global needaddhandled
global nodeaddhandler
macmap.vintage = 0 # the current mac map is probably inaccurate
_recheck_nodes((), configmanager)
if needaddhandled:
needaddhandled = False
nodeaddhandler = eventlet.spawn(_handle_nodelist_change, configmanager)
else:
nodeaddhandler = None
def newnodes(added, deleting, renamed, configmanager):
global attribwatcher
global needaddhandled
global nodeaddhandler
alldeleting = set(deleting) | set(renamed)
for node in alldeleting:
if node not in known_nodes:
continue
for mac in known_nodes[node]:
if mac in known_info:
del known_info[mac]
del known_nodes[node]
_map_unique_ids()
configmanager.remove_watcher(attribwatcher)
allnodes = configmanager.list_nodes()
attribwatcher = configmanager.watch_attributes(
allnodes, ('discovery.policy', 'net*.switch',
'hardwaremanagement.manager', 'net*.switchport',
'id.uuid', 'pubkeys.tls_hardwaremanager',
'net*.bootable'), _recheck_nodes)
if nodeaddhandler:
needaddhandled = True
else:
nodeaddhandler = eventlet.spawn(_handle_nodelist_change, configmanager)
rechecker = None
rechecktime = None
rechecklock = eventlet.semaphore.Semaphore()
def _periodic_recheck(configmanager):
global rechecker
global rechecktime
rechecker = None
try:
_recheck_nodes((), configmanager)
except Exception:
traceback.print_exc()
log.log({'error': 'Unexpected error during discovery, check debug '
'logs'})
# if rechecker is set, it means that an accelerated schedule
# for rechecker was requested in the course of recheck_nodes
if rechecker is None:
rechecktime = util.monotonic_time() + 900
rechecker = eventlet.spawn_after(900, _periodic_recheck,
configmanager)
def rescan():
_map_unique_ids()
global scanner
if scanner:
return
else:
scanner = eventlet.spawn(blocking_scan)
def blocking_scan():
global scanner
slpscan = eventlet.spawn(slp.active_scan, safe_detected, slp)
ssdpscan = eventlet.spawn(ssdp.active_scan, safe_detected, ssdp)
slpscan.wait()
ssdpscan.wait()
scanner = None
def start_detection():
global attribwatcher
global rechecker
global rechecktime
_map_unique_ids()
cfg = cfm.ConfigManager(None)
allnodes = cfg.list_nodes()
attribwatcher = cfg.watch_attributes(
allnodes, ('discovery.policy', 'net*.switch',
'hardwaremanagement.manager', 'net*.switchport', 'id.uuid',
'pubkeys.tls_hardwaremanager'), _recheck_nodes)
cfg.watch_nodecollection(newnodes)
autosense = cfm.get_global('discovery.autosense')
if autosense or autosense is None:
start_autosense()
if rechecker is None:
rechecktime = util.monotonic_time() + 900
rechecker = eventlet.spawn_after(900, _periodic_recheck, cfg)
eventlet.spawn_n(ssdp.snoop, safe_detected, None, ssdp, get_node_by_uuid_or_mac)
def stop_autosense():
for watcher in list(autosensors):
watcher.kill()
autosensors.discard(watcher)
def start_autosense():
autosensors.add(eventlet.spawn(slp.snoop, safe_detected, slp))
autosensors.add(eventlet.spawn(pxe.snoop, safe_detected, pxe, get_node_guess_by_uuid))
nodes_by_fprint = {}
nodes_by_uuid = {}
known_pxe_uuids = {}
def _map_unique_ids(nodes=None):
global nodes_by_uuid
global nodes_by_fprint
global known_pxe_uuids
    # Map current known ids based on uuid and fingerprints for fast lookup
cfg = cfm.ConfigManager(None)
if nodes is None:
nodes_by_uuid = {}
nodes_by_fprint = {}
known_pxe_uuids = {}
nodes = cfg.list_nodes()
bigmap = cfg.get_node_attributes(nodes,
('id.uuid',
'pubkeys.tls_hardwaremanager'))
for uuid in list(nodes_by_uuid):
node = nodes_by_uuid[uuid]
if node in bigmap:
del nodes_by_uuid[uuid]
for uuid in list(known_pxe_uuids):
node = known_pxe_uuids[uuid]
if node in bigmap:
del known_pxe_uuids[uuid]
for fprint in list(nodes_by_fprint):
node = nodes_by_fprint[fprint]
if node in bigmap:
del nodes_by_fprint[fprint]
for node in bigmap:
uuid = bigmap[node].get('id.uuid', {}).get('value', '').lower()
if uuid_is_valid(uuid):
nodes_by_uuid[uuid] = node
known_pxe_uuids[uuid] = node
fprint = bigmap[node].get(
'pubkeys.tls_hardwaremanager', {}).get('value', None)
if fprint:
nodes_by_fprint[fprint] = node
if __name__ == '__main__':
start_detection()
while True:
eventlet.sleep(30)
| xcat2/confluent | confluent_server/confluent/discovery/core.py | Python | apache-2.0 | 57,545 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import credentials_factory as credentials
from tempest import config
from tempest import exceptions
from tempest.tests import fake_config
from tempest.tests.lib import base
class TestLegacyCredentialsProvider(base.TestCase):
fixed_params = {'identity_version': 'v2'}
def setUp(self):
super(TestLegacyCredentialsProvider, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
def test_get_creds_roles_legacy_invalid(self):
test_accounts_class = credentials.LegacyCredentialProvider(
**self.fixed_params)
self.assertRaises(exceptions.InvalidConfiguration,
test_accounts_class.get_creds_by_roles,
['fake_role'])
| HybridF5/tempest_debug | tempest/tests/common/test_credentials.py | Python | apache-2.0 | 1,461 |
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# downsample.py
# Created by Disa Mhembere on 2014-07-08.
# Email: [email protected]
import argparse
from glob import glob
from collections import defaultdict
import os, sys
import igraph
from mrcap.atlas import Atlas
from mrcap.utils import igraph_io
from time import time
import downsample_atlas
import nibabel as nib
import zipfile
sys.path += [os.path.abspath("../")]
from zindex import MortonXYZ
import numpy as np
import cPickle as pickle
DEBUG = False
def downsample(g, factor=-1, ds_atlas=None, ignore_zero=True):
"""
Downsample a graph by a scale factor.
  Downsamples by collapsing regions using a dynamically generated downsampled atlas. Rebuilding the graph takes on the order of a few minutes on a standard desktop computer with more than 4GB of RAM.
**Positional Arguments**
g: [.graphml; XML file]
- A full sized big graph.
  factor: [int] (default = -1)
- The downsampling factor.
ds_atlas: [.nii; nifti image] (default = MNI152)
- A prebuilt downsampled nifti atlas with which to downsample.
ignore_zero: [boolean] (default = True)
- We assume the zeroth label is outside the brain.
**Returns**
new graph: [.graphml; XML file]
- The input graph downsampled to the scale of the input atlas.
"""
start = time()
edge_dict = defaultdict(int) # key=(v1, v2), value=weight
if factor >= 0:
print "Generating downsampled atlas ..." # TODO: Cythonize
ds_atlas = downsample_atlas.create(start=factor) # Create ds atlas and an atlas map for the original atlas
ds_atlas = ds_atlas.get_data() # don't care about other atlas data
spatial_map = [0]*(int(ds_atlas.max())+1)
# This takes O(m)
for e in g.es:
src_spatial_id = long(g.vs[e.source]["spatial_id"])
tgt_spatial_id = long(g.vs[e.target]["spatial_id"])
src_x, src_y, src_z = MortonXYZ(src_spatial_id)
tgt_x, tgt_y, tgt_z = MortonXYZ(tgt_spatial_id)
src = ds_atlas[src_x, src_y, src_z]
tgt = ds_atlas[tgt_x, tgt_y, tgt_z]
# FIXME GK: We will skip all region zeros for all atlases which is not really true!
if ignore_zero:
if (src and tgt) and (src != tgt):
if not spatial_map[src]: spatial_map[src] = `src_spatial_id`
if not spatial_map[tgt]: spatial_map[tgt] = `tgt_spatial_id`
edge_dict[(src, tgt)] += e["weight"]
else:
print "Never should get here"
if not spatial_map[src]: spatial_map[src] = `src_spatial_id`
if not spatial_map[tgt]: spatial_map[tgt] = `tgt_spatial_id`
edge_dict[(src, tgt)] += e["weight"]
del g # free me
new_graph = igraph.Graph(n=len(spatial_map), directed=False) # len spatial_map is the # of vertices
new_graph.vs["spatial_id"] = spatial_map
print "Adding edges to graph ..."
new_graph += edge_dict.keys()
print "Adding edge weight to graph ..."
new_graph.es["weight"] = edge_dict.values()
print "Deleting zero-degree nodes..."
zero_deg_nodes = np.where(np.array(new_graph.degree()) == 0 )[0]
new_graph.delete_vertices(zero_deg_nodes)
print "Completed building graph in %.3f sec ... " % (time() - start)
print new_graph.summary()
return new_graph
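# Example usage sketch (file names are hypothetical):
#   g = igraph_io.read_arbitrary("big_graph.graphml", informat="graphml")
#   small = downsample(g, factor=2)
#   small.write("big_graph_ds.graphml", format="graphml")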
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("infn", action="store", help="Input file name")
parser.add_argument("-f", "--factor", action="store", type=int, help="Downsampling factor")
parser.add_argument("-a", "--ds_atlas", action="store", help="Pre-Downsampled atlas file name")
parser.add_argument("outfn", action="store", help="Output file name")
parser.add_argument("--informat", "-i", action="store", default="graphml", help="Input format of the graph")
parser.add_argument("--outformat", "-o", action="store", default="graphml", help="Output format of the graph")
result = parser.parse_args()
if result.factor >= 0:
g = igraph_io.read_arbitrary(result.infn, informat=result.informat)
new_graph = downsample(g, factor=result.factor)
elif result.ds_atlas:
g = igraph_io.read_arbitrary(result.infn, informat=result.informat)
new_graph = downsample(g, ds_atlas=nib.load(result.ds_atlas))
else:
sys.stderr.write("[ERROR]: either -f or -a flag must be specified\n")
exit(-1)
new_graph.write(result.outfn, format=result.outformat)
if __name__ == "__main__":
main()
| openconnectome/m2g | MR-OCP/mrcap/utils/downsample.py | Python | apache-2.0 | 4,933 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from helpers import unittest
from luigi.mock import MockTarget, MockFileSystem
from luigi.format import Nop
class MockFileTest(unittest.TestCase):
def test_1(self):
t = MockTarget('test')
p = t.open('w')
print('test', file=p)
p.close()
q = t.open('r')
self.assertEqual(list(q), ['test\n'])
q.close()
def test_with(self):
t = MockTarget("foo")
with t.open('w') as b:
b.write("bar")
with t.open('r') as b:
self.assertEqual(list(b), ['bar'])
def test_bytes(self):
t = MockTarget("foo", format=Nop)
with t.open('wb') as b:
b.write(b"bar")
with t.open('rb') as b:
self.assertEqual(list(b), [b'bar'])
def test_default_mode_value(self):
t = MockTarget("foo")
with t.open('w') as b:
b.write("bar")
with t.open() as b:
self.assertEqual(list(b), ['bar'])
def test_mode_none_error(self):
t = MockTarget("foo")
with self.assertRaises(TypeError):
with t.open(None) as b:
b.write("bar")
# That should work in python2 because of the autocast
# That should work in python3 because the default format is Text
def test_unicode(self):
t = MockTarget("foo")
with t.open('w') as b:
b.write(u"bar")
with t.open('r') as b:
self.assertEqual(b.read(), u'bar')
class MockFileSystemTest(unittest.TestCase):
fs = MockFileSystem()
def _touch(self, path):
t = MockTarget(path)
with t.open('w'):
pass
def setUp(self):
self.fs.clear()
self.path = "/tmp/foo"
self.path2 = "/tmp/bar"
self.path3 = "/tmp/foobar"
self._touch(self.path)
self._touch(self.path2)
def test_copy(self):
self.fs.copy(self.path, self.path3)
self.assertTrue(self.fs.exists(self.path))
self.assertTrue(self.fs.exists(self.path3))
def test_exists(self):
self.assertTrue(self.fs.exists(self.path))
def test_remove(self):
self.fs.remove(self.path)
self.assertFalse(self.fs.exists(self.path))
def test_remove_recursive(self):
self.fs.remove("/tmp", recursive=True)
self.assertFalse(self.fs.exists(self.path))
self.assertFalse(self.fs.exists(self.path2))
def test_rename(self):
self.fs.rename(self.path, self.path3)
self.assertFalse(self.fs.exists(self.path))
self.assertTrue(self.fs.exists(self.path3))
def test_listdir(self):
self.assertEqual(sorted([self.path, self.path2]), sorted(self.fs.listdir("/tmp")))
| adaitche/luigi | test/mock_test.py | Python | apache-2.0 | 3,350 |
# AZURE TRANSLATOR DEMO
# this demo is minimal
# it shows ready-made examples with the voice changed to match the target language
# https://github.com/MyRobotLab/inmoov/blob/develop/InMoov/services/G_Translator.py
#voice service
mouth=Runtime.createAndStart("mouth", "MarySpeech")
#azure service
AzureTranslator=Runtime.createAndStart("AzureTranslator", "AzureTranslator")
AzureTranslator.setCredentials("YOUR_KEY_HERE_7da9defb-7d86-etc...")
#voice output
mouth.setVoice("cmu-bdl-hsmm")
mouth.setLanguage("en")
supported_languages = { # as defined here: http://msdn.microsoft.com/en-us/library/hh456380.aspx
'da' : 'Danish',
'nl' : 'Dutch',
'en' : 'English',
'fr' : 'French',
'de' : 'German',
'it' : 'Italian',
'is' : 'Iceland',
'no' : 'Norwegian',
'pt' : 'Portuguese',
'ru' : 'Russian',
'es' : 'Spanish',
'sv' : 'Swedish',
'tr' : 'Turkish',
'ro' : 'Romanian',
'ja' : 'Japanese',
'pl' : 'Polish',
}
#Mary tts voice name map
male_languagesMary = {
'da' : 'cmu-bdl-hsmm',#'dfki-pavoque-neutral-hsmm',
'nl' : 'cmu-bdl-hsmm',#'dfki-pavoque-neutral-hsmm',
'en' : 'cmu-bdl-hsmm',
'fr' : 'cmu-bdl-hsmm',
'de' : 'cmu-bdl-hsmm',#'dfki-pavoque-neutral-hsmm',
'it' : 'cmu-bdl-hsmm',#'istc-lucia-hsmm',
'is' : 'cmu-bdl-hsmm',#'dfki-pavoque-neutral-hsmm',
'no' : 'cmu-bdl-hsmm',#'dfki-pavoque-neutral-hsmm',
'pt' : 'cmu-bdl-hsmm',#'istc-lucia-hsmm',
'ru' : 'cmu-bdl-hsmm',
'es' : 'cmu-bdl-hsmm',#'istc-lucia-hsmm',
'sv' : 'cmu-bdl-hsmm',
'tr' : 'cmu-bdl-hsmm',#'dfki-ot-hsmm',
'ro' : 'cmu-bdl-hsmm',
'ja' : 'cmu-bdl-hsmm',
'pl' : 'cmu-bdl-hsmm',
}
#Translate to :
#TODO ADD TRANSLATED KEYWORDS
en_languages = {
'danish' : 'da',
'danois' : 'da',
'dutch' : 'nl',
'hollandais' : 'nl',
'english' : 'en',
'anglais' : 'en',
'french' : 'fr',
'français' : 'fr',
'german' : 'de',
'allemand' : 'de',
'italian' : 'it',
'italien' : 'it',
'norwegian' : 'no',
'norvegien' : 'no',
'icelandic' : 'is',
'islandais' : 'is',
'spanish' : 'es',
'espagnol' : 'es',
'swedish' : 'sv',
'suédois' : 'sv',
'japanese' : 'ja',
'japonais' : 'ja',
'portuguese' : 'pt',
'portugais' : 'pt',
'turkish' : 'tr',
'turk' : 'tr',
'russian' : 'ru',
'russe' : 'ru',
'romanian' : 'ro',
'roumain' : 'ro',
}
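# note: translateText() looks these keys up verbatim, so the requested
# language name must match one of the entries above exactly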
#main function
def translateText(text,language):
#AzureTranslator.fromLanguage('en')
RealLang="0"
try:
RealLang=en_languages[language]
except:
mouth.speakBlocking("I dont know this language, i am so sorry, or you made a mistake dude")
print RealLang
try:
AzureTranslator.detectLanguage(text)
except:
mouth.speakBlocking("Check your azure credentials please ! I can't do all the work for you, i am just a robot")
RealLang="0"
if RealLang!="0":
AzureTranslator.toLanguage(RealLang)
sleep(0.5)
t_text=AzureTranslator.translate(text)
#small trick to prevent old api connection problems
i=0
while 'Cannot find an active Azure Market Place' in t_text and i<50:
print(i,t_text)
i += 1
sleep(0.2)
AzureTranslator.detectLanguage(text)
t_text=AzureTranslator.translate(text+" ")
if 'Cannot find an active Azure Market Place' in t_text:
mouth.speakBlocking("There is a problem with azure, i am so sorry. Or maybe I am tired")
else:
# change voice to map language
mouth.setVoice(male_languagesMary[RealLang])
mouth.speakBlocking(t_text)
# Go back original voice
mouth.setVoice("cmu-bdl-hsmm")
# usage: translateText(THE TEXT, TO LANGUAGE), where the target language name comes from the "Translate to" map above
translateText(u"Hola buenos dias","french")
sleep(2)
translateText(u"Hello ! and I can translate so many languages ! ","italian")
| MyRobotLab/myrobotlab | src/main/resources/resource/AzureTranslator/AzureTranslator.py | Python | apache-2.0 | 3,645 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock, MagicMock
from trove.versions import BaseVersion
from trove.versions import Version
from trove.versions import VersionDataView
from trove.versions import VersionsAPI
from trove.versions import VersionsController
from trove.versions import VersionsDataView
from trove.versions import VERSIONS
from xml.dom import minidom
import testtools
BASE_URL = 'http://localhost'
class VersionsControllerTest(testtools.TestCase):
def setUp(self):
super(VersionsControllerTest, self).setUp()
self.controller = VersionsController()
self.assertIsNotNone(self.controller,
"VersionsController instance was None")
def test_index_json(self):
request = Mock()
result = self.controller.index(request)
self.assertIsNotNone(result,
'Result was None')
result._data = Mock()
result._data.data_for_json = \
lambda: {'status': 'CURRENT',
'updated': '2012-08-01T00:00:00Z',
'id': 'v1.0',
'links': [{'href': 'http://localhost/v1.0/',
'rel': 'self'}]}
# can be anything but xml
json_data = result.data("application/json")
self.assertIsNotNone(json_data,
'Result json_data was None')
self.assertEqual('v1.0', json_data['id'],
'Version id is incorrect')
self.assertEqual('CURRENT', json_data['status'],
'Version status is incorrect')
self.assertEqual('2012-08-01T00:00:00Z', json_data['updated'],
'Version updated value is incorrect')
def test_index_xml(self):
request = Mock()
result = self.controller.index(request)
self.assertIsNotNone(result, 'Result was None')
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
version = Version(id, status, base_url, updated)
result._data = Mock()
result._data.data_for_xml = lambda: {'versions': [version]}
xml_data = result.data("application/xml")
self.assertIsNotNone(xml_data, 'Result xml_data was None')
versions = xml_data['versions']
self.assertIsNotNone(versions, "Versions was None")
self.assertTrue(len(versions) == 1, "Versions length was != 1")
v = versions[0]
self.assertEqual('v1.0', v.id,
'Version id is incorrect')
self.assertEqual('CURRENT', v.status,
'Version status is incorrect')
self.assertEqual('2012-08-01T00:00:00Z', v.updated,
'Version updated value is incorrect')
def test_show_json(self):
request = Mock()
request.url_version = '1.0'
result = self.controller.show(request)
self.assertIsNotNone(result,
'Result was None')
json_data = result.data("application/json")
self.assertIsNotNone(json_data, "JSON data was None")
version = json_data.get('version', None)
self.assertIsNotNone(version, "Version was None")
self.assertEqual('CURRENT', version['status'],
"Version status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', version['updated'],
"Version updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', version['id'], "Version id was not 'v1.0'")
def test_show_xml(self):
request = Mock()
request.url_version = '1.0'
result = self.controller.show(request)
self.assertIsNotNone(result,
'Result was None')
xml_data = result.data("application/xml")
self.assertIsNotNone(xml_data, "XML data was None")
version = xml_data.get('version', None)
self.assertIsNotNone(version, "Version was None")
self.assertEqual('CURRENT', version.status,
"Version status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', version.updated,
"Version updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', version.id, "Version id was not 'v1.0'")
class BaseVersionTestCase(testtools.TestCase):
def setUp(self):
super(BaseVersionTestCase, self).setUp()
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.base_version = BaseVersion(id, status, base_url, updated)
self.assertIsNotNone(self.base_version,
'BaseVersion instance was None')
def test_data(self):
data = self.base_version.data()
self.assertIsNotNone(data, 'Base Version data was None')
self.assertTrue(type(data) is dict,
"Base Version data is not a dict")
self.assertEqual('CURRENT', data['status'],
"Data status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
"Data updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', data['id'],
"Data status was not 'v1.0'")
def test_url(self):
url = self.base_version.url()
self.assertIsNotNone(url, 'Url was None')
self.assertEqual('http://localhost/v1.0/', url,
"Base Version url is incorrect")
def test_to_xml(self):
xml = self.base_version.to_xml()
self.assertIsNotNone(xml, 'XML was None')
self.assertEqual('v1.0', xml.getAttribute('id'),
"XML Version is not v1.0")
self.assertEqual('CURRENT', xml.getAttribute('status'),
"XML status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', xml.getAttribute('updated'),
"XML updated value was not 2012-08-01T00:00:00Z")
links = xml.getElementsByTagName("link")
self.assertIsNotNone(links, "XML links element was None")
link = links[0]
self.assertIsNotNone(link, "XML link element was None")
self.assertEqual('http://localhost/v1.0/', link.getAttribute("href"),
"XML link href is not 'http://localhost/v1.0/'")
self.assertEqual('self', link.getAttribute("rel"),
"XML link rel is not self")
class VersionTestCase(testtools.TestCase):
def setUp(self):
super(VersionTestCase, self).setUp()
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
def test_url_no_trailing_slash(self):
url = self.version.url()
self.assertIsNotNone(url, 'Version url was None')
self.assertEqual(BASE_URL + '/', url,
'Base url value was incorrect')
def test_url_with_trailing_slash(self):
self.version.base_url = 'http://localhost/'
url = self.version.url()
self.assertEqual(BASE_URL + '/', url,
'Base url value was incorrect')
class VersionDataViewTestCase(testtools.TestCase):
def setUp(self):
super(VersionDataViewTestCase, self).setUp()
# get a version object first
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
# then create an instance of VersionDataView
self.version_data_view = VersionDataView(self.version)
self.assertIsNotNone(self.version_data_view,
'Version Data view instance was None')
def test_data_for_json(self):
json_data = self.version_data_view.data_for_json()
self.assertIsNotNone(json_data, "JSON data was None")
self.assertTrue(type(json_data) is dict,
"JSON version data is not a dict")
self.assertIsNotNone(json_data.get('version'),
"Dict json_data has no key 'version'")
data = json_data['version']
self.assertIsNotNone(data, "JSON data version was None")
self.assertEqual('CURRENT', data['status'],
"Data status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
"Data updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', data['id'],
"Data status was not 'v1.0'")
def test_data_for_xml(self):
xml_data = self.version_data_view.data_for_xml()
self.assertIsNotNone(xml_data, "XML data is None")
self.assertTrue(type(xml_data) is dict,
"XML version data is not a dict")
self.assertIsNotNone(xml_data.get('version', None),
"Dict xml_data has no key 'version'")
version = xml_data['version']
self.assertIsNotNone(version, "Version was None")
self.assertEqual(self.version.id, version.id,
"Version ids are not equal")
class VersionsDataViewTestCase(testtools.TestCase):
def setUp(self):
super(VersionsDataViewTestCase, self).setUp()
# get a version object, put it in a list
self.versions = []
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
self.versions.append(self.version)
# then create an instance of VersionsDataView
self.versions_data_view = VersionsDataView(self.versions)
self.assertIsNotNone(self.versions_data_view,
'Versions Data view instance was None')
def test_data_for_json(self):
json_data = self.versions_data_view.data_for_json()
self.assertIsNotNone(json_data, "JSON data was None")
self.assertTrue(type(json_data) is dict,
"JSON versions data is not a dict")
self.assertIsNotNone(json_data.get('versions', None),
"Dict json_data has no key 'versions'")
versions = json_data['versions']
self.assertIsNotNone(versions, "Versions was None")
self.assertTrue(len(versions) == 1, "Versions length != 1")
# explode the version object
versions_data = [v.data() for v in self.versions]
d1 = versions_data.pop()
d2 = versions.pop()
self.assertEqual(d1['id'], d2['id'],
"Version ids are not equal")
def test_data_for_xml(self):
xml_data = self.versions_data_view.data_for_xml()
self.assertIsNotNone(xml_data, "XML data was None")
self.assertTrue(type(xml_data) is dict, "XML data was not a dict")
versions = xml_data.get('versions', None)
self.assertIsNotNone(versions, "Versions is None")
self.assertTrue(type(versions) is list, "Versions is not a list")
self.assertTrue(len(versions) == 1, "Versions length != 1")
v = versions[0]
self.assertEqual(v.id, self.version.id)
class VersionAPITestCase(testtools.TestCase):
def setUp(self):
super(VersionAPITestCase, self).setUp()
def test_instance(self):
self.versions_api = VersionsAPI()
self.assertIsNotNone(self.versions_api,
"VersionsAPI instance was None")
| citrix-openstack-build/trove | trove/tests/unittests/api/test_versions.py | Python | apache-2.0 | 12,853 |
#MenuTitle: Compare Font Info for Open Fonts
# -*- coding: utf-8 -*-
__doc__="""
Analyzes font info entries in all open fonts and outputs differences.
"""
import GlyphsApp
Glyphs.clearLog()
Glyphs.showMacroWindow()
listOfData = [
"File Path",
"Family Name",
"Version Number",
"Date",
"Copyright",
"Designer",
"Designer URL",
"Manufacturer",
"Manufacturer URL",
"UPM",
"Grid Length",
"Disables Nice Names"
]
def fontinfo( thisFont ):
return [
thisFont.filepath,
thisFont.familyName,
"%i.%.3i" % (thisFont.versionMajor, thisFont.versionMinor),
thisFont.date,
thisFont.copyright,
thisFont.designer,
thisFont.designerURL,
thisFont.manufacturer,
thisFont.manufacturerURL,
thisFont.upm,
thisFont.gridLength,
thisFont.disablesNiceNames
]
Fonts = Glyphs.fonts
allFontsInfo = [ fontinfo(f) for f in Fonts ]
numberOfFonts = range(len( allFontsInfo ))
print "Analyzing Open Fonts ...\n"
for i in range( len( listOfData )):
infolist = [ info[i] for info in allFontsInfo ]
if len(set( infolist ) ) != 1:
numberedData = zip( numberOfFonts, infolist )
print "%s:" % listOfData[i]
print "\n".join( [ " %i: %s" % ( dat[0], dat[1] ) for dat in numberedData ] )
print
| weiweihuanghuang/Glyphs-Scripts | Test/Compare Font Family.py | Python | apache-2.0 | 1,208 |
# -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
import logging
from os import environ
from . import defaults
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SECURE_MODE = not DEBUG_MODE # Disable osf cookie secure
# NOTE: Internal Domains/URLs have been added to facilitate docker development environments
# when localhost inside a container != localhost on the client machine/docker host.
PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
INTERNAL_DOMAIN = DOMAIN
API_DOMAIN = PROTOCOL + 'localhost:8000/'
#WATERBUTLER_URL = 'http://localhost:7777'
#WATERBUTLER_INTERNAL_URL = WATERBUTLER_URL
PREPRINT_PROVIDER_DOMAINS = {
'enabled': False,
'prefix': 'http://local.',
'suffix': ':4201/'
}
USE_EXTERNAL_EMBER = True
PROXY_EMBER_APPS = True
EMBER_DOMAIN = environ.get('EMBER_DOMAIN', 'localhost')
LIVE_RELOAD_DOMAIN = 'http://{}:4200'.format(EMBER_DOMAIN) # Change port for the current app
EXTERNAL_EMBER_APPS = {
'ember_osf_web': {
'server': 'http://{}:4200/'.format(EMBER_DOMAIN),
'path': '/ember_osf_web/',
'routes': [
'collections',
'handbook',
],
},
'preprints': {
'server': 'http://{}:4201/'.format(EMBER_DOMAIN),
'path': '/preprints/'
},
'registries': {
'server': 'http://{}:4202/'.format(EMBER_DOMAIN),
'path': '/registries/'
},
'reviews': {
'server': 'http://{}:4203/'.format(EMBER_DOMAIN),
'path': '/reviews/'
},
}
SEARCH_ENGINE = 'elastic'
ELASTIC_TIMEOUT = 10
# Email
USE_EMAIL = False
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Mailchimp email subscriptions
ENABLE_EMAIL_SUBSCRIPTIONS = False
# Session
COOKIE_NAME = 'osf'
OSF_COOKIE_DOMAIN = None
SECRET_KEY = 'CHANGEME'
SESSION_COOKIE_SECURE = SECURE_MODE
OSF_SERVER_KEY = None
OSF_SERVER_CERT = None
class CeleryConfig(defaults.CeleryConfig):
"""
Celery configuration
"""
##### Celery #####
## Default RabbitMQ broker
# broker_url = 'amqp://'
# Celery with SSL
# import ssl
#
# broker_use_ssl = {
# 'keyfile': '/etc/ssl/private/worker.key',
# 'certfile': '/etc/ssl/certs/worker.pem',
# 'ca_certs': '/etc/ssl/certs/ca-chain.cert.pem',
# 'cert_reqs': ssl.CERT_REQUIRED,
# }
# Default RabbitMQ backend
# result_backend = 'amqp://'
USE_CDN_FOR_CLIENT_LIBS = False
# WARNING: `SENDGRID_WHITELIST_MODE` should always be True in local dev env to prevent unintentional spamming.
# Add specific email addresses to `SENDGRID_EMAIL_WHITELIST` for testing purposes.
SENDGRID_WHITELIST_MODE = True
SENDGRID_EMAIL_WHITELIST = []
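# e.g. SENDGRID_EMAIL_WHITELIST = ['[email protected]']  # hypothetical address for local testing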
# Example of extending default settings
# defaults.IMG_FMTS += ["pdf"]
# support email
OSF_SUPPORT_EMAIL = '[email protected]'
# contact email
OSF_CONTACT_EMAIL = '[email protected]'
#Email templates logo
OSF_LOGO = 'osf_logo'
OSF_PREPRINTS_LOGO = 'osf_preprints'
OSF_MEETINGS_LOGO = 'osf_meetings'
OSF_PREREG_LOGO = 'osf_prereg'
OSF_REGISTRIES_LOGO = 'osf_registries'
DOI_FORMAT = '{prefix}/FK2osf.io/{guid}'
# Uncomment for local DOI creation testing
# datacite
# DATACITE_USERNAME = 'changeme'
# DATACITE_PASSWORD = 'changeme'
# DATACITE_URL = 'https://mds.test.datacite.org'
# crossref
# CROSSREF_USERNAME = 'changeme'
# CROSSREF_PASSWORD = 'changeme'
# CROSSREF_URL = https://test.crossref.org/servlet/deposit
# CROSSREF_DEPOSITOR_EMAIL = 'changeme' # This email will receive confirmation/error messages from CrossRef on submission
# Show sent emails in console
logging.getLogger('website.mails.mails').setLevel(logging.DEBUG)
| caseyrollins/osf.io | website/settings/local-dist.py | Python | apache-2.0 | 3,862 |
from bottle import route, view, request, response
from datetime import datetime
import sqlite3
import bcrypt
from helpers import FHIR
from fitBitConnect import fitBitConnect
@route('/sign_up', method='GET')
@view('sign_up')
def get_sign_up():
return dict(year=datetime.now().year)
@route('/sign_up', method='POST')
@view('sign_up')
def create_user_sign_up():
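    # Flow: store the new user locally, set the session cookie, create a matching
    # FHIR Patient resource, link the returned FHIR id back to the user row, then
    # hand off to the Fitbit OAuth connect flow.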
username = request.forms.get('username')
password = request.forms.get('password')
first_name = request.forms.get('first_name')
last_name = request.forms.get('last_name')
birthdate = request.forms.get('birthdate')
gender = request.forms.get('gender')
user = {
'first_name': first_name,
'last_name': last_name,
'birthdate': birthdate,
'gender': gender
}
hashed = bcrypt.hashpw(password, bcrypt.gensalt())
db = sqlite3.connect('database/jogrx.db')
c = db.cursor()
c.execute("INSERT INTO user (username, password, first_name, last_name, birthdate, gender) VALUES (?, ?, ?, ?, ?, ?)", (username, hashed, first_name, last_name, birthdate, gender))
new_userid = c.lastrowid
response.set_cookie('userid', new_userid, "teamfin")
fhir = FHIR('http://polaris.i3l.gatech.edu:8080/gt-fhir-webapp/base')
fhir_id = fhir.create_new_patient(user)
c.execute("UPDATE user SET fhir_id=? WHERE id=?", (fhir_id, int(new_userid)))
db.commit()
c.close()
return fitBitConnect()
| omtinez/gatech-teamfin | controllers/sign_up.py | Python | apache-2.0 | 1,442 |
import hashlib
import json.decoder
import logging
import ntpath
import random
from docker.errors import DockerException
from docker.utils import parse_bytes as sdk_parse_bytes
from .errors import StreamParseError
from .timeparse import MULTIPLIERS
from .timeparse import timeparse
json_decoder = json.JSONDecoder()
log = logging.getLogger(__name__)
def stream_as_text(stream):
"""Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once docker-py returns text streams instead
of byte streams.
"""
for data in stream:
if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
def line_splitter(buffer, separator='\n'):
index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = ''
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
log.error(
'Compose tried decoding the following data chunk, but failed:'
'\n%s' % repr(buffered)
)
raise StreamParseError(e)
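# Minimal usage sketch for split_buffer (the chunk values are hypothetical):
# unevenly buffered input is re-chunked on newlines by the default line_splitter.
def _example_split_buffer():
    chunks = [b'first li', b'ne\nsecond line\n']
    # stream_as_text decodes the bytes; items come back newline-terminated
    return list(split_buffer(chunks))  # ['first line\n', 'second line\n']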
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except ValueError:
return None
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def json_hash(obj):
dump = json.dumps(obj, sort_keys=True, separators=(',', ':'), default=lambda x: x.repr())
h = hashlib.sha256()
h.update(dump.encode('utf8'))
return h.hexdigest()
def microseconds_from_time_nano(time_nano):
return int(time_nano % 1000000000 / 1000)
def nanoseconds_from_time_seconds(time_seconds):
return int(time_seconds / MULTIPLIERS['nano'])
def parse_seconds_float(value):
return timeparse(value or '')
def parse_nanoseconds_int(value):
parsed = timeparse(value or '')
if parsed is None:
return None
return nanoseconds_from_time_seconds(parsed)
def build_string_dict(source_dict):
return {k: str(v if v is not None else '') for k, v in source_dict.items()}
def splitdrive(path):
if len(path) == 0:
return ('', '')
if path[0] in ['.', '\\', '/', '~']:
return ('', path)
return ntpath.splitdrive(path)
def parse_bytes(n):
try:
return sdk_parse_bytes(n)
except DockerException:
return None
def unquote_path(s):
if not s:
return s
if s[0] == '"' and s[-1] == '"':
return s[1:-1]
return s
def generate_random_id():
while True:
val = hex(random.getrandbits(32 * 8))[2:-1]
try:
int(truncate_id(val))
continue
except ValueError:
return val
def truncate_id(value):
if ':' in value:
value = value[value.index(':') + 1:]
if len(value) > 12:
return value[:12]
return value
def unique_everseen(iterable, key=lambda x: x):
"List unique elements, preserving order. Remember all elements ever seen."
seen = set()
for element in iterable:
unique_key = key(element)
if unique_key not in seen:
seen.add(unique_key)
yield element
def truncate_string(s, max_chars=35):
if len(s) > max_chars:
return s[:max_chars - 2] + '...'
return s
def filter_attached_for_up(items, service_names, attach_dependencies=False,
item_to_service_name=lambda x: x):
"""This function contains the logic of choosing which services to
attach when doing docker-compose up. It may be used both with containers
and services, and any other entities that map to service names -
this mapping is provided by item_to_service_name."""
if attach_dependencies or not service_names:
return items
return [
item
for item in items if item_to_service_name(item) in service_names
]
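# Minimal usage sketch for filter_attached_for_up (service names are hypothetical):
# with the default identity mapping and attach_dependencies left False, only the
# explicitly requested services are kept.
def _example_filter_attached_for_up():
    return filter_attached_for_up(['web', 'db', 'cache'], service_names=['web'])
    # -> ['web']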
| thaJeztah/compose | compose/utils.py | Python | apache-2.0 | 5,128 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_instance_numa
from nova.tests.unit.objects import test_objects
fake_instance_uuid = uuids.fake
fake_migration_context_obj = objects.MigrationContext()
fake_migration_context_obj.instance_uuid = fake_instance_uuid
fake_migration_context_obj.migration_id = 42
fake_migration_context_obj.new_numa_topology = (
test_instance_numa.fake_obj_numa_topology.obj_clone())
fake_migration_context_obj.old_numa_topology = None
fake_migration_context_obj.new_pci_devices = objects.PciDeviceList()
fake_migration_context_obj.old_pci_devices = None
fake_migration_context_obj.new_pci_requests = (
objects.InstancePCIRequests(requests=[
objects.InstancePCIRequest(count=123, spec=[])]))
fake_migration_context_obj.old_pci_requests = None
fake_migration_context_obj.new_resources = objects.ResourceList()
fake_migration_context_obj.old_resources = None
fake_db_context = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': fake_instance_uuid,
'migration_context': jsonutils.dumps(
fake_migration_context_obj.obj_to_primitive()),
}
def get_fake_migration_context_obj(ctxt):
obj = fake_migration_context_obj.obj_clone()
obj._context = ctxt
return obj
def get_fake_migration_context_with_pci_devs(ctxt=None):
obj = get_fake_migration_context_obj(ctxt)
obj.old_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
compute_node_id=1,
request_id=uuids.pcidev)])
obj.new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0b:00.1',
compute_node_id=2,
request_id=uuids.pcidev)])
return obj
class _TestMigrationContext(object):
def _test_get_by_instance_uuid(self, db_data):
mig_context = objects.MigrationContext.get_by_instance_uuid(
self.context, fake_db_context['instance_uuid'])
if mig_context:
self.assertEqual(fake_db_context['instance_uuid'],
mig_context.instance_uuid)
expected_mig_context = db_data and db_data.get('migration_context')
expected_mig_context = objects.MigrationContext.obj_from_db_obj(
expected_mig_context)
self.assertEqual(expected_mig_context.instance_uuid,
mig_context.instance_uuid)
self.assertEqual(expected_mig_context.migration_id,
mig_context.migration_id)
self.assertIsInstance(expected_mig_context.new_numa_topology,
mig_context.new_numa_topology.__class__)
self.assertIsInstance(expected_mig_context.old_numa_topology,
mig_context.old_numa_topology.__class__)
self.assertIsInstance(expected_mig_context.new_pci_devices,
mig_context.new_pci_devices.__class__)
self.assertIsInstance(expected_mig_context.old_pci_devices,
mig_context.old_pci_devices.__class__)
self.assertIsInstance(expected_mig_context.new_pci_requests,
mig_context.new_pci_requests.__class__)
self.assertIsInstance(expected_mig_context.old_pci_requests,
mig_context.old_pci_requests.__class__)
self.assertIsInstance(expected_mig_context. new_resources,
mig_context.new_resources.__class__)
self.assertIsInstance(expected_mig_context.old_resources,
mig_context.old_resources.__class__)
else:
self.assertIsNone(mig_context)
@mock.patch('nova.db.api.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid(self, mock_get):
mock_get.return_value = fake_db_context
self._test_get_by_instance_uuid(fake_db_context)
@mock.patch('nova.db.api.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid_none(self, mock_get):
db_context = fake_db_context.copy()
db_context['migration_context'] = None
mock_get.return_value = db_context
self._test_get_by_instance_uuid(db_context)
@mock.patch('nova.db.api.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid_missing(self, mock_get):
mock_get.return_value = None
self.assertRaises(
exception.MigrationContextNotFound,
objects.MigrationContext.get_by_instance_uuid,
self.context, 'fake_uuid')
@mock.patch('nova.objects.Migration.get_by_id',
return_value=objects.Migration(cross_cell_move=True))
def test_is_cross_cell_move(self, mock_get_by_id):
ctxt = context.get_admin_context()
mig_ctx = get_fake_migration_context_obj(ctxt)
self.assertTrue(mig_ctx.is_cross_cell_move())
mock_get_by_id.assert_called_once_with(ctxt, mig_ctx.migration_id)
class TestMigrationContext(test_objects._LocalTest, _TestMigrationContext):
def test_pci_mapping_for_migration(self):
mig_ctx = get_fake_migration_context_with_pci_devs()
pci_mapping = mig_ctx.get_pci_mapping_for_migration(False)
self.assertDictEqual(
{mig_ctx.old_pci_devices[0].address: mig_ctx.new_pci_devices[0]},
pci_mapping)
def test_pci_mapping_for_migration_revert(self):
mig_ctx = get_fake_migration_context_with_pci_devs()
pci_mapping = mig_ctx.get_pci_mapping_for_migration(True)
self.assertDictEqual(
{mig_ctx.new_pci_devices[0].address: mig_ctx.old_pci_devices[0]},
pci_mapping)
class TestMigrationContextRemote(test_objects._RemoteTest,
_TestMigrationContext):
pass
| rahulunair/nova | nova/tests/unit/objects/test_migration_context.py | Python | apache-2.0 | 6,945 |
#
# Copyright 2011, Robert Mela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Configuration constants for use in tests
"""
SECRET_KEY = 'my_amazon_secret_key'
KEY_ID = 'my_amazon_key_id'
TEST_URL_BASE='http://my-bucket-name.s3.amazonaws.com/'
TEST_FILE='myfile.ext'
TEST_URL= TEST_URL_BASE + TEST_FILE
| jbraeuer/yum-s3-plugin | test/testvalues.py | Python | apache-2.0 | 835 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program will change the shrinking factor of a trained classifier
"""
from __future__ import print_function
#import sys
#sys.path.append("..")
#sys.path.append("../data_sequence")
#sys.path.append("../helpers")
from detections_pb2 import Box
from detector_model_pb2 import DetectorModel, MultiScalesDetectorModel
import os, os.path#, glob
from optparse import OptionParser
from plot_detector_model import read_model
def parse_arguments():
parser = OptionParser()
parser.description = \
"This program a trained classifier models and " \
"creates a new model with a different shrinking factor"
parser.add_option("-i", "--input", dest="input_path",
metavar="PATH", type="string",
help="path to the input model file")
parser.add_option("-s", "--shrinking_factor", dest="shrinking_factor",
metavar="INTEGER", type="int",
help="new shrinking factor value")
parser.add_option("-o", "--output", dest="output_path",
metavar="PATH", type="string",
help="path to the model file to be created")
(options, args) = parser.parse_args()
#print (options, args)
if options.input_path:
if not os.path.exists(options.input_path):
parser.error("Could not find the input file %s" % options.input_path)
else:
parser.error("'input' option is required to run this program")
if not options.shrinking_factor:
parser.error("'shrinking_factor' option is required to run this program")
options.input_path = os.path.normpath(options.input_path)
if options.output_path:
if os.path.exists(options.output_path):
parser.error("output_path should point to a non existing file")
else:
parser.error("'output' option is required to run this program")
return options
def scale_feature(feature, scaling_factor):
box = feature.box
box.min_corner.x = int(box.min_corner.x*scaling_factor)
box.min_corner.y = int(box.min_corner.y*scaling_factor)
box.max_corner.x = int(box.max_corner.x*scaling_factor)
box.max_corner.y = int(box.max_corner.y*scaling_factor)
return
def scale_stump(stump, scaling_factor):
scale_feature(stump.feature, scaling_factor)
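    # an integral-channel stump responds to the sum over its box, so scaling the
    # box sides by s scales the response by roughly s*s; the threshold below is
    # rescaled by the same squared factor to keep the comparison consistent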
stump.feature_threshold *= (scaling_factor*scaling_factor)
return
def change_shrinking_factor(new_shrinking_factor, detector):
model = detector.soft_cascade_model
if not model:
raise Exception("Only SoftCascadeOverIntegralChannels models are supported, " \
"received {0}".format(detector.detector_type))
#print("model.shrinking_factor", model.shrinking_factor)
scaling_factor = model.shrinking_factor / float(new_shrinking_factor)
print("scaling_factor ==", scaling_factor)
if scaling_factor == 1.0:
raise Exception("Input model already has shrinking factor {0}".format(new_shrinking_factor))
for stage in model.stages:
if stage.feature_type == stage.Stumps:
scale_stump(stage.decision_stump, scaling_factor)
elif stage.feature_type == stage.Level2DecisionTree:
for node in stage.level2_decision_tree.nodes:
scale_stump(node.decision_stump, scaling_factor)
# end of "for all nodes"
elif stage.feature_type == stage.LevelNDecisionTree:
for node in stage.levelN_decision_tree.nodes:
scale_stump(node.decision_stump, scaling_factor)
# end of "for all nodes"
else:
raise Exception("Received an unhandled stage type")
# end of "for all stages"
model.shrinking_factor = new_shrinking_factor
print("detector.model_window_size",
(detector.model_window_size.x, detector.model_window_size.y))
object_window = detector.object_window
print("detector.object_window",
((object_window.min_corner.x, object_window.min_corner.y),
(object_window.max_corner.x, object_window.max_corner.y)))
return
def replace_scale_hack(model):
"""
Small hack to replace a model of a given scale
"""
assert isinstance(model, MultiScalesDetectorModel)
model_path = "/users/visics/mmathias/devel/doppia/src/applications/boosted_learning/eccvWorkshop/2012_05_20_67022_trained_model_octave_-1.proto.bin.bootstrap2"
replacement_model = read_model(model_path)
assert isinstance(replacement_model, DetectorModel)
replacement_scale = 0.5
print("Replacing model of scale {0} " \
"by the model read at {1}".format(replacement_scale, model_path))
for detector in model.detectors:
if detector.scale == replacement_scale:
detector.CopyFrom(replacement_model)
detector.scale = replacement_scale # we set the proper new scale
break
return
def create_new_shrinking_factor_model(input_path, new_shrinking_factor, output_path):
input_model = read_model(input_path)
model_class = input_model.__class__
output_model = input_model
if True:
if model_class is MultiScalesDetectorModel:
for detector in output_model.detectors:
change_shrinking_factor(new_shrinking_factor, detector)
elif model_class is DetectorModel:
change_shrinking_factor(new_shrinking_factor, output_model)
else:
raise Exception("Received an unmanaged detector model class {0}".format(model_class) )
#replace_scale_hack(output_model)
output_content = output_model.SerializeToString()
out_file = open(output_path, "wb")
out_file.write(output_content)
out_file.close()
print("Created output model file", output_path)
return
def main():
options = parse_arguments()
create_new_shrinking_factor_model(options.input_path,
options.shrinking_factor,
options.output_path)
print("End of game, have a nice day!")
return
if __name__ == "__main__":
# Import Psyco if available
try:
import psyco
psyco.full()
except ImportError:
#print("(psyco not found)")
pass
else:
print("(using psyco)")
main()
# end of file | LevinJ/Pedestrian-detection-and-tracking | src/doppia/tools/objects_detection/models/change_shrinking_factor.py | Python | apache-2.0 | 6,372 |
click(Pattern("Flmnduana.png").targetOffset(34,0))
exit(0) | silverbulleters/vanessa-behavoir | tools/Sikuli/OpenDialogClickFolderSelect.sikuli/OpenDialogClick.py | Python | apache-2.0 | 58 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class GRULayerTest(keras_parameterized.TestCase):
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_v2_only
def test_float64_GRU(self):
if test.is_built_with_rocm():
self.skipTest('Double type is yet not supported in ROCm')
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'return_sequences': True,
'dtype': 'float64'},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype='float64')
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_recurrent_dropout_with_implementation_restriction(self):
layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2)
# The implementation is force to 1 due to the limit of recurrent_dropout.
self.assertEqual(layer.implementation, 1)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_GRU(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_reset_after_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=num_samples,
test_samples=0,
input_shape=(timesteps, embedding_dim),
num_classes=units)
y_train = np_utils.to_categorical(y_train, units)
inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
gru_layer = keras.layers.GRU(units,
reset_after=True)
output = gru_layer(inputs)
gru_model = keras.models.Model(inputs, output)
gru_model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
gru_model.fit(x_train, y_train)
gru_model.predict(x_train)
def test_with_masking_layer_GRU(self):
if test.is_built_with_rocm():
self.skipTest('MIOpen only supports packed input output')
layer_class = keras.layers.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_statefulness_GRU(self):
if test.is_built_with_rocm():
self.skipTest('MIOpen only supports packed input output')
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(
optimizer='sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_get_initial_states(self):
batch_size = 4
cell = keras.layers.GRUCell(20)
initial_state = cell.get_initial_state(
batch_size=batch_size, dtype=dtypes.float32)
_, state = cell(np.ones((batch_size, 20), dtype=np.float32), initial_state)
self.assertLen(state, 1)
self.assertEqual(state[0].shape, initial_state.shape)
@tf_test_util.run_all_in_graph_and_eager_modes
class GRULayerGenericTest(test.TestCase):
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_from_config_GRU(self):
layer_class = keras.layers.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
if __name__ == '__main__':
test.main()
| xzturn/tensorflow | tensorflow/python/keras/layers/gru_test.py | Python | apache-2.0 | 9,531 |
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for the Solum Worker service."""
import logging as std_logging
import os
import sys
from oslo.config import cfg
import solum
from solum.common.rpc import service
from solum.common import trace_data
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
from solum.worker.handlers import noop as noop_handler
from solum.worker.handlers import shell as shell_handler
from solum.worker.handlers import shell_nobuild as shell_nobuild_handler
LOG = logging.getLogger(__name__)
def main():
cfg.CONF(sys.argv[1:], project='solum')
logging.setup('solum')
solum.TLS.trace = trace_data.TraceData()
LOG.info(_('Starting server in PID %s') % os.getpid())
LOG.debug("Configuration:")
cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
cfg.CONF.import_opt('topic', 'solum.worker.config', group='worker')
cfg.CONF.import_opt('host', 'solum.worker.config', group='worker')
cfg.CONF.import_opt('handler', 'solum.worker.config', group='worker')
handlers = {
'noop': noop_handler.Handler,
'shell': shell_handler.Handler,
'shell_nobuild': shell_nobuild_handler.Handler,
}
endpoints = [
handlers[cfg.CONF.worker.handler](),
]
server = service.Service(cfg.CONF.worker.topic,
cfg.CONF.worker.host, endpoints)
server.serve()
| ed-/solum | solum/cmd/worker.py | Python | apache-2.0 | 2,004 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import scann
import tensorflow as tf
import numpy as np
import math
import pickle
METRIC = 'dot_product'
DIMENSIONS_PER_BLOCK = 2
ANISOTROPIC_QUANTIZATION_THRESHOLD = 0.2
NUM_NEIGHBOURS = 10
NUM_LEAVES_TO_SEARCH = 200
REORDER_NUM_NEIGHBOURS = 200
TOKENS_FILE_NAME = 'tokens'
def load_embeddings(embedding_files_pattern):
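  # Assumed line format, inferred from the parsing below:
  #   <item_id>,<v1>,<v2>,...   e.g. 'item_123,0.12,-0.34,0.56' (hypothetical values)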
embedding_list = list()
tokens = list()
embed_files = tf.io.gfile.glob(embedding_files_pattern)
print(f'{len(embed_files)} embedding files are found.')
for file_idx, embed_file in enumerate(embed_files):
print(f'Loading embeddings in file {file_idx+1} of {len(embed_files)}...')
with tf.io.gfile.GFile(embed_file, 'r') as file_reader:
lines = file_reader.readlines()
for line in lines:
parts = line.split(',')
item_Id = parts[0]
embedding = parts[1:]
embedding = np.array([float(v) for v in embedding])
normalized_embedding = embedding / np.linalg.norm(embedding)
embedding_list.append(normalized_embedding)
tokens.append(item_Id)
print(f'{len(embedding_list)} embeddings are loaded.')
return tokens, np.array(embedding_list)
def build_index(embeddings, num_leaves):
data_size = embeddings.shape[0]
if not num_leaves:
num_leaves = int(math.sqrt(data_size))
print('Start building the ScaNN index...')
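  # As configured here: a partitioning tree splits the data into num_leaves
  # clusters and probes NUM_LEAVES_TO_SEARCH of them per query, candidates are
  # scored with asymmetric hashing (DIMENSIONS_PER_BLOCK dims per block), and
  # the top REORDER_NUM_NEIGHBOURS candidates are re-scored exactly.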
scann_builder = scann.scann_ops.builder(embeddings, NUM_NEIGHBOURS, METRIC)
scann_builder = scann_builder.tree(
num_leaves=num_leaves,
num_leaves_to_search=NUM_LEAVES_TO_SEARCH,
training_sample_size=data_size)
scann_builder = scann_builder.score_ah(
DIMENSIONS_PER_BLOCK,
anisotropic_quantization_threshold=ANISOTROPIC_QUANTIZATION_THRESHOLD)
scann_builder = scann_builder.reorder(REORDER_NUM_NEIGHBOURS)
scann_index = scann_builder.build()
print('ScaNN index is built.')
return scann_index
def save_index(index, tokens, output_dir):
print('Saving index as a SavedModel...')
module = index.serialize_to_module()
tf.saved_model.save(
module, output_dir, signatures=None, options=None
)
print(f'Index is saved to {output_dir}')
print(f'Saving tokens file...')
tokens_file_path = os.path.join(output_dir, TOKENS_FILE_NAME)
with tf.io.gfile.GFile(tokens_file_path, 'wb') as handle:
pickle.dump(tokens, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(f'Item file is saved to {tokens_file_path}.')
def build(embedding_files_pattern, output_dir, num_leaves=None):
print("Indexer started...")
tokens, embeddings = load_embeddings(embedding_files_pattern)
index = build_index(embeddings, num_leaves)
save_index(index, tokens, output_dir)
print("Indexer finished.")
| GoogleCloudPlatform/analytics-componentized-patterns | retail/recommendation-system/bqml-scann/index_builder/builder/indexer.py | Python | apache-2.0 | 3,305 |
'''
Created on Aug 16, 2011
@author: jklo
'''
from service_template import ServiceTemplate
from setup_utils import getInput, PublishDoc, isBoolean, YES, isInt
import pystache, uuid
import json
def install(server, dbname, setupInfo):
custom_opts = {}
active = getInput("Enable Slice?", "T", isBoolean)
custom_opts["active"] = active.lower() in YES
active = getInput("Enable Flow Control for Slice?", "T", isBoolean)
custom_opts["flow_control"] = active.lower() in YES
if custom_opts["flow_control"]:
active = getInput("Maximum IDs to Return?", "100", isInt)
custom_opts["id_limit"] = int(active)
active = getInput("Maximum Docs to Return?", "100", isInt)
custom_opts["doc_limit"] = int(active)
custom_opts["node_endpoint"] = setupInfo["nodeUrl"]
custom_opts["service_id"] = uuid.uuid4().hex
return __SliceServiceTemplate().install(server, dbname, custom_opts)
class __SliceServiceTemplate(ServiceTemplate):
def __init__(self):
ServiceTemplate.__init__(self)
self.service_data_template = '''{
"flow_control": {{flow_control}}{{#id_limit}},
"id_limit": {{id_limit}}{{/id_limit}}{{#doc_limit}},
"doc_limit": {{doc_limit}}{{/doc_limit}}
}'''
# Returns keys/pair where the keys is the destination database name
# and value is the couchapp directory name. This assumes a central
# location for all couchapps.
self.couchapps ={'resource_data': ['apps/learningregistry-slicelite'] }
def _optsoverride(self):
opts = {
"active": "false",
"service_name": "Slice",
"service_version": "0.10.0",
"service_endpoint": "/slice",
"service_key": "false",
"service_https": "false",
"service_type": "access",
"flow_control": False,
"id_limit": None,
"doc_limit": None,
}
return opts
if __name__ == "__main__":
import couchdb
nodeSetup = {
'couchDBUrl': "http://localhost:5984",
'nodeUrl': "http://test.example.com"
}
def doesNotEndInSlash(input=None):
return input is not None and input[-1] != "/"
def notExample(input=None):
return input is not None and input != nodeSetup["nodeUrl"]
nodeSetup["couchDBUrl"] = getInput("Enter the CouchDB URL:", nodeSetup["couchDBUrl"], doesNotEndInSlash)
nodeSetup["nodeUrl"] = getInput("Enter the public URL of the LR Node", nodeSetup["nodeUrl"], notExample)
server = couchdb.Server(url= nodeSetup['couchDBUrl'])
install(server, "node", nodeSetup)
| jimklo/LearningRegistry | config/services/Slice.py | Python | apache-2.0 | 2,749 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import client as heatclient
from tacker.vm import keystone
class OpenstackClients(object):
def __init__(self, auth_attr, region_name=None):
super(OpenstackClients, self).__init__()
self.keystone_plugin = keystone.Keystone()
self.heat_client = None
self.keystone_client = None
self.region_name = region_name
self.auth_attr = auth_attr
def _keystone_client(self):
version = self.auth_attr['auth_url'].rpartition('/')[2]
return self.keystone_plugin.initialize_client(version,
**self.auth_attr)
def _heat_client(self):
endpoint = self.keystone_session.get_endpoint(
service_type='orchestration', region_name=self.region_name)
return heatclient.Client('1', endpoint=endpoint,
session=self.keystone_session)
@property
def keystone_session(self):
return self.keystone.session
@property
def keystone(self):
if not self.keystone_client:
self.keystone_client = self._keystone_client()
return self.keystone_client
@property
def heat(self):
if not self.heat_client:
self.heat_client = self._heat_client()
return self.heat_client
| trozet/tacker | tacker/common/clients.py | Python | apache-2.0 | 1,866 |
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, print_function
from foobar import util
from . import local
class Foobar(object):
def __init__(self):
print(util.foo())
| wfxiang08/Nuitka | tests/programs/absolute_import/foobar/foobar.py | Python | apache-2.0 | 944 |
from public import public
from ... import util
from ...common import exceptions as com
from .. import rules as rlz
from .. import types as ir
from .core import Node, _safe_repr
def _to_sort_key(key, *, table=None):
if isinstance(key, DeferredSortKey):
if table is None:
raise com.IbisTypeError(
"cannot resolve DeferredSortKey with table=None"
)
key = key.resolve(table)
if isinstance(key, ir.SortExpr):
return key
if isinstance(key, (tuple, list)):
key, sort_order = key
else:
sort_order = True
if not isinstance(key, ir.Expr):
if table is None:
raise com.IbisTypeError("cannot resolve key with table=None")
key = table._ensure_expr(key)
if isinstance(key, (ir.SortExpr, DeferredSortKey)):
return _to_sort_key(key, table=table)
if isinstance(sort_order, str):
if sort_order.lower() in ('desc', 'descending'):
sort_order = False
elif not isinstance(sort_order, bool):
sort_order = bool(sort_order)
return SortKey(key, ascending=sort_order).to_expr()
def _maybe_convert_sort_keys(tables, exprs):
exprs = util.promote_list(exprs)
keys = exprs[:]
for i, key in enumerate(exprs):
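        # Column names and deferred keys are resolved against the rightmost table first; built expressions are tried left to right.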
step = -1 if isinstance(key, (str, DeferredSortKey)) else 1
for table in tables[::step]:
try:
sort_key = _to_sort_key(key, table=table)
except Exception:
continue
else:
keys[i] = sort_key
break
return keys
@public
class SortKey(Node):
expr = rlz.column(rlz.any)
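    # Normalize truthy/falsey direction values (True/False, 1/0) to a plain bool; defaults to ascending.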
ascending = rlz.optional(
rlz.map_to(
{
True: True,
False: False,
1: True,
0: False,
},
),
default=True,
)
def __repr__(self):
# Temporary
rows = [
'Sort key:',
f' ascending: {self.ascending!s}',
util.indent(_safe_repr(self.expr), 2),
]
return '\n'.join(rows)
def output_type(self):
return ir.SortExpr
def root_tables(self):
return self.expr.op().root_tables()
def equals(self, other, cache=None):
return (
isinstance(other, SortKey)
and self.expr.equals(other.expr, cache=cache)
and self.ascending == other.ascending
)
def resolve_name(self):
return self.expr.get_name()
@public
class DeferredSortKey:
def __init__(self, what, ascending=True):
self.what = what
self.ascending = ascending
def resolve(self, parent):
what = parent._ensure_expr(self.what)
return SortKey(what, ascending=self.ascending).to_expr()
| cpcloud/ibis | ibis/expr/operations/sortkeys.py | Python | apache-2.0 | 2,826 |
import tempfile
import pytest
from pyhocon import ConfigFactory
from pyhocon.tool import HOCONConverter
class TestHOCONConverter(object):
CONFIG_STRING = """
a = {b: 1}
b = [1, 2]
c = 1
d = "a"
e = \"\"\"1
2
3\"\"\"
f1 = true
f2 = false
g = []
h = null
i = {}
"""
CONFIG = ConfigFactory.parse_string(CONFIG_STRING)
EXPECTED_JSON = \
"""
{
"a": {
"b": 1
},
"b": [
1,
2
],
"c": 1,
"d": "a",
"e": "1\\n 2\\n 3",
"f1": true,
"f2": false,
"g": [],
"h": null,
"i": {}
}
"""
EXPECTED_YAML = \
"""
a:
b: 1
b:
- 1
- 2
c: 1
d: a
e: |
1
2
3
f1: true
f2: false
g: []
h: None
i:
"""
EXPECTED_PROPERTIES = \
"""
a.b = 1
b.0 = 1
b.1 = 2
c = 1
d = a
e = 1\\
2\\
3
f1 = true
f2 = false
"""
def test_to_json(self):
converted = HOCONConverter.to_json(TestHOCONConverter.CONFIG)
assert [line.strip() for line in TestHOCONConverter.EXPECTED_JSON.split('\n') if line.strip()]\
== [line.strip() for line in converted.split('\n') if line.strip()]
def test_to_yaml(self):
converted = HOCONConverter.to_yaml(TestHOCONConverter.CONFIG)
assert [line.strip() for line in TestHOCONConverter.EXPECTED_YAML.split('\n') if line.strip()]\
== [line.strip() for line in converted.split('\n') if line.strip()]
def test_to_properties(self):
converted = HOCONConverter.to_properties(TestHOCONConverter.CONFIG)
assert [line.strip() for line in TestHOCONConverter.EXPECTED_PROPERTIES.split('\n') if line.strip()]\
== [line.strip() for line in converted.split('\n') if line.strip()]
def _test_convert(self, input, expected_output, format):
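        # Round-trip through temp files: write the HOCON input, convert it to the requested format, then compare line by line ignoring blank lines and indentation.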
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write(input)
fdin.flush()
with tempfile.NamedTemporaryFile('r') as fdout:
HOCONConverter.convert(fdin.name, fdout.name, format)
with open(fdout.name) as fdi:
converted = fdi.read()
assert [line.strip() for line in expected_output.split('\n') if line.strip()]\
== [line.strip() for line in converted.split('\n') if line.strip()]
def test_convert(self):
self._test_convert(TestHOCONConverter.CONFIG_STRING, TestHOCONConverter.EXPECTED_JSON, 'json')
self._test_convert(TestHOCONConverter.CONFIG_STRING, TestHOCONConverter.EXPECTED_YAML, 'yaml')
self._test_convert(TestHOCONConverter.CONFIG_STRING, TestHOCONConverter.EXPECTED_PROPERTIES, 'properties')
def test_invalid_format(self):
with pytest.raises(Exception):
self._test_convert(TestHOCONConverter.CONFIG_STRING, TestHOCONConverter.EXPECTED_PROPERTIES, 'invalid')
| acx2015/pyhocon | tests/test_tool.py | Python | apache-2.0 | 3,542 |
"""Config flow for Philips TV integration."""
from __future__ import annotations
import platform
from typing import Any
from haphilipsjs import ConnectionFailure, PairingFailure, PhilipsTV
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import (
CONF_API_VERSION,
CONF_HOST,
CONF_PASSWORD,
CONF_PIN,
CONF_USERNAME,
)
from . import LOGGER
from .const import CONF_SYSTEM, CONST_APP_ID, CONST_APP_NAME, DOMAIN
async def validate_input(
hass: core.HomeAssistant, host: str, api_version: int
) -> PhilipsTV:
"""Validate the user input allows us to connect."""
hub = PhilipsTV(host, api_version)
await hub.getSystem()
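    # Switch to the secured (HTTPS) transport when the TV reports one.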
await hub.setTransport(hub.secured_transport)
if not hub.system:
raise ConnectionFailure("System data is empty")
return hub
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Philips TV."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self) -> None:
"""Initialize flow."""
super().__init__()
self._current = {}
self._hub: PhilipsTV | None = None
self._pair_state: Any = None
async def async_step_import(self, conf: dict) -> dict:
"""Import a configuration from config.yaml."""
for entry in self._async_current_entries():
if entry.data[CONF_HOST] == conf[CONF_HOST]:
return self.async_abort(reason="already_configured")
return await self.async_step_user(
{
CONF_HOST: conf[CONF_HOST],
CONF_API_VERSION: conf[CONF_API_VERSION],
}
)
async def _async_create_current(self):
system = self._current[CONF_SYSTEM]
return self.async_create_entry(
title=f"{system['name']} ({system['serialnumber']})",
data=self._current,
)
async def async_step_pair(self, user_input: dict | None = None) -> dict:
"""Attempt to pair with device."""
assert self._hub
errors = {}
schema = vol.Schema(
{
vol.Required(CONF_PIN): str,
}
)
if not user_input:
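            # First pass: start a pairing request so the TV displays a PIN, then show the form asking for it.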
try:
self._pair_state = await self._hub.pairRequest(
CONST_APP_ID,
CONST_APP_NAME,
platform.node(),
platform.system(),
"native",
)
except PairingFailure as exc:
LOGGER.debug(exc)
return self.async_abort(
reason="pairing_failure",
description_placeholders={"error_id": exc.data.get("error_id")},
)
return self.async_show_form(
step_id="pair", data_schema=schema, errors=errors
)
try:
username, password = await self._hub.pairGrant(
self._pair_state, user_input[CONF_PIN]
)
except PairingFailure as exc:
LOGGER.debug(exc)
if exc.data.get("error_id") == "INVALID_PIN":
errors[CONF_PIN] = "invalid_pin"
return self.async_show_form(
step_id="pair", data_schema=schema, errors=errors
)
return self.async_abort(
reason="pairing_failure",
description_placeholders={"error_id": exc.data.get("error_id")},
)
self._current[CONF_USERNAME] = username
self._current[CONF_PASSWORD] = password
return await self._async_create_current()
async def async_step_user(self, user_input: dict | None = None) -> dict:
"""Handle the initial step."""
errors = {}
if user_input:
self._current = user_input
try:
hub = await validate_input(
self.hass, user_input[CONF_HOST], user_input[CONF_API_VERSION]
)
except ConnectionFailure as exc:
LOGGER.error(exc)
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
await self.async_set_unique_id(hub.system["serialnumber"])
self._abort_if_unique_id_configured()
self._current[CONF_SYSTEM] = hub.system
self._current[CONF_API_VERSION] = hub.api_version
self._hub = hub
if hub.pairing_type == "digest_auth_pairing":
return await self.async_step_pair()
return await self._async_create_current()
schema = vol.Schema(
{
vol.Required(CONF_HOST, default=self._current.get(CONF_HOST)): str,
vol.Required(
CONF_API_VERSION, default=self._current.get(CONF_API_VERSION, 1)
): vol.In([1, 5, 6]),
}
)
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
| w1ll1am23/home-assistant | homeassistant/components/philips_js/config_flow.py | Python | apache-2.0 | 5,227 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from keystone.openstack.common.lockutils import lock
class LockFixture(fixtures.Fixture):
"""External locking fixture.
This fixture is basically an alternative to the synchronized decorator with
the external flag so that tearDowns and addCleanups will be included in
the lock context for locking between tests. The fixture is recommended to
be the first line in a test method, like so::
def test_method(self):
        self.useFixture(LockFixture('lock_name'))
...
or the first line in setUp if all the test methods in the class are
required to be serialized. Something like::
class TestCase(testtools.testcase):
def setUp(self):
            self.useFixture(LockFixture('lock_name'))
super(TestCase, self).setUp()
...
    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits (either by completing or by raising an exception).
"""
def __init__(self, name, lock_file_prefix=None):
self.mgr = lock(name, lock_file_prefix, True)
def setUp(self):
super(LockFixture, self).setUp()
self.addCleanup(self.mgr.__exit__, None, None, None)
self.mgr.__enter__()
| dsiddharth/access-keys | keystone/openstack/common/fixture/lockutils.py | Python | apache-2.0 | 1,886 |
# Copyright 2014 Red Hat
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Loop for up to 120 seconds waiting on notifications
# NOTE(chdent): The choice of 120 seconds is fairly
# arbitrary: Long enough to give the notifications the
# chance to travel across a highly latent bus but not
# so long as to allow excessive latency to never be visible.
# TODO(chdent): Ideally this value would come from configuration.
NOTIFICATIONS_WAIT = 120
NOTIFICATIONS_SLEEP = 1
class TestSwiftTelemetry(manager.SwiftScenarioTest):
"""
Test that swift uses the ceilometer middleware.
* create container.
* upload a file to the created container.
* retrieve the file from the created container.
* wait for notifications from ceilometer.
"""
@classmethod
def skip_checks(cls):
super(TestSwiftTelemetry, cls).skip_checks()
if not CONF.service_available.ceilometer:
skip_msg = ("%s skipped as ceilometer is not available" %
cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(TestSwiftTelemetry, cls).setup_clients()
cls.telemetry_client = cls.os_operator.telemetry_client
def _confirm_notifications(self, container_name, obj_name):
"""
        Poll in a loop for the expected notifications about the containers
        and objects sent to swift.
"""
def _check_samples():
"""
Return True only if we have notifications about some
containers and some objects and the notifications are about
the expected containers and objects.
            Otherwise, returning False will cause _check_samples to be
called again.
"""
results = self.telemetry_client.list_samples(
'storage.objects.incoming.bytes')
LOG.debug('got samples %s', results)
# Extract container info from samples.
containers, objects = [], []
for sample in results:
meta = sample['resource_metadata']
if meta.get('container') and meta['container'] != 'None':
containers.append(meta['container'])
elif (meta.get('target.metadata:container') and
meta['target.metadata:container'] != 'None'):
containers.append(meta['target.metadata:container'])
if meta.get('object') and meta['object'] != 'None':
objects.append(meta['object'])
elif (meta.get('target.metadata:object') and
meta['target.metadata:object'] != 'None'):
objects.append(meta['target.metadata:object'])
return (container_name in containers and obj_name in objects)
self.assertTrue(test.call_until_true(_check_samples,
NOTIFICATIONS_WAIT,
NOTIFICATIONS_SLEEP),
'Correct notifications were not received after '
'%s seconds.' % NOTIFICATIONS_WAIT)
@test.idempotent_id('6d6b88e5-3e38-41bc-b34a-79f713a6cb84')
@test.services('object_storage', 'telemetry')
def test_swift_middleware_notifies(self):
container_name = self.create_container()
obj_name, _ = self.upload_object_to_container(container_name)
self._confirm_notifications(container_name, obj_name)
| flyingfish007/tempest | tempest/scenario/test_swift_telemetry_middleware.py | Python | apache-2.0 | 4,228 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import contextmanager
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.jar_task import JarBuilderTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
def is_jvm_binary(target):
return isinstance(target, JvmBinary)
def is_java_library(target):
return target.has_sources('.java')
def is_scala_library(target):
return target.has_sources('.scala')
def is_jvm_library(target):
return (is_java_library(target)
or is_scala_library(target)
or (is_jvm_binary(target) and target.has_resources))
class JarCreate(JarBuilderTask):
"""Jars jvm libraries and optionally their sources and their docs."""
@classmethod
def register_options(cls, register):
super(JarCreate, cls).register_options(register)
register('--compressed', default=True, action='store_true',
fingerprint=True,
help='Create compressed jars.')
@classmethod
def product_types(cls):
return ['jars']
@classmethod
def prepare(cls, options, round_manager):
super(JarCreate, cls).prepare(options, round_manager)
cls.JarBuilder.prepare(round_manager)
def __init__(self, *args, **kwargs):
super(JarCreate, self).__init__(*args, **kwargs)
self.compressed = self.get_options().compressed
self._jars = {}
@property
def cache_target_dirs(self):
return True
def execute(self):
with self.invalidated(self.context.targets(is_jvm_library)) as invalidation_check:
with self.context.new_workunit(name='jar-create', labels=[WorkUnitLabel.MULTITOOL]):
jar_mapping = self.context.products.get('jars')
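        # Valid targets just re-register the jar already in their results dir; invalidated targets get their jar rebuilt.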
for vt in invalidation_check.all_vts:
jar_name = vt.target.name + '.jar'
jar_path = os.path.join(vt.results_dir, jar_name)
def add_jar_to_products():
jar_mapping.add(vt.target, vt.results_dir).append(jar_name)
if vt.valid:
if os.path.exists(jar_path):
add_jar_to_products()
else:
with self.create_jar(vt.target, jar_path) as jarfile:
with self.create_jar_builder(jarfile) as jar_builder:
if vt.target in jar_builder.add_target(vt.target):
add_jar_to_products()
@contextmanager
def create_jar(self, target, path):
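    # Guard against two different targets trying to write the same jar path.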
existing = self._jars.setdefault(path, target)
if target != existing:
raise TaskError(
'Duplicate name: target {} tried to write {} already mapped to target {}'
.format(target, path, existing))
self._jars[path] = target
with self.open_jar(path, overwrite=True, compressed=self.compressed) as jar:
yield jar
| sameerparekh/pants | src/python/pants/backend/jvm/tasks/jar_create.py | Python | apache-2.0 | 3,020 |
# Copyright (c) 2012-2016 Seafile Ltd.
from django.conf.urls import url
from .views import *
urlpatterns = [
url(r'^link/send/$', send_shared_link, name='send_shared_link'),
url(r'^link/save/$', save_shared_link, name='save_shared_link'),
url(r'^upload_link/send/$', send_shared_upload_link, name='send_shared_upload_link'),
url(r'^ajax/private-share-dir/$', ajax_private_share_dir, name='ajax_private_share_dir'),
url(r'^ajax/get-link-audit-code/$', ajax_get_link_audit_code, name='ajax_get_link_audit_code'),
]
| miurahr/seahub | seahub/share/urls.py | Python | apache-2.0 | 535 |
# Server Specific Configurations
server = {
'port': '6382',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'tuskar.api.controllers.root.RootController',
'modules': ['tuskar.api'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/templates',
'debug': False,
'enable_acl': False,
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
| tuskar/tuskar | tuskar/api/config.py | Python | apache-2.0 | 498 |
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import some_package.DeepChild
import some_package.deep_package.DeepDeepChild
print( "Done." )
| tempbottle/Nuitka | tests/programs/deep/DeepProgramMain.py | Python | apache-2.0 | 865 |
# coding=utf-8
"""
ComicStreamer bookmark manager thread class
"""
"""
Copyright 2012-2014 Anthony Beville
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
import select
import sys
import logging
import platform
import Queue
import datetime
from database import Comic
class Bookmarker(threading.Thread):
def __init__(self, dm):
super(Bookmarker, self).__init__()
self.queue = Queue.Queue(0)
self.quit = False
self.dm = dm
def stop(self):
self.quit = True
self.join()
def setBookmark(self, comic_id, pagenum):
self.queue.put((comic_id, pagenum))
def run(self):
logging.debug("Bookmarker: started main loop.")
self.session = self.dm.Session()
while True:
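            # Wait up to a second for a (comic_id, pagenum) request so the quit flag is re-checked regularly.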
try:
(comic_id, pagenum) = self.queue.get(block=True, timeout=1)
            except Queue.Empty:
comic_id = None
if comic_id is not None:
obj = self.session.query(Comic).filter(Comic.id == int(comic_id)).first()
if obj is not None:
if int(pagenum) < obj.page_count:
obj.lastread_ts = datetime.datetime.utcnow()
obj.lastread_page = int(pagenum)
self.session.commit()
if self.quit:
break
self.session.close()
self.session = None
logging.debug("Bookmarker: stopped main loop.")
#-------------------------------------------------
| sebdelsol/ComicStreamer | comicstreamerlib/bookmarker.py | Python | apache-2.0 | 2,089 |
"""Validate integration translation files."""
from __future__ import annotations
from functools import partial
from itertools import chain
import json
import re
import voluptuous as vol
from voluptuous.humanize import humanize_error
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
from script.translations import upload
from .model import Config, Integration
UNDEFINED = 0
REQUIRED = 1
REMOVED = 2
RE_REFERENCE = r"\[\%key:(.+)\%\]"
REMOVED_TITLE_MSG = (
"config.title key has been moved out of config and into the root of strings.json. "
"Starting Home Assistant 0.109 you only need to define this key in the root "
"if the title needs to be different than the name of your integration in the "
"manifest."
)
MOVED_TRANSLATIONS_DIRECTORY_MSG = (
"The '.translations' directory has been moved, the new name is 'translations', "
"starting with Home Assistant 0.112 your translations will no longer "
"load if you do not move/rename this "
)
def check_translations_directory_name(integration: Integration) -> None:
"""Check that the correct name is used for the translations directory."""
legacy_translations = integration.path / ".translations"
translations = integration.path / "translations"
if translations.is_dir():
# No action required
return
if legacy_translations.is_dir():
integration.add_error("translations", MOVED_TRANSLATIONS_DIRECTORY_MSG)
def find_references(strings, prefix, found):
"""Find references."""
for key, value in strings.items():
if isinstance(value, dict):
find_references(value, f"{prefix}::{key}", found)
continue
match = re.match(RE_REFERENCE, value)
if match:
found.append({"source": f"{prefix}::{key}", "ref": match.groups()[0]})
def removed_title_validator(config, integration, value):
"""Mark removed title."""
if not config.specific_integrations:
raise vol.Invalid(REMOVED_TITLE_MSG)
# Don't mark it as an error yet for custom components to allow backwards compat.
integration.add_warning("translations", REMOVED_TITLE_MSG)
return value
def lowercase_validator(value):
"""Validate value is lowercase."""
if value.lower() != value:
raise vol.Invalid("Needs to be lowercase")
return value
def gen_data_entry_schema(
*,
config: Config,
integration: Integration,
flow_title: int,
require_step_title: bool,
):
"""Generate a data entry schema."""
step_title_class = vol.Required if require_step_title else vol.Optional
schema = {
vol.Optional("flow_title"): cv.string_with_no_html,
vol.Required("step"): {
str: {
step_title_class("title"): cv.string_with_no_html,
vol.Optional("description"): cv.string_with_no_html,
vol.Optional("data"): {str: cv.string_with_no_html},
}
},
vol.Optional("error"): {str: cv.string_with_no_html},
vol.Optional("abort"): {str: cv.string_with_no_html},
vol.Optional("progress"): {str: cv.string_with_no_html},
vol.Optional("create_entry"): {str: cv.string_with_no_html},
}
if flow_title == REQUIRED:
schema[vol.Required("title")] = cv.string_with_no_html
elif flow_title == REMOVED:
schema[vol.Optional("title", msg=REMOVED_TITLE_MSG)] = partial(
removed_title_validator, config, integration
)
return schema
def gen_strings_schema(config: Config, integration: Integration):
"""Generate a strings schema."""
return vol.Schema(
{
vol.Optional("title"): cv.string_with_no_html,
vol.Optional("config"): gen_data_entry_schema(
config=config,
integration=integration,
flow_title=REMOVED,
require_step_title=False,
),
vol.Optional("options"): gen_data_entry_schema(
config=config,
integration=integration,
flow_title=UNDEFINED,
require_step_title=False,
),
vol.Optional("device_automation"): {
vol.Optional("action_type"): {str: cv.string_with_no_html},
vol.Optional("condition_type"): {str: cv.string_with_no_html},
vol.Optional("trigger_type"): {str: cv.string_with_no_html},
vol.Optional("trigger_subtype"): {str: cv.string_with_no_html},
},
vol.Optional("state"): cv.schema_with_slug_keys(
cv.schema_with_slug_keys(str, slug_validator=lowercase_validator),
slug_validator=vol.Any("_", cv.slug),
),
vol.Optional("system_health"): {
vol.Optional("info"): {str: cv.string_with_no_html}
},
}
)
def gen_auth_schema(config: Config, integration: Integration):
"""Generate auth schema."""
return vol.Schema(
{
vol.Optional("mfa_setup"): {
str: gen_data_entry_schema(
config=config,
integration=integration,
flow_title=REQUIRED,
require_step_title=True,
)
}
}
)
def gen_platform_strings_schema(config: Config, integration: Integration):
"""Generate platform strings schema like strings.sensor.json.
Example of valid data:
{
"state": {
"moon__phase": {
"full": "Full"
}
}
}
"""
def device_class_validator(value):
"""Key validator for platorm states.
Platform states are only allowed to provide states for device classes they prefix.
"""
if not value.startswith(f"{integration.domain}__"):
raise vol.Invalid(
f"Device class need to start with '{integration.domain}__'. Key {value} is invalid. See https://developers.home-assistant.io/docs/internationalization/core#stringssensorjson"
)
slug_friendly = value.replace("__", "_", 1)
slugged = slugify(slug_friendly)
if slug_friendly != slugged:
raise vol.Invalid(
f"invalid device class {value}. After domain__, needs to be all lowercase, no spaces."
)
return value
return vol.Schema(
{
vol.Optional("state"): cv.schema_with_slug_keys(
cv.schema_with_slug_keys(str, slug_validator=lowercase_validator),
slug_validator=device_class_validator,
)
}
)
ONBOARDING_SCHEMA = vol.Schema({vol.Required("area"): {str: cv.string_with_no_html}})
def validate_translation_file(config: Config, integration: Integration, all_strings):
"""Validate translation files for integration."""
if config.specific_integrations:
check_translations_directory_name(integration)
strings_files = [integration.path / "strings.json"]
# Also validate translations for custom integrations
if config.specific_integrations:
# Only English needs to be always complete
strings_files.append(integration.path / "translations/en.json")
references = []
if integration.domain == "auth":
strings_schema = gen_auth_schema(config, integration)
elif integration.domain == "onboarding":
strings_schema = ONBOARDING_SCHEMA
else:
strings_schema = gen_strings_schema(config, integration)
for strings_file in strings_files:
if not strings_file.is_file():
continue
name = str(strings_file.relative_to(integration.path))
try:
strings = json.loads(strings_file.read_text())
except ValueError as err:
integration.add_error("translations", f"Invalid JSON in {name}: {err}")
continue
try:
strings_schema(strings)
except vol.Invalid as err:
integration.add_error(
"translations", f"Invalid {name}: {humanize_error(strings, err)}"
)
else:
if strings_file.name == "strings.json":
find_references(strings, name, references)
platform_string_schema = gen_platform_strings_schema(config, integration)
platform_strings = [integration.path.glob("strings.*.json")]
if config.specific_integrations:
platform_strings.append(integration.path.glob("translations/*.en.json"))
for path in chain(*platform_strings):
name = str(path.relative_to(integration.path))
try:
strings = json.loads(path.read_text())
except ValueError as err:
integration.add_error("translations", f"Invalid JSON in {name}: {err}")
continue
try:
platform_string_schema(strings)
except vol.Invalid as err:
msg = f"Invalid {path.name}: {humanize_error(strings, err)}"
if config.specific_integrations:
integration.add_warning("translations", msg)
else:
integration.add_error("translations", msg)
else:
find_references(strings, path.name, references)
if config.specific_integrations:
return
# Validate references
for reference in references:
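        # Walk the '::'-separated reference path through the combined strings to confirm the referenced key exists.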
parts = reference["ref"].split("::")
search = all_strings
key = parts.pop(0)
while parts and key in search:
search = search[key]
key = parts.pop(0)
if parts or key not in search:
integration.add_error(
"translations",
f"{reference['source']} contains invalid reference {reference['ref']}: Could not find {key}",
)
def validate(integrations: dict[str, Integration], config: Config):
"""Handle JSON files inside integrations."""
if config.specific_integrations:
all_strings = None
else:
all_strings = upload.generate_upload_data()
for integration in integrations.values():
validate_translation_file(config, integration, all_strings)
| w1ll1am23/home-assistant | script/hassfest/translations.py | Python | apache-2.0 | 10,183 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import math
# this test shows how to perform a partitioning of a historySet, removing some histories
# since we are changing the structure of the historySet, we also need to adjust the input variables that are not touched by these functions
def time(self):
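  # trim each history to start at the first time step >= 0.001 and, to demonstrate dropping histories, keep only histories with index > 1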
newTime = []
  x0 = [] # since we are changing the structure of the historySet, we also need to adjust the untouched input variables (see note above)
  y0 = [] # see note above
  z0 = [] # see note above
for history in range(len(self.time)):
for ts in range(len(self.time[history])):
if self.time[history][ts] >= 0.001:
break
if history >1:
# just to show how to skip a history, we skip the first two ones
newTime.append(self.time[history][ts:])
x0.append(self.x0[history])
y0.append(self.y0[history])
z0.append(self.z0[history])
self.x0 = x0
self.y0 = y0
self.z0 = z0
return newTime
def x(self):
newX = []
for history in range(len(self.time)):
for ts in range(len(self.time[history])):
if self.time[history][ts] >= 0.001:
break
if history >1:
# just to show how to skip a history, we skip the first two ones
newX.append(self.x[history][ts:])
return newX
def y(self):
newY = []
for history in range(len(self.time)):
for ts in range(len(self.time[history])):
if self.time[history][ts] >= 0.001:
break
if history >1:
# just to show how to skip a history, we skip the first two ones
newY.append(self.y[history][ts:])
return newY
def z(self):
newZ = []
for history in range(len(self.time)):
for ts in range(len(self.time[history])):
if self.time[history][ts] >= 0.001:
break
if history >1:
# just to show how to skip a history, we skip the first two ones
newZ.append(self.z[history][ts:])
return newZ
| idaholab/raven | tests/framework/PostProcessors/ExternalPostProcessor/testHistorySetDeletingHistories/partitionHistorySetAndRemoveSomeHistories.py | Python | apache-2.0 | 2,721 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.domain import activity_domain
from core.domain import activity_services
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_services
from core.domain import exp_services_test
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import summary_services
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
class ExplorationDisplayableSummariesTest(
exp_services_test.ExplorationServicesUnitTests):
"""Test functions for getting displayable exploration summary dicts."""
ALBERT_EMAIL = '[email protected]'
BOB_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
BOB_NAME = 'bob'
USER_C_NAME = 'c'
USER_D_NAME = 'd'
USER_C_EMAIL = '[email protected]'
USER_D_EMAIL = '[email protected]'
USER_C_PROFILE_PICTURE = 'c_profile_picture'
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXP_ID_3 = 'eid3'
EXP_ID_4 = 'eid4'
EXP_ID_5 = 'eid5'
EXPECTED_VERSION_1 = 4
EXPECTED_VERSION_2 = 2
def setUp(self):
"""Populate the database of explorations and their summaries.
The sequence of events is:
- (1) Albert creates EXP_ID_1.
- (2) Bob edits the title of EXP_ID_1.
- (3) Albert creates EXP_ID_2.
- (4) Albert edits the title of EXP_ID_1.
- (5) Albert edits the title of EXP_ID_2.
- (6) Bob reverts Albert's last edit to EXP_ID_1.
- Bob tries to publish EXP_ID_2, and is denied access.
- (7) Albert publishes EXP_ID_2.
- (8) Albert creates EXP_ID_3
- (9) Albert publishes EXP_ID_3
- (10) Albert deletes EXP_ID_3
- (1) User_3 (has a profile_picture) creates EXP_ID_4.
- (2) User_4 edits the title of EXP_ID_4.
- (3) User_4 edits the title of EXP_ID_4.
"""
super(ExplorationDisplayableSummariesTest, self).setUp()
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.signup(self.BOB_EMAIL, self.BOB_NAME)
self.save_new_valid_exploration(self.EXP_ID_1, self.albert_id)
exp_services.update_exploration(
self.bob_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 1 title'
}], 'Changed title.')
self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id)
exp_services.update_exploration(
self.albert_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 1 Albert title'
}], 'Changed title to Albert1 title.')
exp_services.update_exploration(
self.albert_id, self.EXP_ID_2, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 2 Albert title'
}], 'Changed title to Albert2 title.')
exp_services.revert_exploration(self.bob_id, self.EXP_ID_1, 3, 2)
with self.assertRaisesRegexp(
Exception, 'This exploration cannot be published'
):
rights_manager.publish_exploration(self.bob_id, self.EXP_ID_2)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_2)
self.save_new_valid_exploration(self.EXP_ID_3, self.albert_id)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_3)
exp_services.delete_exploration(self.albert_id, self.EXP_ID_3)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
self.user_d_id = self.get_user_id_from_email(self.USER_D_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_NAME)
self.signup(self.USER_D_EMAIL, self.USER_D_NAME)
user_services.update_profile_picture_data_url(
self.user_c_id, self.USER_C_PROFILE_PICTURE)
self.save_new_valid_exploration(self.EXP_ID_4, self.user_c_id)
exp_services.update_exploration(
self.user_d_id, self.EXP_ID_4, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration updated title'
}], 'Changed title once.')
exp_services.update_exploration(
self.user_d_id, self.EXP_ID_4, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration updated title again'
}], 'Changed title twice.')
self.save_new_valid_exploration(self.EXP_ID_5, self.bob_id)
def test_get_human_readable_contributors_summary(self):
contributors_summary = {self.albert_id: 10, self.bob_id: 13}
self.assertEqual({
self.ALBERT_NAME: {
'num_commits': 10,
'profile_picture_data_url': (
user_services.DEFAULT_IDENTICON_DATA_URL)
},
self.BOB_NAME: {
'num_commits': 13,
'profile_picture_data_url': (
user_services.DEFAULT_IDENTICON_DATA_URL)
}
}, summary_services.get_human_readable_contributors_summary(
contributors_summary))
contributors_summary = {self.user_c_id: 1, self.user_d_id: 2}
self.assertEqual({
self.USER_C_NAME: {
'num_commits': 1,
'profile_picture_data_url': self.USER_C_PROFILE_PICTURE
},
self.USER_D_NAME: {
'num_commits': 2,
'profile_picture_data_url': (
user_services.DEFAULT_IDENTICON_DATA_URL)
}
}, summary_services.get_human_readable_contributors_summary(
contributors_summary))
def test_get_displayable_exp_summary_dicts_matching_ids(self):
# A list of exp_id's are passed in:
# EXP_ID_1 -- private exploration owned by Albert
# EXP_ID_2 -- pubished exploration owned by Albert
# EXP_ID_3 -- deleted exploration
# EXP_ID_5 -- private exploration owned by Bob
# Should only return [EXP_ID_2]
displayable_summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
[self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5]))
expected_summary = {
'category': u'A category',
'community_owned': False,
'id': self.EXP_ID_2,
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'num_views': 0,
'objective': u'An objective',
'ratings': feconf.get_empty_ratings(),
'status': 'public',
'tags': [],
'thumbnail_bg_color': '#a33f40',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'title': u'Exploration 2 Albert title',
}
self.assertIn('last_updated_msec', displayable_summaries[0])
self.assertDictContainsSubset(expected_summary,
displayable_summaries[0])
def test_get_public_and_filtered_private_summary_dicts_for_creator(self):
# If a new exploration is created by another user (Bob) and not public,
# then Albert cannot see it when querying for explorations.
displayable_summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
[self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5],
editor_user_id=self.albert_id))
self.assertEqual(len(displayable_summaries), 2)
self.assertEqual(displayable_summaries[0]['id'], self.EXP_ID_1)
self.assertEqual(displayable_summaries[1]['id'], self.EXP_ID_2)
# However, if Albert is granted editor access to Bob's exploration,
# then Albert has access to the corresponding summary.
rights_manager.assign_role_for_exploration(
self.bob_id, self.EXP_ID_5, self.albert_id,
rights_manager.ROLE_EDITOR)
displayable_summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
[self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5],
editor_user_id=self.albert_id))
self.assertEqual(len(displayable_summaries), 3)
self.assertEqual(displayable_summaries[0]['status'], 'private')
self.assertEqual(displayable_summaries[0]['id'], self.EXP_ID_1)
self.assertEqual(displayable_summaries[1]['status'], 'public')
self.assertEqual(displayable_summaries[1]['id'], self.EXP_ID_2)
self.assertEqual(displayable_summaries[2]['status'], 'private')
self.assertEqual(displayable_summaries[2]['id'], self.EXP_ID_5)
class LibraryGroupsTest(exp_services_test.ExplorationServicesUnitTests):
"""Test functions for getting summary dicts for library groups."""
def setUp(self):
"""Populate the database of explorations and their summaries.
The sequence of events is:
- (1) Admin logs in.
- (2) Admin access admin page.
- (3) Admin reloads exploration with id '2'.
- (4) Admin logs out.
"""
super(LibraryGroupsTest, self).setUp()
self.login(self.ADMIN_EMAIL, is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'reload_exploration',
'exploration_id': '2'
}, csrf_token)
self.logout()
def test_get_library_groups(self):
"""The exploration with id '2' is an exploration in the Mathematics
category. The call to get_library_groups() should return the
exploration as part of the Mathematics & Statistics group.
"""
library_groups = summary_services.get_library_groups([])
expected_exploration_summary_dict = {
'category': u'Algorithms',
'community_owned': True,
'id': '2',
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'num_views': 0,
'objective': u'discover the binary search algorithm',
'ratings': feconf.get_empty_ratings(),
'status': u'public',
'tags': [],
'title': u'The Lazy Magician',
'thumbnail_bg_color': '#d0982a',
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Algorithms.svg'),
}
expected_group = {
'categories': ['Algorithms', 'Computing', 'Programming'],
'header': 'Computing',
}
self.assertEqual(len(library_groups), 1)
self.assertDictContainsSubset(expected_group, library_groups[0])
self.assertEqual(
len(library_groups[0]['activity_summary_dicts']), 1)
actual_exploration_summary_dict = (
library_groups[0]['activity_summary_dicts'][0])
self.assertDictContainsSubset(expected_exploration_summary_dict, (
actual_exploration_summary_dict))
class FeaturedExplorationDisplayableSummariesTest(
test_utils.GenericTestBase):
"""Test functions for getting displayable featured exploration
summary dicts.
"""
ALBERT_NAME = 'albert'
ALBERT_EMAIL = '[email protected]'
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
LANGUAGE_CODE_ES = 'es'
def setUp(self):
"""Populate the database of explorations and their summaries.
The sequence of events is:
- (1) Albert creates EXP_ID_1.
- (2) Albert creates EXP_ID_2.
- (3) Albert publishes EXP_ID_1.
- (4) Albert publishes EXP_ID_2.
- (5) Admin user is set up.
"""
super(FeaturedExplorationDisplayableSummariesTest, self).setUp()
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.save_new_valid_exploration(
self.EXP_ID_1, self.albert_id, language_code=self.LANGUAGE_CODE_ES)
self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_1)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_2)
self.set_admins([self.ADMIN_USERNAME])
def test_for_featured_explorations(self):
"""Note that both EXP_ID_1 and EXP_ID_2 are public. However, only
EXP_ID_2 is featured, so the call to get_featured_explorations() should
only return [EXP_ID_2].
"""
activity_services.update_featured_activity_references([
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_2)
])
featured_activity_summaries = (
summary_services.get_featured_activity_summary_dicts([
feconf.DEFAULT_LANGUAGE_CODE]))
self.assertEqual(len(featured_activity_summaries), 1)
self.assertDictContainsSubset({
'status': 'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_2,
'category': 'A category',
'ratings': feconf.get_empty_ratings(),
'title': 'A title',
'num_views': 0,
'objective': 'An objective'
}, featured_activity_summaries[0])
def test_language_code_filter(self):
"""Note that both EXP_ID_1 is in Spanish and EXP_ID_2 is in English."""
activity_services.update_featured_activity_references([
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_1),
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_2)
])
featured_activity_summaries = (
summary_services.get_featured_activity_summary_dicts([
feconf.DEFAULT_LANGUAGE_CODE]))
self.assertEqual(len(featured_activity_summaries), 1)
self.assertDictContainsSubset({
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_2,
}, featured_activity_summaries[0])
featured_activity_summaries = (
summary_services.get_featured_activity_summary_dicts([
self.LANGUAGE_CODE_ES]))
self.assertEqual(len(featured_activity_summaries), 1)
self.assertDictContainsSubset({
'language_code': self.LANGUAGE_CODE_ES,
'id': self.EXP_ID_1,
}, featured_activity_summaries[0])
featured_activity_summaries = (
summary_services.get_featured_activity_summary_dicts([
feconf.DEFAULT_LANGUAGE_CODE, self.LANGUAGE_CODE_ES]))
self.assertEqual(len(featured_activity_summaries), 2)
self.assertDictContainsSubset({
'language_code': self.LANGUAGE_CODE_ES,
'id': self.EXP_ID_1,
}, featured_activity_summaries[0])
self.assertDictContainsSubset({
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_2,
}, featured_activity_summaries[1])
featured_activity_summaries = (
summary_services.get_featured_activity_summary_dicts([
'nonexistent_language_code']))
self.assertEqual(len(featured_activity_summaries), 0)
featured_activity_summaries = (
summary_services.get_featured_activity_summary_dicts([]))
self.assertEqual(len(featured_activity_summaries), 0)
class CollectionLearnerDictTests(test_utils.GenericTestBase):
"""Test get_learner_collection_dict_by_id."""
EXP_ID = 'exploration_id'
EXP_ID_1 = 'exp_id1'
COLLECTION_ID = 'A_collection_id'
def setUp(self):
super(CollectionLearnerDictTests, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
user_services.get_or_create_user(self.owner_id, self.OWNER_EMAIL)
user_services.get_or_create_user(self.editor_id, self.EDITOR_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
def test_get_learner_dict_with_deleted_exp_fails_validation(self):
self.save_new_valid_collection(
self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID)
summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id)
exp_services.delete_exploration(self.owner_id, self.EXP_ID)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected collection to only reference valid explorations, but '
'found an exploration with ID: exploration_id'):
summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id)
def test_get_learner_dict_when_referencing_inaccessible_explorations(self):
self.save_new_default_collection(self.COLLECTION_ID, self.owner_id)
self.save_new_valid_exploration(self.EXP_ID, self.editor_id)
collection_services.update_collection(
self.owner_id, self.COLLECTION_ID, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': self.EXP_ID
}], 'Added another creator\'s private exploration')
# A collection cannot access someone else's private exploration.
rights_manager.publish_collection(self.owner_id, self.COLLECTION_ID)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected collection to only reference valid explorations, but '
'found an exploration with ID: exploration_id'):
summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id)
# After the exploration is published, the dict can now be created.
rights_manager.publish_exploration(self.editor_id, self.EXP_ID)
summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id)
def test_get_learner_dict_with_private_exp_fails_validation(self):
self.save_new_valid_collection(
self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID)
# Since both the collection and exploration are private, the learner
# dict can be created.
summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id)
# A public collection referencing a private exploration is bad, however.
rights_manager.publish_collection(self.owner_id, self.COLLECTION_ID)
with self.assertRaisesRegexp(
utils.ValidationError,
'Cannot reference a private exploration within a public '
'collection, exploration ID: exploration_id'):
summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id)
        # After the exploration is published, the learner dict can be created
# again.
rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id)
def test_get_learner_dict_with_allowed_private_exps(self):
self.save_new_valid_collection(
self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID)
self.save_new_valid_exploration(self.EXP_ID_1, self.editor_id)
collection_services.update_collection(
self.owner_id, self.COLLECTION_ID, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': self.EXP_ID_1
}], 'Added another creator\'s private exploration')
rights_manager.publish_collection(self.owner_id, self.COLLECTION_ID)
collection_dict = summary_services.get_learner_collection_dict_by_id(
self.COLLECTION_ID, self.owner_id, allow_invalid_explorations=True)
# The author's private exploration will be contained in the public
# collection since invalid explorations are being allowed, but the
# private exploration of another author will not.
collection_node_dicts = collection_dict['nodes']
self.assertEqual(
collection_node_dicts[0]['exploration_summary']['id'],
self.EXP_ID)
self.assertIsNone(collection_node_dicts[1]['exploration_summary'])
class TopRatedExplorationDisplayableSummariesTest(
test_utils.GenericTestBase):
"""Test functions for getting displayable top rated exploration
summary dicts.
"""
ALBERT_EMAIL = '[email protected]'
ALICE_EMAIL = '[email protected]'
BOB_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
ALICE_NAME = 'alice'
BOB_NAME = 'bob'
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXP_ID_3 = 'eid3'
EXP_ID_4 = 'eid4'
EXP_ID_5 = 'eid5'
EXP_ID_6 = 'eid6'
EXP_ID_7 = 'eid7'
EXP_ID_8 = 'eid8'
EXP_ID_9 = 'eid9'
def setUp(self):
"""Populate the database of explorations and their summaries.
The sequence of events is:
- (1) Albert creates EXP_ID_1.
- (2) Albert creates EXP_ID_2.
- (3) Albert creates EXP_ID_3.
- (4) Albert creates EXP_ID_4.
- (5) Albert creates EXP_ID_5.
- (6) Albert creates EXP_ID_6.
- (7) Albert creates EXP_ID_7.
- (8) Albert creates EXP_ID_8.
- (9) Albert creates EXP_ID_9.
- (10) Albert publishes EXP_ID_1.
- (11) Albert publishes EXP_ID_2.
- (12) Albert publishes EXP_ID_3.
- (13) Albert publishes EXP_ID_4.
- (14) Albert publishes EXP_ID_5.
- (15) Albert publishes EXP_ID_6.
- (16) Albert publishes EXP_ID_7.
- (17) Albert publishes EXP_ID_8.
- (18) Albert publishes EXP_ID_9.
- (19) Admin user is set up.
"""
super(TopRatedExplorationDisplayableSummariesTest, self).setUp()
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.alice_id = self.get_user_id_from_email(self.ALICE_EMAIL)
self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.signup(self.ALICE_EMAIL, self.ALICE_NAME)
self.signup(self.BOB_EMAIL, self.BOB_NAME)
self.save_new_valid_exploration(self.EXP_ID_1, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_3, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_4, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_5, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_6, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_7, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_8, self.albert_id)
self.save_new_valid_exploration(self.EXP_ID_9, self.albert_id)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_1)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_2)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_3)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_4)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_5)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_6)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_7)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_8)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_9)
self.set_admins([self.ADMIN_USERNAME])
def test_at_most_eight_top_rated_explorations(self):
"""Note that at most 8 explorations should be returned.
"""
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_2, 5)
rating_services.assign_rating_to_exploration(
self.alice_id, self.EXP_ID_3, 5)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_3, 4)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_4, 4)
rating_services.assign_rating_to_exploration(
self.alice_id, self.EXP_ID_5, 4)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_5, 3)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_6, 3)
rating_services.assign_rating_to_exploration(
self.alice_id, self.EXP_ID_6, 2)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_8, 2)
rating_services.assign_rating_to_exploration(
self.alice_id, self.EXP_ID_8, 2)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_7, 2)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_9, 2)
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_1, 1)
top_rated_exploration_summaries = (
summary_services.get_top_rated_exploration_summary_dicts([
feconf.DEFAULT_LANGUAGE_CODE]))
expected_summary = {
'status': u'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_3,
'category': u'A category',
'ratings': {u'1': 0, u'3': 0, u'2': 0, u'5': 1, u'4': 1},
'title': u'A title',
'num_views': 0,
'objective': u'An objective'
}
self.assertDictContainsSubset(
expected_summary, top_rated_exploration_summaries[0])
expected_ordering = [
self.EXP_ID_3, self.EXP_ID_2, self.EXP_ID_5, self.EXP_ID_4,
self.EXP_ID_6, self.EXP_ID_8, self.EXP_ID_7, self.EXP_ID_9]
actual_ordering = [exploration['id'] for exploration in
top_rated_exploration_summaries]
self.assertEqual(expected_ordering, actual_ordering)
def test_only_explorations_with_ratings_are_returned(self):
"""Note that only explorations with ratings will be included
"""
rating_services.assign_rating_to_exploration(
self.bob_id, self.EXP_ID_2, 5)
top_rated_exploration_summaries = (
summary_services.get_top_rated_exploration_summary_dicts([
feconf.DEFAULT_LANGUAGE_CODE]))
expected_summary = {
'status': u'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_2,
'category': u'A category',
'ratings': {u'1': 0, u'3': 0, u'2': 0, u'5': 1, u'4': 0},
'title': u'A title',
'num_views': 0,
'objective': u'An objective'
}
self.assertDictContainsSubset(
expected_summary, top_rated_exploration_summaries[0])
expected_ordering = [self.EXP_ID_2]
actual_ordering = [exploration['id'] for exploration in
top_rated_exploration_summaries]
self.assertEqual(expected_ordering, actual_ordering)
class RecentlyPublishedExplorationDisplayableSummariesTest(
test_utils.GenericTestBase):
"""Test functions for getting displayable recently published exploration
summary dicts.
"""
ALBERT_NAME = 'albert'
ALBERT_EMAIL = '[email protected]'
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXP_ID_3 = 'eid3'
def setUp(self):
"""Populate the database of explorations and their summaries.
The sequence of events is:
- (1) Albert creates EXP_ID_1.
- (2) Albert creates EXP_ID_2.
- (3) Albert creates EXP_ID_3.
- (4) Albert publishes EXP_ID_1.
- (5) Albert publishes EXP_ID_2.
- (6) Albert publishes EXP_ID_3.
- (7) Admin user is set up.
"""
super(RecentlyPublishedExplorationDisplayableSummariesTest,
self).setUp()
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.save_new_valid_exploration(
self.EXP_ID_1, self.albert_id,
end_state_name='End')
self.save_new_valid_exploration(
self.EXP_ID_2, self.albert_id,
end_state_name='End')
self.save_new_valid_exploration(
self.EXP_ID_3, self.albert_id,
end_state_name='End')
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_2)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_1)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_3)
self.set_admins([self.ADMIN_USERNAME])
def test_for_recently_published_explorations(self):
""" Tests for recently published explorations.
"""
recently_published_exploration_summaries = (
summary_services.get_recently_published_exploration_summary_dicts())
test_summary_1 = {
'status': 'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_1,
'category': u'A category',
'ratings': feconf.get_empty_ratings(),
'title': u'A title',
'num_views': 0,
'objective': u'An objective'
}
test_summary_2 = {
'status': 'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_2,
'category': u'A category',
'ratings': feconf.get_empty_ratings(),
'title': u'A title',
'num_views': 0,
'objective': u'An objective'
}
test_summary_3 = {
'status': 'public',
'thumbnail_bg_color': '#a33f40',
'community_owned': False,
'tags': [],
'thumbnail_icon_url': self.get_static_asset_url(
'/images/subjects/Lightbulb.svg'),
'language_code': feconf.DEFAULT_LANGUAGE_CODE,
'id': self.EXP_ID_3,
'category': u'A category',
'ratings': feconf.get_empty_ratings(),
'title': u'A title',
'num_views': 0,
'objective': u'An objective'
}
self.assertDictContainsSubset(
test_summary_3, recently_published_exploration_summaries[0])
self.assertDictContainsSubset(
test_summary_1, recently_published_exploration_summaries[1])
self.assertDictContainsSubset(
test_summary_2, recently_published_exploration_summaries[2])
# Test that editing an exploration does not change its
# 'recently-published' status.
exp_services.update_exploration(
self.albert_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New title'
}], 'Changed title.')
recently_published_exploration_summaries = (
summary_services.get_recently_published_exploration_summary_dicts())
self.assertEqual(
recently_published_exploration_summaries[1]['title'], 'New title')
self.assertDictContainsSubset(
test_summary_3, recently_published_exploration_summaries[0])
class ActivityReferenceAccessCheckerTests(test_utils.GenericTestBase):
"""Tests for requiring that activity references are public."""
EXP_ID_0 = 'exp_id_0'
EXP_ID_1 = 'exp_id_1'
COL_ID_2 = 'col_id_2'
def setUp(self):
super(ActivityReferenceAccessCheckerTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
def test_requiring_nonexistent_activities_be_public_raises_exception(self):
with self.assertRaisesRegexp(Exception, 'non-existent exploration'):
summary_services.require_activities_to_be_public([
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_EXPLORATION, 'fake')])
with self.assertRaisesRegexp(Exception, 'non-existent collection'):
summary_services.require_activities_to_be_public([
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_COLLECTION, 'fake')])
def test_requiring_private_activities_to_be_public_raises_exception(self):
self.save_new_valid_exploration(self.EXP_ID_0, self.owner_id)
self.save_new_valid_exploration(self.EXP_ID_1, self.owner_id)
self.save_new_valid_collection(
self.COL_ID_2, self.owner_id, exploration_id=self.EXP_ID_0)
with self.assertRaisesRegexp(Exception, 'private exploration'):
summary_services.require_activities_to_be_public([
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_0)])
with self.assertRaisesRegexp(Exception, 'private collection'):
summary_services.require_activities_to_be_public([
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_COLLECTION, self.COL_ID_2)])
def test_requiring_public_activities_to_be_public_succeeds(self):
self.save_new_valid_exploration(self.EXP_ID_0, self.owner_id)
self.save_new_valid_collection(
self.COL_ID_2, self.owner_id, exploration_id=self.EXP_ID_0)
rights_manager.publish_exploration(self.owner_id, self.EXP_ID_0)
rights_manager.publish_collection(self.owner_id, self.COL_ID_2)
# There are no validation errors.
summary_services.require_activities_to_be_public([
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_0),
activity_domain.ActivityReference(
feconf.ACTIVITY_TYPE_COLLECTION, self.COL_ID_2)])
| raju249/oppia | core/domain/summary_services_test.py | Python | apache-2.0 | 36,554 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
    # Handle null resource publication: we check whether the file size is < 1 MB.
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify the user that we are in a transient state and the application
    is still converging, potentially waiting on remote units or sitting in a
    detached wait loop. '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured, because this may be the first unit online in a service pool
    # that will self-host the dns pod. It is reconfigured later to query the
    # dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress RC enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-replication-controller.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
''' Scale the number of ingress controller replicas to match the number of
nodes. '''
try:
output = kubectl('get', 'nodes', '-o', 'name')
count = len(output.splitlines())
kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller') # noqa
except CalledProcessError:
hookenv.log('Failed to scale ingress controllers. Will attempt again next update.') # noqa
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
_apply_node_label(label, delete=True)
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
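# Hedged illustration (not part of the charm): how previous, base and extra
# args combine before being handed to 'snap set' above. Keys from the previous
# run are first nulled so the snap drops stale flags, then base and extra
# values are layered on top. The flag names below are made up for the example.
def _example_merge_snap_args():
    '''Example only; mirrors the merge logic in configure_kubernetes_service.'''
    prev_args = {'v': '0', 'stale-flag': 'x'}
    base_args = {'v': '0', 'kubeconfig': '/root/cdk/kubeconfig'}
    extra_args = {'feature-gates': 'Accelerators=true'}
    args = {}
    for arg in prev_args:
        args[arg] = 'null'
    args.update(base_args)
    args.update(extra_args)
    # Yields e.g. ['v=0', 'stale-flag=null', 'kubeconfig=/root/cdk/kubeconfig',
    # 'feature-gates=Accelerators=true'] (dict ordering may vary), which is
    # appended to ['snap', 'set', service] above.
    return ['%s=%s' % item for item in args.items()]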
def configure_kubelet(dns):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
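# Hedged usage sketch: the paths, server address and token below are
# illustrative, not values produced by this charm. A call such as
#   create_kubeconfig('/root/cdk/kubeconfig', 'https://10.0.0.1:6443',
#                     '/root/cdk/ca.crt', token='abc123', user='kubelet')
# roughly issues, in order (each with --kubeconfig=/root/cdk/kubeconfig):
#   kubectl config set-cluster juju-cluster --server=https://10.0.0.1:6443 \
#       --certificate-authority=/root/cdk/ca.crt --embed-certs=true
#   kubectl config unset users
#   kubectl config set-credentials kubelet --token=abc123
#   kubectl config set-context juju-context --cluster=juju-cluster --user=kubelet
#   kubectl config use-context juju-context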
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend:1.4"
if arch() == 's390x':
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend-s390x:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress replication controller manifest
context['ingress_image'] = \
"gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
if arch() == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
manifest = addon_path.format('ingress-replication-controller.yaml')
render('ingress-replication-controller.yaml', manifest, context)
hookenv.log('Creating the ingress replication controller.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
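# Hedged usage sketch (the manifest path is illustrative):
#   kubectl_manifest('create', '/root/cdk/addons/example.yaml')
#     -> returns True without re-creating if 'kubectl get -f' already finds it
#   kubectl_manifest('delete', '/root/cdk/addons/example.yaml')
#     -> runs 'kubectl delete -f ... --now' for immediate removal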
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`).
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
class ApplyNodeLabelFailed(Exception):
pass
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
# k8s lowercases hostnames and uses them as node names
hostname = gethostname().lower()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, hostname, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, hostname, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
break
hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
label, code))
time.sleep(1)
else:
msg = 'Failed to apply label %s' % label
raise ApplyNodeLabelFailed(msg)
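# Hedged sketch of the commands built above (the hostname 'worker-0' is
# illustrative; the real value comes from gethostname()):
#   _apply_node_label('gpu=true', overwrite=True) runs
#     kubectl --kubeconfig=/root/cdk/kubeconfig label node worker-0 gpu=true --overwrite
#   _apply_node_label('gpu', delete=True) runs
#     kubectl --kubeconfig=/root/cdk/kubeconfig label node worker-0 gpu-
# The trailing '-' is kubectl's syntax for removing a label; each call is
# retried for up to 60 seconds before ApplyNodeLabelFailed is raised.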
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
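# Hedged example of the parsing rule above (the labels are illustrative):
#   _parse_labels('gpu=true zone=us-east-1 malformed')
#   -> ['gpu=true', 'zone=us-east-1']  (the token without '=' is logged and
#      skipped)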
| dqminh/kubernetes | cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | Python | apache-2.0 | 35,127 |
# taken from http://code.activestate.com/recipes/363602/
def cached_property(f):
"""returns a cached property that is calculated by function f"""
def get(self):
try:
return self._property_cache[f]
except AttributeError:
self._property_cache = {}
x = self._property_cache[f] = f(self)
return x
except KeyError:
x = self._property_cache[f] = f(self)
return x
return property(get)
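# Hedged usage sketch: the circle class below is illustrative, not part of the
# recipe. The first access computes the value and stores it in
# self._property_cache; later accesses return the cached result.
class _ExampleCircle(object):
    def __init__(self, radius):
        self.radius = radius
    @cached_property
    def area(self):
        print("computing area")  # printed only on the first access
        return 3.14159 * self.radius ** 2
if __name__ == '__main__':
    c = _ExampleCircle(2.0)
    print(c.area)  # computes and caches
    print(c.area)  # served from the cache, no recomputation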
| prongs/rbt-jira | apache_dev_tool/utils.py | Python | apache-2.0 | 490 |
# Tweepy
# Copyright 2009 Joshua Roesslein
# See LICENSE
import htmlentitydefs
import re
from datetime import datetime
import time
from tweepy.models import models
def _parse_cursor(obj):
return obj['next_cursor'], obj['prev_cursor']
def parse_json(obj, api):
return obj
def parse_return_true(obj, api):
return True
def parse_none(obj, api):
return None
def parse_error(obj):
return obj['error']
def _parse_datetime(str):
# We must parse datetime this way to work in python 2.4
return datetime(*(time.strptime(str, '%a %b %d %H:%M:%S +0000 %Y')[0:6]))
def _parse_search_datetime(str):
# python 2.4
return datetime(*(time.strptime(str, '%a, %d %b %Y %H:%M:%S +0000')[0:6]))
def unescape_html(text):
"""Created by Fredrik Lundh (http://effbot.org/zone/re-sub.htm#unescape-html)"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
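# Hedged example (Python 2, matching this module): numeric, hex and named
# entities all collapse to their characters, and unknown entities are left
# untouched.
#   unescape_html('Tom &amp; Jerry &#38; &#x26; &bogus;')
#   -> u'Tom & Jerry & & &bogus;'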
def _parse_html_value(html):
return html[html.find('>')+1:html.rfind('<')]
def _parse_a_href(atag):
start = atag.find('"') + 1
end = atag.find('"', start)
return atag[start:end]
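# Hedged example of the two helpers above on a typical Twitter 'source' field
# (the URL and label are illustrative):
#   atag = '<a href="http://example.com/app" rel="nofollow">Example App</a>'
#   _parse_html_value(atag)  -> 'Example App'
#   _parse_a_href(atag)      -> 'http://example.com/app'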
def parse_user(obj, api):
user = models['user']()
user._api = api
for k, v in obj.items():
if k == 'created_at':
setattr(user, k, _parse_datetime(v))
elif k == 'status':
setattr(user, k, parse_status(v, api))
elif k == 'following':
# twitter sets this to null if it is false
if v is True:
setattr(user, k, True)
else:
setattr(user, k, False)
else:
setattr(user, k, v)
return user
def parse_users(obj, api):
if isinstance(obj, list) is False:
item_list = obj['users']
else:
item_list = obj
users = []
for item in item_list:
users.append(parse_user(item, api))
return users
def parse_status(obj, api):
status = models['status']()
status._api = api
for k, v in obj.items():
if k == 'user':
user = parse_user(v, api)
setattr(status, 'author', user)
            setattr(status, 'user', user)  # DEPRECATED
elif k == 'created_at':
setattr(status, k, _parse_datetime(v))
elif k == 'source':
if '<' in v:
setattr(status, k, _parse_html_value(v))
setattr(status, 'source_url', _parse_a_href(v))
else:
setattr(status, k, v)
elif k == 'retweeted_status':
setattr(status, k, parse_status(v, api))
else:
setattr(status, k, v)
return status
def parse_statuses(obj, api):
statuses = []
for item in obj:
statuses.append(parse_status(item, api))
return statuses
def parse_dm(obj, api):
dm = models['direct_message']()
dm._api = api
for k, v in obj.items():
if k == 'sender' or k == 'recipient':
setattr(dm, k, parse_user(v, api))
elif k == 'created_at':
setattr(dm, k, _parse_datetime(v))
else:
setattr(dm, k, v)
return dm
def parse_directmessages(obj, api):
directmessages = []
for item in obj:
directmessages.append(parse_dm(item, api))
return directmessages
def parse_friendship(obj, api):
relationship = obj['relationship']
# parse source
source = models['friendship']()
for k, v in relationship['source'].items():
setattr(source, k, v)
# parse target
target = models['friendship']()
for k, v in relationship['target'].items():
setattr(target, k, v)
return source, target
def parse_ids(obj, api):
if isinstance(obj, list) is False:
return obj['ids']
else:
return obj
def parse_saved_search(obj, api):
ss = models['saved_search']()
ss._api = api
for k, v in obj.items():
if k == 'created_at':
setattr(ss, k, _parse_datetime(v))
else:
setattr(ss, k, v)
return ss
def parse_saved_searches(obj, api):
saved_searches = []
saved_search = models['saved_search']()
for item in obj:
saved_searches.append(parse_saved_search(item, api))
return saved_searches
def parse_search_result(obj, api):
result = models['search_result']()
for k, v in obj.items():
if k == 'created_at':
setattr(result, k, _parse_search_datetime(v))
elif k == 'source':
setattr(result, k, _parse_html_value(unescape_html(v)))
else:
setattr(result, k, v)
return result
def parse_search_results(obj, api):
results = obj['results']
result_objects = []
for item in results:
result_objects.append(parse_search_result(item, api))
return result_objects
def parse_list(obj, api):
lst = models['list']()
for k,v in obj.items():
if k == 'user':
setattr(lst, k, parse_user(v, api))
else:
setattr(lst, k, v)
return lst
def parse_lists(obj, api):
lists = []
for item in obj['lists']:
lists.append(parse_list(item, api))
return lists
| gabelula/b-counted | tweepy/parsers.py | Python | apache-2.0 | 5,676 |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import warnings
from itertools import cycle
from matplotlib.pyplot import cm
import numpy as np
import pandas as pd
from IPython.display import display, HTML
import empyrical.utils
from . import pos
from . import txn
APPROX_BDAYS_PER_MONTH = 21
APPROX_BDAYS_PER_YEAR = 252
MONTHS_PER_YEAR = 12
WEEKS_PER_YEAR = 52
MM_DISPLAY_UNIT = 1000000.
DAILY = 'daily'
WEEKLY = 'weekly'
MONTHLY = 'monthly'
YEARLY = 'yearly'
ANNUALIZATION_FACTORS = {
DAILY: APPROX_BDAYS_PER_YEAR,
WEEKLY: WEEKS_PER_YEAR,
MONTHLY: MONTHS_PER_YEAR
}
COLORMAP = 'Paired'
COLORS = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231',
'#911eb4', '#46f0f0', '#f032e6', '#d2f53c', '#fabebe',
'#008080', '#e6beff', '#aa6e28', '#800000', '#aaffc3',
'#808000', '#ffd8b1', '#000080', '#808080']
def one_dec_places(x, pos):
"""
Adds 1/10th decimal to plot ticks.
"""
return '%.1f' % x
def two_dec_places(x, pos):
"""
Adds 1/100th decimal to plot ticks.
"""
return '%.2f' % x
def percentage(x, pos):
"""
Adds percentage sign to plot ticks.
"""
return '%.0f%%' % x
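# Hedged usage sketch: these helpers follow matplotlib's FuncFormatter
# protocol (value plus tick position), e.g.
#   from matplotlib.ticker import FuncFormatter
#   ax.yaxis.set_major_formatter(FuncFormatter(percentage))
# so a tick at 12.3456 is rendered as '12%'.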
def format_asset(asset):
"""
If zipline asset objects are used, we want to print them out prettily
within the tear sheet. This function should only be applied directly
before displaying.
"""
try:
import zipline.assets
except ImportError:
return asset
if isinstance(asset, zipline.assets.Asset):
return asset.symbol
else:
return asset
def vectorize(func):
"""
Decorator so that functions can be written to work on Series but
may still be called with DataFrames.
"""
def wrapper(df, *args, **kwargs):
if df.ndim == 1:
return func(df, *args, **kwargs)
elif df.ndim == 2:
return df.apply(func, *args, **kwargs)
return wrapper
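# Hedged illustration (not part of the pyfolio API): a helper written for a
# Series can be lifted to DataFrames and applied column-by-column by the
# vectorize decorator above.
@vectorize
def _demean_example(series):
    """Example only: subtract the mean of a single Series."""
    return series - series.mean()
# _demean_example(pd.Series([1., 2., 3.]))            -> Series [-1., 0., 1.]
# _demean_example(pd.DataFrame({'a': [1., 2., 3.],
#                               'b': [2., 4., 6.]}))  -> demeaned per column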
def extract_rets_pos_txn_from_zipline(backtest):
"""
Extract returns, positions, transactions and leverage from the
backtest data structure returned by zipline.TradingAlgorithm.run().
The returned data structures are in a format compatible with the
rest of pyfolio and can be directly passed to
e.g. tears.create_full_tear_sheet().
Parameters
----------
backtest : pd.DataFrame
DataFrame returned by zipline.TradingAlgorithm.run()
Returns
-------
returns : pd.Series
Daily returns of strategy.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
Example (on the Quantopian research platform)
---------------------------------------------
>>> backtest = my_algo.run()
>>> returns, positions, transactions =
>>> pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
>>> pyfolio.tears.create_full_tear_sheet(returns,
>>> positions, transactions)
"""
backtest.index = backtest.index.normalize()
if backtest.index.tzinfo is None:
backtest.index = backtest.index.tz_localize('UTC')
returns = backtest.returns
raw_positions = []
for dt, pos_row in backtest.positions.iteritems():
df = pd.DataFrame(pos_row)
df.index = [dt] * len(df)
raw_positions.append(df)
if not raw_positions:
raise ValueError("The backtest does not have any positions.")
positions = pd.concat(raw_positions)
positions = pos.extract_pos(positions, backtest.ending_cash)
transactions = txn.make_transaction_frame(backtest.transactions)
if transactions.index.tzinfo is None:
transactions.index = transactions.index.tz_localize('utc')
return returns, positions, transactions
def print_table(table,
name=None,
float_format=None,
formatters=None,
header_rows=None):
"""
Pretty print a pandas DataFrame.
Uses HTML output if running inside Jupyter Notebook, otherwise
formatted text output.
Parameters
----------
table : pandas.Series or pandas.DataFrame
Table to pretty-print.
name : str, optional
Table name to display in upper left corner.
float_format : function, optional
Formatter to use for displaying table elements, passed as the
`float_format` arg to pd.Dataframe.to_html.
E.g. `'{0:.2%}'.format` for displaying 100 as '100.00%'.
formatters : list or dict, optional
Formatters to use by column, passed as the `formatters` arg to
pd.Dataframe.to_html.
header_rows : dict, optional
Extra rows to display at the top of the table.
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if name is not None:
table.columns.name = name
html = table.to_html(float_format=float_format, formatters=formatters)
if header_rows is not None:
# Count the number of columns for the text to span
n_cols = html.split('<thead>')[1].split('</thead>')[0].count('<th>')
# Generate the HTML for the extra rows
rows = ''
for name, value in header_rows.items():
rows += ('\n <tr style="text-align: right;"><th>%s</th>' +
'<td colspan=%d>%s</td></tr>') % (name, n_cols, value)
# Inject the new HTML
html = html.replace('<thead>', '<thead>' + rows)
display(HTML(html))
def standardize_data(x):
"""
Standardize an array with mean and standard deviation.
Parameters
----------
x : np.array
Array to standardize.
Returns
-------
np.array
Standardized array.
"""
return (x - np.mean(x)) / np.std(x)
def detect_intraday(positions, transactions, threshold=0.25):
"""
Attempt to detect an intraday strategy. Get the number of
positions held at the end of the day, and divide that by the
number of unique stocks transacted every day. If the average quotient
is below a threshold, then an intraday strategy is detected.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
boolean
True if an intraday strategy is detected.
"""
daily_txn = transactions.copy()
daily_txn.index = daily_txn.index.date
txn_count = daily_txn.groupby(level=0).symbol.nunique().sum()
daily_pos = positions.drop('cash', axis=1).replace(0, np.nan)
return daily_pos.count(axis=1).sum() / txn_count < threshold
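# Hedged numeric illustration of the heuristic above: a strategy that ends
# each day holding about 2 names while transacting about 20 distinct symbols
# per day gives a quotient of 2 / 20 = 0.1 < 0.25, so it is flagged as
# intraday; a strategy that keeps holding most of what it trades stays above
# the threshold and is treated as end-of-day.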
def check_intraday(estimate, returns, positions, transactions):
"""
Logic for checking if a strategy is intraday and processing it.
Parameters
----------
estimate: boolean or str, optional
Approximate returns for intraday strategies.
See description in tears.create_full_tear_sheet.
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, adjusted for intraday movement.
"""
if estimate == 'infer':
if positions is not None and transactions is not None:
if detect_intraday(positions, transactions):
warnings.warn('Detected intraday strategy; inferring positi' +
'ons from transactions. Set estimate_intraday' +
'=False to disable.')
return estimate_intraday(returns, positions, transactions)
else:
return positions
else:
return positions
elif estimate:
if positions is not None and transactions is not None:
return estimate_intraday(returns, positions, transactions)
else:
raise ValueError('Positions and txns needed to estimate intraday')
else:
return positions
def estimate_intraday(returns, positions, transactions, EOD_hour=23):
"""
Intraday strategies will often not hold positions at the day end.
This attempts to find the point in the day that best represents
the activity of the strategy on that day, and effectively resamples
the end-of-day positions with the positions at this point of day.
The point of day is found by detecting when our exposure in the
market is at its maximum point. Note that this is an estimate.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, resampled for intraday behavior.
"""
# Construct DataFrame of transaction amounts
txn_val = transactions.copy()
txn_val.index.names = ['date']
txn_val['value'] = txn_val.amount * txn_val.price
txn_val = txn_val.reset_index().pivot_table(
index='date', values='value',
columns='symbol').replace(np.nan, 0)
# Cumulate transaction amounts each day
txn_val = txn_val.groupby(txn_val.index.date).cumsum()
# Calculate exposure, then take peak of exposure every day
txn_val['exposure'] = txn_val.abs().sum(axis=1)
condition = (txn_val['exposure'] == txn_val.groupby(
pd.Grouper(freq='24H'))['exposure'].transform(max))
txn_val = txn_val[condition].drop('exposure', axis=1)
# Compute cash delta
txn_val['cash'] = -txn_val.sum(axis=1)
# Shift EOD positions to positions at start of next trading day
positions_shifted = positions.copy().shift(1).fillna(0)
starting_capital = positions.iloc[0].sum() / (1 + returns[0])
positions_shifted.cash[0] = starting_capital
# Format and add start positions to intraday position changes
txn_val.index = txn_val.index.normalize()
corrected_positions = positions_shifted.add(txn_val, fill_value=0)
corrected_positions.index.name = 'period_close'
corrected_positions.columns.name = 'sid'
return corrected_positions
def clip_returns_to_benchmark(rets, benchmark_rets):
"""
Drop entries from rets so that the start and end dates of rets match those
of benchmark_rets.
Parameters
----------
rets : pd.Series
Daily returns of the strategy, noncumulative.
- See pf.tears.create_full_tear_sheet for more details
benchmark_rets : pd.Series
Daily returns of the benchmark, noncumulative.
Returns
-------
clipped_rets : pd.Series
Daily noncumulative returns with index clipped to match that of
benchmark returns.
"""
if (rets.index[0] < benchmark_rets.index[0]) \
or (rets.index[-1] > benchmark_rets.index[-1]):
clipped_rets = rets[benchmark_rets.index]
else:
clipped_rets = rets
return clipped_rets
def to_utc(df):
"""
For use in tests; applied UTC timestamp to DataFrame.
"""
try:
df.index = df.index.tz_localize('UTC')
except TypeError:
df.index = df.index.tz_convert('UTC')
return df
def to_series(df):
"""
For use in tests; converts DataFrame's first column to Series.
"""
return df[df.columns[0]]
# This function is simply a passthrough to empyrical, but is
# required by the register_returns_func and get_symbol_rets.
default_returns_func = empyrical.utils.default_returns_func
# Settings dict to store functions/values that may
# need to be overridden depending on the users environment
SETTINGS = {
'returns_func': default_returns_func
}
def register_return_func(func):
"""
Registers the 'returns_func' that will be called for
retrieving returns data.
Parameters
----------
func : function
A function that returns a pandas Series of asset returns.
        The signature of the function must be as follows:
        >>> func(symbol, start=None, end=None)
        where symbol is an asset identifier.
Returns
-------
None
"""
SETTINGS['returns_func'] = func
def get_symbol_rets(symbol, start=None, end=None):
"""
Calls the currently registered 'returns_func'
Parameters
----------
symbol : object
An identifier for the asset whose return
series is desired.
e.g. ticker symbol or database ID
start : date, optional
Earliest date to fetch data for.
Defaults to earliest date available.
end : date, optional
Latest date to fetch data for.
Defaults to latest date available.
Returns
-------
pandas.Series
Returned by the current 'returns_func'
"""
return SETTINGS['returns_func'](symbol,
start=start,
end=end)
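# Hedged usage sketch (synthetic data; not part of the pyfolio API): any
# callable accepting (symbol, start=None, end=None) and returning a pd.Series
# of daily returns can be registered and will then back get_symbol_rets.
def _constant_returns_func(symbol, start=None, end=None):
    """Example only: a flat 10 bps daily return for any symbol."""
    index = pd.date_range(start or '2015-01-01', end or '2015-12-31',
                          freq='B', tz='UTC')
    return pd.Series(0.001, index=index, name=symbol)
# register_return_func(_constant_returns_func)
# get_symbol_rets('FAKE').head()   # 0.001 on every business day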
def configure_legend(ax, autofmt_xdate=True, change_colors=False,
rotation=30, ha='right'):
"""
Format legend for perf attribution plots:
- put legend to the right of plot instead of overlapping with it
- make legend order match up with graph lines
- set colors according to colormap
"""
chartBox = ax.get_position()
ax.set_position([chartBox.x0, chartBox.y0,
chartBox.width * 0.75, chartBox.height])
# make legend order match graph lines
handles, labels = ax.get_legend_handles_labels()
handles_and_labels_sorted = sorted(zip(handles, labels),
key=lambda x: x[0].get_ydata()[-1],
reverse=True)
handles_sorted = [h[0] for h in handles_and_labels_sorted]
labels_sorted = [h[1] for h in handles_and_labels_sorted]
if change_colors:
for handle, color in zip(handles_sorted,
cycle(COLORS)):
handle.set_color(color)
ax.legend(handles=handles_sorted,
labels=labels_sorted,
frameon=True,
framealpha=0.5,
loc='upper left',
bbox_to_anchor=(1.05, 1),
fontsize='small')
# manually rotate xticklabels instead of using matplotlib's autofmt_xdate
# because it disables xticklabels for all but the last plot
if autofmt_xdate:
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
def sample_colormap(cmap_name, n_samples):
"""
Sample a colormap from matplotlib
"""
colors = []
colormap = cm.cmap_d[cmap_name]
for i in np.linspace(0, 1, n_samples):
colors.append(colormap(i))
return colors
| chayapan/pyfolio | pyfolio/utils.py | Python | apache-2.0 | 16,144 |
from setuptools import setup, Extension
import platform
version = '1.5.0'
ext_modules = []
setup (name = 'droneapi',
zip_safe=True,
version = version,
description = 'Python language bindings for the DroneApi',
long_description = '''Python language bindings for the DroneApi''',
url = 'https://github.com/diydrones/droneapi-python',
author = '3D Robotics',
install_requires = [ 'pymavlink >= 1.1.50',
'protobuf >= 2.5.0',
'requests == 2.5.1' ],
author_email = '[email protected]',
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'
],
license='apache',
packages = ['droneapi', 'droneapi.module', 'droneapi.lib' ],
# doesn't work: package_data={'droneapi': ['examples/*']},
ext_modules = ext_modules)
| trishhyles/dronekit-python | setup.py | Python | apache-2.0 | 1,223 |
from __future__ import absolute_import, print_function, unicode_literals
import sys
import os
here = os.path.split(os.path.abspath(__file__))[0]
root = os.path.abspath(os.path.join(here, '../../'))
sys.path[0:0] = [root]
from streamparse.bolt import Bolt
class DummyBoltAutoAck(Bolt):
auto_ack = True
auto_anchor = False
auto_fail = False
def process(self, tup):
if tup.id == "emit_many":
self.emit_many([tup.values] * 5)
else:
self.emit(tup.values)
if __name__ == '__main__':
DummyBoltAutoAck().run()
| scrapinghub/streamparse | test/ipc/dummy_bolt_auto_ack.py | Python | apache-2.0 | 571 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import query_params
def test_query_array_params(capsys):
query_params.query_array_params(
gender='M',
states=['WA', 'WI', 'WV', 'WY'])
out, _ = capsys.readouterr()
assert 'James' in out
def test_query_named_params(capsys):
query_params.query_named_params(
corpus='romeoandjuliet',
min_word_count=100)
out, _ = capsys.readouterr()
assert 'love' in out
def test_query_positional_params(capsys):
query_params.query_positional_params(
corpus='romeoandjuliet',
min_word_count=100)
out, _ = capsys.readouterr()
assert 'love' in out
def test_query_struct_params(capsys):
query_params.query_struct_params(765, "hello world")
out, _ = capsys.readouterr()
assert '765' in out
assert 'hello world' in out
def test_query_timestamp_params(capsys):
query_params.query_timestamp_params(2016, 12, 7, 8, 0)
out, _ = capsys.readouterr()
assert '2016, 12, 7, 9, 0' in out
| sharbison3/python-docs-samples | bigquery/cloud-client/query_params_test.py | Python | apache-2.0 | 1,551 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wavenet layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
layers = tf.contrib.layers
slim = tf.contrib.slim
def wavenet_layer(inp,
depth,
width=3,
rate=1,
context=None,
scope=None,
reuse=None):
"""Single wavenet layer.
This assumes that the input is a rank 4 tensor of shape:
  [batch, reduced_text_dimension, auxiliary_text_dimension, feature_depth]
If rate is more than one, this will be reshaped to
[B, R//(2**(rate-1)), A*(2**(rate-1)), D]
Then a conv2d will be applied with kernel size [width, 1].
  The rest of the wavenet activations will be applied and the result will be
  returned without reshaping; this allows a multilayer wavenet to be implemented
  by subsequent calls to wavenet_layer with rate=2.
Arguments:
inp: input tensor
depth: depth of the intermediate nonlinear activations before reduced.
    width: the width of the conv filter, 3 by default.
rate: the dilation, use 1 in the first layer and 2 in subsequent layers.
context: Optional 2-D [batch, dim] tensor on which to condition each node.
scope: name of scope if given.
reuse: reuse for variable scope if given.
Returns:
output: output tensor.
"""
tf.logging.info('Creating wavenet layer d=%d w=%d r=%d', depth, width, rate)
with tf.variable_scope(scope, 'wavenet_layer', [inp], reuse=reuse):
current_shape = inp.get_shape()
true_shape = tf.shape(inp)
in_depth = current_shape[3].value
mul = 2**(rate - 1)
reshaped = tf.reshape(
inp,
[true_shape[0], true_shape[1] // mul, mul * true_shape[2], in_depth])
conved = slim.conv2d(
reshaped,
2 * depth, [width, 1],
rate=1,
padding='SAME',
activation_fn=None)
if context is not None:
conved += layers.linear(context, 2 * depth)[:, None, None, :]
act = tf.nn.tanh(conved[:, :, :, :depth])
gate = tf.nn.sigmoid(conved[:, :, :, depth:])
z = act * gate
if in_depth != depth:
z = slim.conv2d(z, in_depth, [1, 1], padding='SAME', activation_fn=None)
return z + reshaped
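# Illustrative shape walk-through (added for exposition; the sizes are made-up
# examples): for an input of shape [2, 8, 1, 16], a call with rate=1 keeps the
# layout [2, 8, 1, 16], while rate=2 gives mul = 2**(2-1) = 2, so the input is
# viewed as [2, 8//2, 2*1, 16] = [2, 4, 2, 16] before the [width, 1] conv,
# which is how stacked rate=2 layers realise increasing dilation.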
def wavenet_block(net,
num_layers,
depth,
comb_weight=1.0,
context=None,
scope=None,
reuse=None,
width=3,
keep_prob=1.0):
"""Stack many increasingly dilated wavenet layers together.
Arguments:
net: input tensor, expected to be 4D to start [batch, text_length, 1, dim]
    num_layers: Number of wavenet layers to apply in the block; note that this
requires the input text_length to be divisible by 2**num_layers.
depth: The depth to use for each of the wavenet layers, internally.
comb_weight: The weight for the residual update (multiplies the residual
value).
context: Optional 2-D tensor on which to condition each node.
scope: Name of scope if given.
reuse: Reuse for variable scope if given.
width: Patch size of the convolution.
keep_prob: Keep probability for the block input dropout.
Returns:
output: output tensor, reshaped back to be [batch, text_length, 1, dim]
"""
inp = net
tf.logging.info('Creating wavenet block with width %d', width)
with tf.variable_scope(scope, 'wavenet_block', [net], reuse=reuse):
# first wavenet layer is a rate=1 conv.
input_shape = tf.shape(net)
if keep_prob < 1.0:
inp_shape = tf.shape(net)
      noise_shape = (inp_shape[0], 1, inp_shape[2], inp_shape[3])
net = tf.nn.dropout(
net,
rate=(1.0 - keep_prob),
noise_shape=noise_shape)
net = wavenet_layer(net, depth, rate=1, width=width)
for _ in range(num_layers):
# repeated layers are rate=2 but operate on subsequently reshaped inputs.
# so as to implement increasing dilations.
net = wavenet_layer(net, depth, rate=2, width=width, context=context)
# reshape back at top of block
net = tf.reshape(net, input_shape)
return comb_weight * net + inp
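# Minimal usage sketch (added for exposition, not part of the original module).
# It assumes TF 1.x with tf.contrib available, which the module already
# requires; the batch size, sequence length and depths below are arbitrary.
if __name__ == '__main__':
  example_inp = tf.zeros([2, 8, 1, 16])  # [batch, text_length, 1, dim]
  # text_length (8) must be divisible by 2**num_layers (2**3 == 8).
  example_out = wavenet_block(example_inp, num_layers=3, depth=32)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # The block is residual and reshapes back, so output shape == input shape.
    print(sess.run(example_out).shape)  # (2, 8, 1, 16)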
| tensorflow/deepmath | deepmath/guidance/wavenet.py | Python | apache-2.0 | 4,883 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six.moves.cPickle as pickle
import mock
import os
import unittest
import random
import itertools
from contextlib import closing
from gzip import GzipFile
from tempfile import mkdtemp
from shutil import rmtree
from test import listen_zero
from test.unit import (
make_timestamp_iter, debug_logger, patch_policies, mocked_http_conn,
FakeLogger)
from time import time
from distutils.dir_util import mkpath
from eventlet import spawn, Timeout
from swift.obj import updater as object_updater
from swift.obj.diskfile import (
ASYNCDIR_BASE, get_async_dir, DiskFileManager, get_tmp_dir)
from swift.common.ring import RingData
from swift.common import utils
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import bytes_to_wsgi
from swift.common.utils import (
hash_path, normalize_timestamp, mkdirs, write_pickle)
from swift.common.storage_policy import StoragePolicy, POLICIES
class MockPool(object):
def __init__(self, *a, **kw):
pass
def spawn(self, func, *args, **kwargs):
func(*args, **kwargs)
def waitall(self):
pass
def __enter__(self):
return self
def __exit__(self, *a, **kw):
pass
_mocked_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True)]
@patch_policies(_mocked_policies)
class TestObjectUpdater(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = b'endcap'
utils.HASH_PATH_PREFIX = b''
self.testdir = mkdtemp()
ring_file = os.path.join(self.testdir, 'container.ring.gz')
with closing(GzipFile(ring_file, 'wb')) as f:
pickle.dump(
RingData([[0, 1, 2, 0, 1, 2],
[1, 2, 0, 1, 2, 0],
[2, 3, 1, 2, 3, 1]],
[{'id': 0, 'ip': '127.0.0.1', 'port': 1,
'device': 'sda1', 'zone': 0},
{'id': 1, 'ip': '127.0.0.1', 'port': 1,
'device': 'sda1', 'zone': 2},
{'id': 2, 'ip': '127.0.0.1', 'port': 1,
'device': 'sda1', 'zone': 4},
{'id': 3, 'ip': '127.0.0.1', 'port': 1,
'device': 'sda1', 'zone': 6}], 30),
f)
self.devices_dir = os.path.join(self.testdir, 'devices')
os.mkdir(self.devices_dir)
self.sda1 = os.path.join(self.devices_dir, 'sda1')
os.mkdir(self.sda1)
for policy in POLICIES:
os.mkdir(os.path.join(self.sda1, get_tmp_dir(policy)))
self.logger = debug_logger()
self.ts_iter = make_timestamp_iter()
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_creation(self):
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '2',
'node_timeout': '5.5'})
self.assertTrue(hasattr(ou, 'logger'))
self.assertTrue(ou.logger is not None)
self.assertEqual(ou.devices, self.devices_dir)
self.assertEqual(ou.interval, 1)
self.assertEqual(ou.concurrency, 2)
self.assertEqual(ou.node_timeout, 5.5)
self.assertTrue(ou.get_container_ring() is not None)
def test_conf_params(self):
# defaults
daemon = object_updater.ObjectUpdater({}, logger=self.logger)
self.assertEqual(daemon.devices, '/srv/node')
self.assertEqual(daemon.mount_check, True)
self.assertEqual(daemon.swift_dir, '/etc/swift')
self.assertEqual(daemon.interval, 300)
self.assertEqual(daemon.concurrency, 8)
self.assertEqual(daemon.updater_workers, 1)
self.assertEqual(daemon.max_objects_per_second, 50.0)
# non-defaults
conf = {
'devices': '/some/where/else',
'mount_check': 'huh?',
'swift_dir': '/not/here',
'interval': '600',
'concurrency': '2',
'updater_workers': '3',
'objects_per_second': '10.5',
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
self.assertEqual(daemon.devices, '/some/where/else')
self.assertEqual(daemon.mount_check, False)
self.assertEqual(daemon.swift_dir, '/not/here')
self.assertEqual(daemon.interval, 600)
self.assertEqual(daemon.concurrency, 2)
self.assertEqual(daemon.updater_workers, 3)
self.assertEqual(daemon.max_objects_per_second, 10.5)
# check deprecated option
daemon = object_updater.ObjectUpdater({'slowdown': '0.04'},
logger=self.logger)
self.assertEqual(daemon.max_objects_per_second, 20.0)
def check_bad(conf):
with self.assertRaises(ValueError):
object_updater.ObjectUpdater(conf, logger=self.logger)
check_bad({'interval': 'foo'})
check_bad({'interval': '300.0'})
check_bad({'concurrency': 'bar'})
check_bad({'concurrency': '1.0'})
check_bad({'slowdown': 'baz'})
check_bad({'objects_per_second': 'quux'})
@mock.patch('os.listdir')
def test_listdir_with_exception(self, mock_listdir):
e = OSError('permission_denied')
mock_listdir.side_effect = e
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
paths = daemon._listdir('foo/bar')
self.assertEqual([], paths)
log_lines = self.logger.get_lines_for_level('error')
msg = ('ERROR: Unable to access foo/bar: permission_denied')
self.assertEqual(log_lines[0], msg)
@mock.patch('os.listdir', return_value=['foo', 'bar'])
def test_listdir_without_exception(self, mock_listdir):
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
path = daemon._listdir('foo/bar/')
log_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(log_lines), 0)
self.assertEqual(path, ['foo', 'bar'])
def test_object_sweep(self):
def check_with_idx(index, warn, should_skip):
if int(index) > 0:
asyncdir = os.path.join(self.sda1,
ASYNCDIR_BASE + "-" + index)
else:
asyncdir = os.path.join(self.sda1, ASYNCDIR_BASE)
prefix_dir = os.path.join(asyncdir, 'abc')
mkpath(prefix_dir)
# A non-directory where directory is expected should just be
# skipped, but should not stop processing of subsequent
# directories.
not_dirs = (
os.path.join(self.sda1, 'not_a_dir'),
os.path.join(self.sda1,
ASYNCDIR_BASE + '-' + 'twentington'),
os.path.join(self.sda1,
ASYNCDIR_BASE + '-' + str(int(index) + 100)))
for not_dir in not_dirs:
with open(not_dir, 'w'):
pass
objects = {
'a': [1089.3, 18.37, 12.83, 1.3],
'b': [49.4, 49.3, 49.2, 49.1],
'c': [109984.123],
}
expected = set()
for o, timestamps in objects.items():
ohash = hash_path('account', 'container', o)
for t in timestamps:
o_path = os.path.join(prefix_dir, ohash + '-' +
normalize_timestamp(t))
if t == timestamps[0]:
expected.add((o_path, int(index)))
write_pickle({}, o_path)
seen = set()
class MockObjectUpdater(object_updater.ObjectUpdater):
def process_object_update(self, update_path, device, policy):
seen.add((update_path, int(policy)))
os.unlink(update_path)
ou = MockObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '5'})
ou.logger = mock_logger = mock.MagicMock()
ou.object_sweep(self.sda1)
self.assertEqual(mock_logger.warning.call_count, warn)
self.assertTrue(
os.path.exists(os.path.join(self.sda1, 'not_a_dir')))
if should_skip:
# if we were supposed to skip over the dir, we didn't process
# anything at all
self.assertEqual(set(), seen)
else:
self.assertEqual(expected, seen)
# test cleanup: the tempdir gets cleaned up between runs, but this
# way we can be called multiple times in a single test method
for not_dir in not_dirs:
os.unlink(not_dir)
# first check with valid policies
for pol in POLICIES:
check_with_idx(str(pol.idx), 0, should_skip=False)
# now check with a bogus async dir policy and make sure we get
# a warning indicating that the '99' policy isn't valid
check_with_idx('99', 1, should_skip=True)
def test_sweep_logs(self):
asyncdir = os.path.join(self.sda1, ASYNCDIR_BASE)
prefix_dir = os.path.join(asyncdir, 'abc')
mkpath(prefix_dir)
for o, t in [('abc', 123), ('def', 234), ('ghi', 345),
('jkl', 456), ('mno', 567)]:
ohash = hash_path('account', 'container', o)
o_path = os.path.join(prefix_dir, ohash + '-' +
normalize_timestamp(t))
write_pickle({}, o_path)
class MockObjectUpdater(object_updater.ObjectUpdater):
def process_object_update(self, update_path, device, policy):
os.unlink(update_path)
self.stats.successes += 1
self.stats.unlinks += 1
logger = FakeLogger()
ou = MockObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'report_interval': '10.0',
'node_timeout': '5'}, logger=logger)
now = [time()]
def mock_time_function():
rv = now[0]
now[0] += 5
return rv
# With 10s between updates, time() advancing 5s every time we look,
# and 5 async_pendings on disk, we should get at least two progress
# lines.
with mock.patch('swift.obj.updater.time',
mock.MagicMock(time=mock_time_function)), \
mock.patch.object(object_updater, 'ContextPool', MockPool):
ou.object_sweep(self.sda1)
info_lines = logger.get_lines_for_level('info')
self.assertEqual(4, len(info_lines))
self.assertIn("sweep starting", info_lines[0])
self.assertIn(self.sda1, info_lines[0])
self.assertIn("sweep progress", info_lines[1])
# the space ensures it's a positive number
self.assertIn(
"2 successes, 0 failures, 0 quarantines, 2 unlinks, 0 errors, "
"0 redirects",
info_lines[1])
self.assertIn(self.sda1, info_lines[1])
self.assertIn("sweep progress", info_lines[2])
self.assertIn(
"4 successes, 0 failures, 0 quarantines, 4 unlinks, 0 errors, "
"0 redirects",
info_lines[2])
self.assertIn(self.sda1, info_lines[2])
self.assertIn("sweep complete", info_lines[3])
self.assertIn(
"5 successes, 0 failures, 0 quarantines, 5 unlinks, 0 errors, "
"0 redirects",
info_lines[3])
self.assertIn(self.sda1, info_lines[3])
def test_sweep_logs_multiple_policies(self):
for policy in _mocked_policies:
asyncdir = os.path.join(self.sda1, get_async_dir(policy.idx))
prefix_dir = os.path.join(asyncdir, 'abc')
mkpath(prefix_dir)
for o, t in [('abc', 123), ('def', 234), ('ghi', 345)]:
ohash = hash_path('account', 'container%d' % policy.idx, o)
o_path = os.path.join(prefix_dir, ohash + '-' +
normalize_timestamp(t))
write_pickle({}, o_path)
class MockObjectUpdater(object_updater.ObjectUpdater):
def process_object_update(self, update_path, device, policy):
os.unlink(update_path)
self.stats.successes += 1
self.stats.unlinks += 1
logger = FakeLogger()
ou = MockObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'report_interval': '10.0',
'node_timeout': '5'}, logger=logger)
now = [time()]
def mock_time():
rv = now[0]
now[0] += 0.01
return rv
with mock.patch('swift.obj.updater.time',
mock.MagicMock(time=mock_time)):
ou.object_sweep(self.sda1)
completion_lines = [l for l in logger.get_lines_for_level('info')
if "sweep complete" in l]
self.assertEqual(len(completion_lines), 1)
self.assertIn("sweep complete", completion_lines[0])
self.assertIn(
"6 successes, 0 failures, 0 quarantines, 6 unlinks, 0 errors, "
"0 redirects",
completion_lines[0])
@mock.patch.object(object_updater, 'check_drive')
def test_run_once_with_disk_unmounted(self, mock_check_drive):
mock_check_drive.side_effect = ValueError
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'})
ou.run_once()
async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(async_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
        # each run calls check_drive
self.assertEqual([
mock.call(self.devices_dir, 'sda1', False),
mock.call(self.devices_dir, 'sda1', False),
], mock_check_drive.mock_calls)
mock_check_drive.reset_mock()
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'TrUe',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'}, logger=self.logger)
odd_dir = os.path.join(async_dir, 'not really supposed '
'to be here')
os.mkdir(odd_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
self.assertTrue(os.path.exists(odd_dir)) # skipped - not mounted!
self.assertEqual([
mock.call(self.devices_dir, 'sda1', True),
], mock_check_drive.mock_calls)
self.assertEqual(ou.logger.get_increment_counts(), {})
@mock.patch.object(object_updater, 'check_drive')
def test_run_once(self, mock_check_drive):
mock_check_drive.side_effect = lambda r, d, mc: os.path.join(r, d)
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'}, logger=self.logger)
ou.run_once()
async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
os.mkdir(async_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
        # each run calls check_drive
self.assertEqual([
mock.call(self.devices_dir, 'sda1', False),
mock.call(self.devices_dir, 'sda1', False),
], mock_check_drive.mock_calls)
mock_check_drive.reset_mock()
ou = object_updater.ObjectUpdater({
'devices': self.devices_dir,
'mount_check': 'TrUe',
'swift_dir': self.testdir,
'interval': '1',
'concurrency': '1',
'node_timeout': '15'}, logger=self.logger)
odd_dir = os.path.join(async_dir, 'not really supposed '
'to be here')
os.mkdir(odd_dir)
ou.run_once()
self.assertTrue(os.path.exists(async_dir))
self.assertEqual([
mock.call(self.devices_dir, 'sda1', True),
], mock_check_drive.mock_calls)
ohash = hash_path('a', 'c', 'o')
odir = os.path.join(async_dir, ohash[-3:])
mkdirs(odir)
older_op_path = os.path.join(
odir,
'%s-%s' % (ohash, normalize_timestamp(time() - 1)))
op_path = os.path.join(
odir,
'%s-%s' % (ohash, normalize_timestamp(time())))
for path in (op_path, older_op_path):
with open(path, 'wb') as async_pending:
pickle.dump({'op': 'PUT', 'account': 'a',
'container': 'c',
'obj': 'o', 'headers': {
'X-Container-Timestamp':
normalize_timestamp(0)}},
async_pending)
ou.run_once()
self.assertTrue(not os.path.exists(older_op_path))
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.get_increment_counts(),
{'failures': 1, 'unlinks': 1})
self.assertIsNone(pickle.load(open(op_path, 'rb')).get('successes'))
bindsock = listen_zero()
def accepter(sock, return_code):
try:
with Timeout(3):
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write(b'HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEqual(inc.readline(),
b'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
headers = HeaderKeyDict()
line = bytes_to_wsgi(inc.readline())
while line and line != '\r\n':
headers[line.split(':')[0]] = \
line.split(':')[1].strip()
line = bytes_to_wsgi(inc.readline())
self.assertIn('x-container-timestamp', headers)
self.assertIn('X-Backend-Storage-Policy-Index',
headers)
except BaseException as err:
return err
return None
def accept(return_codes):
try:
events = []
for code in return_codes:
with Timeout(3):
sock, addr = bindsock.accept()
events.append(
spawn(accepter, sock, code))
for event in events:
err = event.wait()
if err:
raise err
except BaseException as err:
return err
return None
event = spawn(accept, [201, 500, 500])
for dev in ou.get_container_ring().devs:
if dev is not None:
dev['port'] = bindsock.getsockname()[1]
ou.logger._clear()
ou.run_once()
err = event.wait()
if err:
raise err
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.get_increment_counts(),
{'failures': 1})
self.assertEqual([0],
pickle.load(open(op_path, 'rb')).get('successes'))
event = spawn(accept, [404, 201])
ou.logger._clear()
ou.run_once()
err = event.wait()
if err:
raise err
self.assertTrue(os.path.exists(op_path))
self.assertEqual(ou.logger.get_increment_counts(),
{'failures': 1})
self.assertEqual([0, 2],
pickle.load(open(op_path, 'rb')).get('successes'))
event = spawn(accept, [201])
ou.logger._clear()
ou.run_once()
err = event.wait()
if err:
raise err
# we remove the async_pending and its containing suffix dir, but not
# anything above that
self.assertFalse(os.path.exists(op_path))
self.assertFalse(os.path.exists(os.path.dirname(op_path)))
self.assertTrue(os.path.exists(os.path.dirname(os.path.dirname(
op_path))))
self.assertEqual(ou.logger.get_increment_counts(),
{'unlinks': 1, 'successes': 1})
def test_obj_put_legacy_updates(self):
ts = (normalize_timestamp(t) for t in
itertools.count(int(time())))
policy = POLICIES.get_by_index(0)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
async_dir = os.path.join(self.sda1, get_async_dir(policy))
os.mkdir(async_dir)
account, container, obj = 'a', 'c', 'o'
# write an async
for op in ('PUT', 'DELETE'):
self.logger._clear()
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
dfmanager = DiskFileManager(conf, daemon.logger)
# don't include storage-policy-index in headers_out pickle
headers_out = HeaderKeyDict({
'x-size': 0,
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': next(ts),
})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
dfmanager.pickle_async_update(self.sda1, account, container, obj,
data, next(ts), policy)
request_log = []
def capture(*args, **kwargs):
request_log.append((args, kwargs))
# run once
fake_status_codes = [200, 200, 200]
with mocked_http_conn(*fake_status_codes, give_connect=capture):
daemon.run_once()
self.assertEqual(len(fake_status_codes), len(request_log))
for request_args, request_kwargs in request_log:
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, op)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
str(int(policy)))
self.assertEqual(daemon.logger.get_increment_counts(),
{'successes': 1, 'unlinks': 1,
'async_pendings': 1})
def _write_async_update(self, dfmanager, timestamp, policy,
headers=None, container_path=None):
# write an async
account, container, obj = 'a', 'c', 'o'
op = 'PUT'
headers_out = headers or {
'x-size': 0,
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': timestamp.internal,
'X-Backend-Storage-Policy-Index': int(policy),
'User-Agent': 'object-server %s' % os.getpid()
}
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
if container_path:
data['container_path'] = container_path
dfmanager.pickle_async_update(self.sda1, account, container, obj,
data, timestamp, policy)
def test_obj_put_async_updates(self):
ts_iter = make_timestamp_iter()
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
def do_test(headers_out, expected, container_path=None):
# write an async
dfmanager = DiskFileManager(conf, daemon.logger)
self._write_async_update(dfmanager, next(ts_iter), policies[0],
headers=headers_out,
container_path=container_path)
request_log = []
def capture(*args, **kwargs):
request_log.append((args, kwargs))
# run once
fake_status_codes = [
200, # object update success
200, # object update success
200, # object update conflict
]
with mocked_http_conn(*fake_status_codes, give_connect=capture):
daemon.run_once()
self.assertEqual(len(fake_status_codes), len(request_log))
for request_args, request_kwargs in request_log:
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, 'PUT')
self.assertDictEqual(expected, headers)
self.assertEqual(
daemon.logger.get_increment_counts(),
{'successes': 1, 'unlinks': 1, 'async_pendings': 1})
self.assertFalse(os.listdir(async_dir))
daemon.logger.clear()
ts = next(ts_iter)
# use a dict rather than HeaderKeyDict so we can vary the case of the
# pickled headers
headers_out = {
'x-size': 0,
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': ts.normal,
'X-Backend-Storage-Policy-Index': int(policies[0]),
'User-Agent': 'object-server %s' % os.getpid()
}
expected = {
'X-Size': '0',
'X-Content-Type': 'text/plain',
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',
'X-Timestamp': ts.normal,
'X-Backend-Storage-Policy-Index': str(int(policies[0])),
'User-Agent': 'object-updater %s' % os.getpid(),
'X-Backend-Accept-Redirect': 'true',
'X-Backend-Accept-Quoted-Location': 'true',
}
# always expect X-Backend-Accept-Redirect and
# X-Backend-Accept-Quoted-Location to be true
do_test(headers_out, expected, container_path='.shards_a/shard_c')
do_test(headers_out, expected)
# ...unless they're already set
expected['X-Backend-Accept-Redirect'] = 'false'
expected['X-Backend-Accept-Quoted-Location'] = 'false'
headers_out_2 = dict(headers_out)
headers_out_2['X-Backend-Accept-Redirect'] = 'false'
headers_out_2['X-Backend-Accept-Quoted-Location'] = 'false'
do_test(headers_out_2, expected)
# updater should add policy header if missing
expected['X-Backend-Accept-Redirect'] = 'true'
expected['X-Backend-Accept-Quoted-Location'] = 'true'
headers_out['X-Backend-Storage-Policy-Index'] = None
do_test(headers_out, expected)
# updater should not overwrite a mismatched policy header
headers_out['X-Backend-Storage-Policy-Index'] = int(policies[1])
expected['X-Backend-Storage-Policy-Index'] = str(int(policies[1]))
do_test(headers_out, expected)
# check for case insensitivity
headers_out['user-agent'] = headers_out.pop('User-Agent')
headers_out['x-backend-storage-policy-index'] = headers_out.pop(
'X-Backend-Storage-Policy-Index')
do_test(headers_out, expected)
def _check_update_requests(self, requests, timestamp, policy):
# do some sanity checks on update request
expected_headers = {
'X-Size': '0',
'X-Content-Type': 'text/plain',
'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',
'X-Timestamp': timestamp.internal,
'X-Backend-Storage-Policy-Index': str(int(policy)),
'User-Agent': 'object-updater %s' % os.getpid(),
'X-Backend-Accept-Redirect': 'true',
'X-Backend-Accept-Quoted-Location': 'true'}
for request in requests:
self.assertEqual('PUT', request['method'])
self.assertDictEqual(expected_headers, request['headers'])
def test_obj_put_async_root_update_redirected(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
# run once
ts_redirect_1 = next(self.ts_iter)
ts_redirect_2 = next(self.ts_iter)
fake_responses = [
# first round of update attempts, newest redirect should be chosen
(200, {}),
(301, {'Location': '/.shards_a/c_shard_new/o',
'X-Backend-Redirect-Timestamp': ts_redirect_2.internal}),
(301, {'Location': '/.shards_a/c_shard_old/o',
'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
# second round of update attempts
(200, {}),
(200, {}),
(200, {}),
]
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
self.assertEqual(['/sda1/0/a/c/o'] * 3 +
['/sda1/0/.shards_a/c_shard_new/o'] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 1, 'successes': 1,
'unlinks': 1, 'async_pendings': 1},
daemon.logger.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
def test_obj_put_async_root_update_redirected_previous_success(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
orig_async_path, orig_async_data = self._check_async_file(async_dir)
# run once
with mocked_http_conn(
507, 200, 507) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(['/sda1/0/a/c/o'] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'failures': 1, 'async_pendings': 1},
daemon.logger.get_increment_counts())
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(dict(orig_async_data, successes=[1]), async_data)
# run again - expect 3 redirected updates despite previous success
ts_redirect = next(self.ts_iter)
resp_headers_1 = {'Location': '/.shards_a/c_shard_1/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 2 + [(200, {})] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:2], ts_obj, policies[0])
self._check_update_requests(conn.requests[2:], ts_obj, policies[0])
root_part = daemon.container_ring.get_part('a/c')
shard_1_part = daemon.container_ring.get_part('.shards_a/c_shard_1')
self.assertEqual(
['/sda1/%s/a/c/o' % root_part] * 2 +
['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 1, 'successes': 1, 'failures': 1, 'unlinks': 1,
'async_pendings': 1},
daemon.logger.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
def _check_async_file(self, async_dir):
async_subdirs = os.listdir(async_dir)
self.assertEqual([mock.ANY], async_subdirs)
async_files = os.listdir(os.path.join(async_dir, async_subdirs[0]))
self.assertEqual([mock.ANY], async_files)
async_path = os.path.join(
async_dir, async_subdirs[0], async_files[0])
with open(async_path, 'rb') as fd:
async_data = pickle.load(fd)
return async_path, async_data
def _check_obj_put_async_update_bad_redirect_headers(self, headers):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
orig_async_path, orig_async_data = self._check_async_file(async_dir)
fake_responses = [
(301, headers),
(301, headers),
(301, headers),
]
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(['/sda1/0/a/c/o'] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'failures': 1, 'async_pendings': 1},
daemon.logger.get_increment_counts())
# async file still intact
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(orig_async_data, async_data)
return daemon
def test_obj_put_async_root_update_missing_location_header(self):
headers = {
'X-Backend-Redirect-Timestamp': next(self.ts_iter).internal}
self._check_obj_put_async_update_bad_redirect_headers(headers)
def test_obj_put_async_root_update_bad_location_header(self):
headers = {
'Location': 'bad bad bad',
'X-Backend-Redirect-Timestamp': next(self.ts_iter).internal}
daemon = self._check_obj_put_async_update_bad_redirect_headers(headers)
error_lines = daemon.logger.get_lines_for_level('error')
self.assertIn('Container update failed', error_lines[0])
self.assertIn('Invalid path: bad%20bad%20bad', error_lines[0])
def test_obj_put_async_shard_update_redirected_twice(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0],
container_path='.shards_a/c_shard_older')
orig_async_path, orig_async_data = self._check_async_file(async_dir)
# run once
ts_redirect_1 = next(self.ts_iter)
ts_redirect_2 = next(self.ts_iter)
ts_redirect_3 = next(self.ts_iter)
fake_responses = [
# 1st round of redirects, newest redirect should be chosen
(301, {'Location': '/.shards_a/c_shard_old/o',
'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
(301, {'Location': '/.shards_a/c%5Fshard%5Fnew/o',
'X-Backend-Location-Is-Quoted': 'true',
'X-Backend-Redirect-Timestamp': ts_redirect_2.internal}),
(301, {'Location': '/.shards_a/c%5Fshard%5Fold/o',
'X-Backend-Location-Is-Quoted': 'true',
'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
# 2nd round of redirects
(301, {'Location': '/.shards_a/c_shard_newer/o',
'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
(301, {'Location': '/.shards_a/c_shard_newer/o',
'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
(301, {'Location': '/.shards_a/c_shard_newer/o',
'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
]
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
# only *one* set of redirected requests is attempted per cycle
older_part = daemon.container_ring.get_part('.shards_a/c_shard_older')
new_part = daemon.container_ring.get_part('.shards_a/c_shard_new')
newer_part = daemon.container_ring.get_part('.shards_a/c_shard_newer')
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_older/o' % older_part] * 3 +
['/sda1/%s/.shards_a/c_shard_new/o' % new_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 2, 'async_pendings': 1},
daemon.logger.get_increment_counts())
# update failed, we still have pending file with most recent redirect
# response Location header value added to data
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(
dict(orig_async_data, container_path='.shards_a/c_shard_newer',
redirect_history=['.shards_a/c_shard_new',
'.shards_a/c_shard_newer']),
async_data)
# next cycle, should get latest redirect from pickled async update
fake_responses = [(200, {})] * 3
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_newer/o' % newer_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 2, 'successes': 1, 'unlinks': 1,
'async_pendings': 1},
daemon.logger.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
def test_obj_put_async_update_redirection_loop(self):
policies = list(POLICIES)
random.shuffle(policies)
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
os.mkdir(async_dir)
dfmanager = DiskFileManager(conf, daemon.logger)
ts_obj = next(self.ts_iter)
self._write_async_update(dfmanager, ts_obj, policies[0])
orig_async_path, orig_async_data = self._check_async_file(async_dir)
# run once
ts_redirect = next(self.ts_iter)
resp_headers_1 = {'Location': '/.shards_a/c_shard_1/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
resp_headers_2 = {'Location': '/.shards_a/c_shard_2/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 3 + [(301, resp_headers_2)] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
# only *one* set of redirected requests is attempted per cycle
root_part = daemon.container_ring.get_part('a/c')
shard_1_part = daemon.container_ring.get_part('.shards_a/c_shard_1')
shard_2_part = daemon.container_ring.get_part('.shards_a/c_shard_2')
shard_3_part = daemon.container_ring.get_part('.shards_a/c_shard_3')
self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3 +
['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 2, 'async_pendings': 1},
daemon.logger.get_increment_counts())
# update failed, we still have pending file with most recent redirect
# response Location header value added to data
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(
dict(orig_async_data, container_path='.shards_a/c_shard_2',
redirect_history=['.shards_a/c_shard_1',
'.shards_a/c_shard_2']),
async_data)
# next cycle, more redirects! first is to previously visited location
resp_headers_3 = {'Location': '/.shards_a/c_shard_3/o',
'X-Backend-Redirect-Timestamp': ts_redirect.internal}
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
# first try the previously persisted container path, response to that
# creates a loop so ignore and send to root
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_2/o' % shard_2_part] * 3 +
['/sda1/%s/a/c/o' % root_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 4, 'async_pendings': 1},
daemon.logger.get_increment_counts())
# update failed, we still have pending file with most recent redirect
# response Location header value from root added to persisted data
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
# note: redirect_history was reset when falling back to root
self.assertEqual(
dict(orig_async_data, container_path='.shards_a/c_shard_3',
redirect_history=['.shards_a/c_shard_3']),
async_data)
# next cycle, more redirects! first is to a location visited previously
# but not since last fall back to root, so that location IS tried;
# second is to a location visited since last fall back to root so that
# location is NOT tried
fake_responses = (
# 1st round of redirects, 2nd round of redirects
[(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(
['/sda1/%s/.shards_a/c_shard_3/o' % shard_3_part] * 3 +
['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 6, 'async_pendings': 1},
daemon.logger.get_increment_counts())
# update failed, we still have pending file, but container_path is None
# because most recent redirect location was a repeat
async_path, async_data = self._check_async_file(async_dir)
self.assertEqual(orig_async_path, async_path)
self.assertEqual(
dict(orig_async_data, container_path=None,
redirect_history=[]),
async_data)
# next cycle, persisted container path is None so update should go to
# root, this time it succeeds
fake_responses = [(200, {})] * 3
fake_status_codes, fake_headers = zip(*fake_responses)
with mocked_http_conn(
*fake_status_codes, headers=fake_headers) as conn:
with mock.patch('swift.obj.updater.dump_recon_cache'):
daemon.run_once()
self._check_update_requests(conn.requests, ts_obj, policies[0])
self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3,
[req['path'] for req in conn.requests])
self.assertEqual(
{'redirects': 6, 'successes': 1, 'unlinks': 1,
'async_pendings': 1},
daemon.logger.get_increment_counts())
self.assertFalse(os.listdir(async_dir)) # no async file
if __name__ == '__main__':
unittest.main()
| swiftstack/swift | test/unit/obj/test_updater.py | Python | apache-2.0 | 49,085 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
from glance.common import exception as glance_exception
from openstackx.api import exceptions as api_exceptions
from novaclient import exceptions as novaclient_exceptions
from novaclient.v1_1 import security_group_rules as nova_rules
from mox import IgnoreArg, IsA
from horizon import api
from horizon import test
from .tables import SecurityGroupsTable, RulesTable
SECGROUP_ID = '2'
INDEX_URL = reverse('horizon:nova:access_and_security:index')
SG_CREATE_URL = \
reverse('horizon:nova:access_and_security:security_groups:create')
SG_EDIT_RULE_URL = \
reverse('horizon:nova:access_and_security:security_groups:edit_rules',
args=[SECGROUP_ID])
def strip_absolute_base(uri):
return uri.split(settings.TESTSERVER, 1)[-1]
class SecurityGroupsViewTests(test.BaseViewTests):
def setUp(self):
super(SecurityGroupsViewTests, self).setUp()
sg1 = api.SecurityGroup(None)
sg1.id = 1
sg1.name = 'default'
sg2 = api.SecurityGroup(None)
sg2.id = 2
sg2.name = 'group_2'
rule = {'id': 1,
'ip_protocol': u"tcp",
'from_port': "80",
'to_port': "80",
'parent_group_id': "2",
'ip_range': {'cidr': "0.0.0.0/32"}}
manager = nova_rules.SecurityGroupRuleManager
rule_obj = nova_rules.SecurityGroupRule(manager, rule)
self.rules = [rule_obj]
sg1.rules = self.rules
sg2.rules = self.rules
self.security_groups = (sg1, sg2)
def test_create_security_groups_get(self):
res = self.client.get(SG_CREATE_URL)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/create.html')
def test_create_security_groups_post(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
new_group = self.mox.CreateMock(api.SecurityGroup)
new_group.name = SECGROUP_NAME
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndReturn(new_group)
self.mox.ReplayAll()
res = self.client.post(SG_CREATE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_create_security_groups_post_exception(self):
SECGROUP_NAME = 'fakegroup'
SECGROUP_DESC = 'fakegroup_desc'
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
formData = {'method': 'CreateGroup',
'tenant_id': self.TEST_TENANT,
'name': SECGROUP_NAME,
'description': SECGROUP_DESC,
}
self.mox.StubOutWithMock(api, 'security_group_create')
api.security_group_create(IsA(http.HttpRequest),
SECGROUP_NAME, SECGROUP_DESC).AndRaise(exception)
self.mox.ReplayAll()
res = self.client.post(SG_CREATE_URL, formData)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/create.html')
def test_edit_rules_get(self):
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID).AndReturn(
self.security_groups[1])
self.mox.ReplayAll()
res = self.client.get(SG_EDIT_RULE_URL)
self.assertTemplateUsed(res,
'nova/access_and_security/security_groups/edit_rules.html')
self.assertItemsEqual(res.context['security_group'].name,
self.security_groups[1].name)
def test_edit_rules_get_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
self.mox.StubOutWithMock(api, 'security_group_get')
api.security_group_get(IsA(http.HttpRequest), SECGROUP_ID) \
.AndRaise(exception)
self.mox.ReplayAll()
res = self.client.get(SG_EDIT_RULE_URL)
self.assertRedirects(res, INDEX_URL)
def test_edit_rules_add_rule(self):
RULE_ID = '1'
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
new_rule = self.mox.CreateMock(api.SecurityGroup)
new_rule.from_port = FROM_PORT
new_rule.to_port = TO_PORT
new_rule.ip_protocol = IP_PROTOCOL
new_rule.cidr = CIDR
new_rule.security_group_id = SECGROUP_ID
new_rule.id = RULE_ID
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT, TO_PORT, CIDR)\
.AndReturn(new_rule)
self.mox.ReplayAll()
res = self.client.post(SG_EDIT_RULE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_edit_rules_add_rule_exception(self):
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
RULE_ID = '1'
FROM_PORT = '-1'
TO_PORT = '-1'
IP_PROTOCOL = 'icmp'
CIDR = '0.0.0.0/0'
formData = {'method': 'AddRule',
'tenant_id': self.TEST_TENANT,
'security_group_id': SECGROUP_ID,
'from_port': FROM_PORT,
'to_port': TO_PORT,
'ip_protocol': IP_PROTOCOL,
'cidr': CIDR}
self.mox.StubOutWithMock(api, 'security_group_rule_create')
api.security_group_rule_create(IsA(http.HttpRequest),
SECGROUP_ID, IP_PROTOCOL, FROM_PORT,
TO_PORT, CIDR).AndRaise(exception)
self.mox.ReplayAll()
res = self.client.post(SG_EDIT_RULE_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_edit_rules_delete_rule(self):
RULE_ID = 1
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % RULE_ID}
req = self.factory.post(SG_EDIT_RULE_URL, form_data)
table = RulesTable(req, self.rules)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_edit_rules_delete_rule_exception(self):
RULE_ID = 1
self.mox.StubOutWithMock(api, 'security_group_rule_delete')
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
api.security_group_rule_delete(IsA(http.HttpRequest), RULE_ID) \
.AndRaise(exception)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % RULE_ID}
req = self.factory.post(SG_EDIT_RULE_URL, form_data)
table = RulesTable(req, self.rules)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_delete_group(self):
self.mox.StubOutWithMock(api, 'security_group_delete')
api.security_group_delete(IsA(http.HttpRequest), 2)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % '2'}
req = self.factory.post(INDEX_URL, form_data)
table = SecurityGroupsTable(req, self.security_groups)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
def test_delete_group_exception(self):
self.mox.StubOutWithMock(api, 'security_group_delete')
exception = novaclient_exceptions.ClientException('ClientException',
message='ClientException')
api.security_group_delete(IsA(http.HttpRequest), 2).\
AndRaise(exception)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % '2'}
req = self.factory.post(INDEX_URL, form_data)
table = SecurityGroupsTable(req, self.security_groups)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
| citrix-openstack/horizon | horizon/horizon/dashboards/nova/access_and_security/security_groups/tests.py | Python | apache-2.0 | 10,202 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from heat_integrationtests.functional import functional_base
LOG = logging.getLogger(__name__)
class LoadBalancerv2Test(functional_base.FunctionalTestsBase):
create_template = '''
heat_template_version: 2016-04-08
resources:
loadbalancer:
type: OS::Neutron::LBaaS::LoadBalancer
properties:
description: aLoadBalancer
vip_subnet: private-subnet
listener:
type: OS::Neutron::LBaaS::Listener
properties:
description: aListener
loadbalancer: { get_resource: loadbalancer }
protocol: HTTP
protocol_port: 80
connection_limit: 5555
pool:
type: OS::Neutron::LBaaS::Pool
properties:
description: aPool
lb_algorithm: ROUND_ROBIN
protocol: HTTP
listener: { get_resource: listener }
poolmember:
type: OS::Neutron::LBaaS::PoolMember
properties:
address: 1.1.1.1
pool: { get_resource: pool }
protocol_port: 1111
subnet: private-subnet
weight: 255
# pm2
healthmonitor:
type: OS::Neutron::LBaaS::HealthMonitor
properties:
delay: 3
type: HTTP
timeout: 3
max_retries: 3
pool: { get_resource: pool }
outputs:
loadbalancer:
value: { get_attr: [ loadbalancer, show ] }
pool:
value: { get_attr: [ pool, show ] }
poolmember:
value: { get_attr: [ poolmember, show ] }
listener:
value: { get_attr: [ listener, show ] }
healthmonitor:
value: { get_attr: [ healthmonitor, show ] }
'''
add_member = '''
poolmember2:
type: OS::Neutron::LBaaS::PoolMember
properties:
address: 2.2.2.2
pool: { get_resource: pool }
protocol_port: 2222
subnet: private-subnet
weight: 222
'''
def setUp(self):
super(LoadBalancerv2Test, self).setUp()
if not self.is_network_extension_supported('lbaasv2'):
self.skipTest('LBaasv2 extension not available, skipping')
def test_create_update_loadbalancer(self):
stack_identifier = self.stack_create(template=self.create_template)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
template = self.create_template.replace('ROUND_ROBIN', 'SOURCE_IP')
template = template.replace('3', '6')
template = template.replace('255', '256')
template = template.replace('5555', '7777')
template = template.replace('aLoadBalancer', 'updatedLoadBalancer')
template = template.replace('aPool', 'updatedPool')
template = template.replace('aListener', 'updatedListener')
self.update_stack(stack_identifier, template=template)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
self.assertEqual('updatedLoadBalancer', output['description'])
output = self._stack_output(stack, 'pool')
self.assertEqual('SOURCE_IP', output['lb_algorithm'])
self.assertEqual('updatedPool', output['description'])
output = self._stack_output(stack, 'poolmember')
self.assertEqual(256, output['weight'])
output = self._stack_output(stack, 'healthmonitor')
self.assertEqual(6, output['delay'])
self.assertEqual(6, output['timeout'])
self.assertEqual(6, output['max_retries'])
output = self._stack_output(stack, 'listener')
self.assertEqual(7777, output['connection_limit'])
self.assertEqual('updatedListener', output['description'])
def test_add_delete_poolmember(self):
stack_identifier = self.stack_create(template=self.create_template)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
output = self._stack_output(stack, 'pool')
self.assertEqual(1, len(output['members']))
# add pool member
template = self.create_template.replace('# pm2', self.add_member)
self.update_stack(stack_identifier, template=template)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
output = self._stack_output(stack, 'pool')
self.assertEqual(2, len(output['members']))
# delete pool member
self.update_stack(stack_identifier, template=self.create_template)
stack = self.client.stacks.get(stack_identifier)
output = self._stack_output(stack, 'loadbalancer')
self.assertEqual('ONLINE', output['operating_status'])
output = self._stack_output(stack, 'pool')
self.assertEqual(1, len(output['members']))
| steveb/heat | heat_integrationtests/functional/test_lbaasv2.py | Python | apache-2.0 | 5,460 |
import yaml
import os
from utils import to_bool
config_path = os.environ.get('SIXPACK_CONFIG', None)
if config_path:
try:
CONFIG = yaml.safe_load(open(config_path, 'r'))
except IOError:
raise RuntimeError('SIXPACK_CONFIG - {0} - is an invalid path'.format(config_path))
except yaml.YAMLError, exc:
raise RuntimeError('Error in configuration file: {0}'.format(str(exc)))
else:
CONFIG = {
'enabled': to_bool(os.environ.get('SIXPACK_CONFIG_ENABLED', 'True')),
'redis_port': int(os.environ.get('SIXPACK_CONFIG_REDIS_PORT', '6379')),
'redis_host': os.environ.get('SIXPACK_CONFIG_REDIS_HOST', "localhost"),
'redis_password': os.environ.get('SIXPACK_CONFIG_REDIS_PASSWORD', None),
'redis_prefix': os.environ.get('SIXPACK_CONFIG_REDIS_PREFIX', "sxp"),
'redis_socket_timeout': os.environ.get('SIXPACK_CONFIG_REDIS_SOCKET_TIMEOUT', None),
'redis_sentinel_service_name': os.environ.get('SIXPACK_CONFIG_REDIS_SENTINEL_SERVICE_NAME', None),
'redis_max_connections': int(os.environ.get('SIXPACK_CONFIG_REDIS_MAX_CONNECTIONS', '0')),
'redis_db': int(os.environ.get('SIXPACK_CONFIG_REDIS_DB', '15')),
'robot_regex': os.environ.get('SIXPACK_CONFIG_ROBOT_REGEX', "$^|trivial|facebook|MetaURI|butterfly|google|"
"amazon|goldfire|sleuth|xenu|msnbot|SiteUptime|"
"Slurp|WordPress|ZIBB|ZyBorg|pingdom|bot|yahoo|"
"slurp|java|fetch|spider|url|crawl|oneriot|abby|"
"commentreader|twiceler"),
        'ignored_ip_addresses': os.environ.get('SIXPACK_CONFIG_IGNORE_IPS', "").split(","),
        'asset_path': os.environ.get('SIXPACK_CONFIG_ASSET_PATH', "gen"),
        'secret_key': os.environ.get('SIXPACK_CONFIG_SECRET', 'temp'),
        'csrf_disable': os.environ.get('SIXPACK_CONFIG_CSRF_DISABLE', False)
}
if 'SIXPACK_CONFIG_REDIS_SENTINELS' in os.environ:
sentinels = []
for sentinel in os.environ['SIXPACK_CONFIG_REDIS_SENTINELS'].split(","):
        server, port = sentinel.split(":")
sentinels.append([server, int(port)])
CONFIG['redis_sentinels'] = sentinels
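# Illustrative example (added for exposition; hosts and ports are made up):
# SIXPACK_CONFIG_REDIS_SENTINELS="10.0.0.1:26379,10.0.0.2:26379" would yield
# CONFIG['redis_sentinels'] == [['10.0.0.1', 26379], ['10.0.0.2', 26379]].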
| blackskad/sixpack | sixpack/config.py | Python | bsd-2-clause | 2,380 |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import smap, smap.archiver
from smap.archiver import stream
def help(topic=None):
if not topic:
return ("""Welcome to the archiver query language.
sMAP version: %s
sMAP archiver version: %s
We know about the following operators: """ % (
smap.__version__,
smap.archiver.__version__))+\
', '.join(sorted(stream.installed_ops.keys()))
elif topic in stream.installed_ops:
opdocs = stream.installed_ops[topic].__doc__
        if opdocs is None and hasattr(stream.installed_ops[topic],
                                      'base_operator'):
            opdocs = stream.installed_ops[topic].base_operator.__doc__
        if opdocs is None:
return "Operator '%s' is installed but we don't have any documentation for it" % topic
else:
return opdocs
else:
return "No help topic found for " + topic
| jf87/smap | python/smap/archiver/help.py | Python | bsd-2-clause | 2,264 |
#!/usr/bin/python
import sys
import serial
import glob
from traits.api import (HasTraits, Int, Str, List, Float, Instance, Bool, Tuple,
Either, Range, Property, ReadOnly, cached_property, on_trait_change)
class Fixture(HasTraits):
base = ReadOnly
slots = Property(depends_on='-ignore', ignore=True)
class DimmableColor(Fixture):
red = Property(lambda self: self.color[0])
green = Property(lambda self: self.color[1])
blue = Property(lambda self: self.color[2])
color = Tuple(Float, Float, Float)
intensity = Float
class MovingHead(Fixture):
pan = Float
tilt = Float
speed = Float(1)
class Gobo(Fixture):
gobo = Int
shake = Float
class Strobe(Fixture):
strobe = Float
class DMXController(HasTraits):
port = Either(None, Str)
tty = Property(depends_on='port')
fixtures = List(Instance(Fixture))
enabled = Bool
debug = Bool
channels = Range(value=128, low=1, high=512)
_slots = List(Int)
slots = Property(lambda self: self._slots)
def _port_default(self):
ports = glob.glob('/dev/tty.usb*') + glob.glob('/dev/ttyACM*')
if ports:
return ports[0]
@cached_property
def _get_tty(self):
if self.port:
return serial.Serial(self.port, 115200)
def write(self, *args):
command = ''.join(str(arg) for arg in args) + '\r\n'
if self.debug:
sys.stdout.write(command)
if self.tty:
self.tty.write(command)
self.tty.flush()
def _channels_changed(self):
self.write('m', self.channels)
@on_trait_change('enabled,fixtures,fixtures_items,fixtures:slots')
def _update(self, obj, name, old, new):
if name == 'enabled':
self.write('e', 1 if self.enabled else 0)
self._slots = []
if self.enabled:
slots = [0] * 512
max_channel = 0
for fixture in self.fixtures:
for i, value in enumerate(fixture.slots):
channel = fixture.base + i
slots[channel - 1] = min(255, max(0, value))
if channel > max_channel:
max_channel = channel
if max_channel > self.channels:
self.channels = max_channel
old_slots = self._slots or [0] * 512
for channel, value in enumerate(slots):
if value != old_slots[channel]:
self.write('c', channel + 1)
self.write('w', value)
self._slots = slots
class EuroliteMovingHeadWash(MovingHead, Strobe, DimmableColor):
def _get_slots(self):
pan = int((self.pan + 270.) / 540. * 65535)
tilt = int((self.tilt + 90.) / 180. * 65535)
strobe = 31 + int(self.strobe * 169) if self.strobe else 255
return [pan >> 8, tilt >> 8, int((1. - self.speed) * 255),
int(self.red * 255), int(self.green * 255), int(self.blue * 255),
0, strobe, int(self.intensity * 255), pan & 255, tilt & 255]
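# Minimal illustrative sketch (not one of the original fixtures): how a pan
# angle is packed into the coarse and fine DMX bytes used by the wash fixture
# above (its first and tenth slots), assuming the same -270..+270 degree range.
def _example_pan_bytes(pan_degrees=90.0):
    pan = int((pan_degrees + 270.) / 540. * 65535)   # map onto 0..65535
    return pan >> 8, pan & 255                       # coarse byte, fine byte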
class VarytecMovingHeadSpot(MovingHead, Strobe, DimmableColor, Gobo):
def _get_slots(self):
pan = int((self.pan + 270.) / 540. * 65535)
tilt = int((self.tilt + 105.) / 210. * 65535)
if self.strobe:
dimmer = 135 + int(self.strobe * 104)
else:
dimmer = 0 if self.intensity < (1./127) else 135 - int(self.intensity * 127)
return [pan >> 8, pan & 255, tilt >> 8, tilt & 255, int((1. - self.speed) * 255),
dimmer, int(self.red * 255), int(self.green * 255), int(self.blue * 255),
0, 0, 0, self.gobo * 8 if not self.shake or not self.gobo else 65 + self.gobo * 15 + int(self.shake * 14)]
if __name__ == '__main__':
wash = EuroliteMovingHeadWash(base=1, color=(1, 0, 0.5), intensity=1)
controller = DMXController(debug=True, fixtures=[wash])
controller.enabled = True
| jonathanhogg/coderdojo-sequencer | light_controller/dmx.py | Python | bsd-2-clause | 3,970 |
from __future__ import with_statement
from __future__ import print_function
import os
from os import path
import re
from blazeutils.helpers import pprint
import six
from werkzeug.serving import run_simple
from werkzeug import Client
from werkzeug.wrappers.base_response import BaseResponse
from blazeweb.globals import ag, settings
from blazeweb.hierarchy import list_component_mappings
from blazeweb.paster_tpl import run_template
from blazeweb.tasks import run_tasks
from blazeweb.utils.filesystem import copy_static_files
import paste.script.command as pscmd
"""
BLAZEWEB COMMANDS FIRST
"""
class ProjectCommand(pscmd.Command):
summary = "Create a project layout using a pre-defined template"
usage = "APP_NAME"
group_name = ""
min_args = 1
max_args = 1
parser = pscmd.Command.standard_parser(verbose=False)
parser.add_option(
'-t', '--template',
dest='template',
default='bwproject',
help="The pre-defined template to use"
)
parser.add_option(
'--no-interactive',
dest='interactive',
action='store_false',
default=True,
)
parser.add_option(
'--verbose',
dest='verbose',
action='store_true',
default=False
)
parser.add_option(
'--no-overwrite',
dest='overwrite',
action='store_false',
default=True
)
def command(self):
projname = self.args[0]
output_dir = path.join(os.getcwd(), '%s-dist' % projname)
vars = {'project': projname,
'package': projname,
}
try:
run_template(
self.options.interactive,
self.options.verbose,
self.options.overwrite,
vars,
output_dir,
self.options.template,
'blazeweb_project_template'
)
except TypeError as e:
if not six.PY2 and 'bytes' in str(e):
print('ERROR: project command unavailable for python 3 due to '
'problem in paste library')
return
raise
"""
Now Application Specific Commands
"""
class ServeCommand(pscmd.Command):
# Parser configuration
summary = "Serve the application by starting a development http server"
usage = ""
parser = pscmd.Command.standard_parser(verbose=False)
parser.add_option(
'-a', '--address',
dest='address',
default='localhost',
help="IP address or hostname to serve from"
)
parser.add_option(
'-P', '--port',
dest='port',
default=5000,
type='int'
)
parser.add_option(
'--no-reloader',
dest='reloader',
action='store_false',
default=True,
)
parser.add_option(
'--with-debugger',
dest='debugger',
action='store_true',
default=False,
)
parser.add_option(
'--with-evalex',
dest='evalex',
action='store_true',
default=False,
)
parser.add_option(
'--with-threaded',
dest='threaded',
action='store_true',
default=False,
)
parser.add_option(
'--processes',
dest='processes',
default=1,
type='int',
help='number of processes to use'
)
parser.add_option(
'--reloader-interval',
dest='reloader_interval',
default=1,
type='int',
)
parser.add_option(
'--pass-through-errors',
dest='pass_through_errors',
action='store_true',
default=False,
)
def command(self):
if settings.logs.enabled:
# our logging conflicts with werkzeug's, see issue #13
# this is to give some visual feedback that the server did in fact start
print(' * Serving on http://%s:%s/' % (self.options.address, self.options.port))
run_simple(
self.options.address,
self.options.port,
self.wsgiapp,
use_reloader=self.options.reloader,
use_debugger=self.options.debugger,
use_evalex=self.options.evalex,
extra_files=None,
reloader_interval=self.options.reloader_interval,
threaded=self.options.threaded,
processes=self.options.processes,
passthrough_errors=self.options.pass_through_errors,
)
class TestRunCommand(pscmd.Command):
# Parser configuration
summary = "runs a single request through the application"
usage = "URL"
min_args = 0
max_args = 1
parser = pscmd.Command.standard_parser(verbose=False)
parser.add_option(
'--silent',
dest='silent',
action='store_true',
default=False,
)
parser.add_option(
'--no-headers',
dest='show_headers',
action='store_false',
default=True,
)
parser.add_option(
'--no-body',
dest='show_body',
action='store_false',
default=True,
)
def command(self):
options = self.options
c = Client(self.wsgiapp, BaseResponse)
if self.args:
url = self.args[0]
else:
url = '/'
resp = c.get(url)
if options.show_headers and not options.silent:
print(resp.status)
print(resp.headers)
if options.show_body and not options.silent:
for respstr in resp.response:
if isinstance(respstr, six.binary_type):
respstr = respstr.decode()
print(respstr)
class TasksCommand(pscmd.Command):
# Parser configuration
summary = "runs task(s)"
usage = "TASK [TASK [TASK [...]]]"
min_args = 1
parser = pscmd.Command.standard_parser(verbose=False)
parser.add_option(
'-t', '--test-only',
dest='test_only',
action='store_true',
default=False,
)
def command(self):
run_tasks(self.args, test_only=self.options.test_only)
class ShellCommand(pscmd.Command):
# Parser configuration
summary = "run a shell with an application initialized"
usage = ""
min_args = 0
max_args = 0
parser = pscmd.Command.standard_parser(verbose=False)
parser.add_option(
'--ipython',
dest='use_ipython',
action='store_true',
default=False,
help='use IPython'
)
def command(self):
# set what will already be in the namespace for the shell. Saves us from
# typing common import statements
shell_namespace = {
'ag': ag._current_obj(),
'settings': settings._current_obj()
}
shell_act = make_shell(lambda: shell_namespace, 'blazeweb Interactive Shell')
shell_act(self.options.use_ipython)
class RoutesCommand(pscmd.Command):
# Parser configuration
summary = "prints out all routes configured for the application"
usage = ""
min_args = 0
max_args = 0
parser = pscmd.Command.standard_parser(verbose=False)
parser.add_option(
'-e', '--show-endpoints',
dest='show_endpoints',
action='store_true',
default=False,
help='Shows the mapped URL as well as the endpoint'
)
def command(self):
toprint = []
for rule in ag.route_map.iter_rules():
if self.options.show_endpoints:
toprint.append((rule.rule, rule.endpoint))
else:
toprint.append(rule.rule)
pprint(toprint)
class StaticCopyCommand(pscmd.Command):
# Parser configuration
summary = "copy's app and component static files to the designated location"
usage = ""
min_args = 0
max_args = 0
parser = pscmd.Command.standard_parser(verbose=False)
parser.add_option(
'-d', '--delete-existing',
dest='delete_existing',
action='store_true',
default=False,
help='Delete "app" and "component" directories in the destination if they exist'
)
def command(self):
copy_static_files(delete_existing=self.options.delete_existing)
        print('\n - files/dirs copied successfully\n')
class JinjaConvertCommand(pscmd.Command):
# Parser configuration
summary = "convert jinja delimiters from old style to new style"
usage = ""
min_args = 0
max_args = 0
parser = pscmd.Command.standard_parser(verbose=False)
def change_tags(self, contents):
contents = re.sub('<{', '{{', contents)
contents = re.sub('<%', '{%', contents)
contents = re.sub('<#', '{#', contents)
contents = re.sub('}>', '}}', contents)
contents = re.sub('%>', '%}', contents)
contents = re.sub('#>', '#}', contents)
return contents
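    # Worked example (illustrative): change_tags('<{ name }> <% if x %>')
    # returns '{{ name }} {% if x %}', i.e. the old-style delimiters become
    # the standard Jinja delimiters.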
def command(self):
print('converting:')
cwd = os.getcwd()
for fname in os.listdir(cwd):
if not fname.endswith('.html'):
continue
            with open(fname, 'rb') as fh:
                contents = fh.read().decode('utf-8')
            contents = self.change_tags(contents)
            with open(fname, 'wb') as fh:
                fh.write(contents.encode('utf-8'))
print(' %s' % fname)
class ComponentMapCommand(pscmd.Command):
# Parser configuration
summary = "List the component map"
usage = ""
min_args = 0
max_args = 0
parser = pscmd.Command.standard_parser(verbose=False)
def command(self):
pprint(list_component_mappings(inc_apps=True))
def make_shell(init_func=None, banner=None, use_ipython=True):
"""Returns an action callback that spawns a new interactive
python shell.
:param init_func: an optional initialization function that is
called before the shell is started. The return
value of this function is the initial namespace.
:param banner: the banner that is displayed before the shell. If
not specified a generic banner is used instead.
:param use_ipython: if set to `True` ipython is used if available.
"""
if banner is None:
banner = 'Interactive Werkzeug Shell'
if init_func is None:
init_func = dict
def action(ipython=use_ipython):
"""Start a new interactive python session."""
namespace = init_func()
if ipython:
try:
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
sh = InteractiveShellEmbed(banner1=banner)
except ImportError:
from IPython.Shell import IPShellEmbed
sh = IPShellEmbed(banner=banner)
except ImportError:
pass
else:
sh(global_ns={}, local_ns=namespace)
return
from code import interact
interact(banner, local=namespace)
return action
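# Hedged usage sketch, not part of the original module: building a shell
# action around a custom namespace, mirroring what ShellCommand.command()
# does above. The namespace contents are illustrative only.
def _example_make_shell():
    action = make_shell(lambda: {'answer': 42}, banner='demo shell')
    return action   # action(ipython=False) starts a plain code.interact session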
| level12/blazeweb | blazeweb/commands.py | Python | bsd-3-clause | 11,047 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import inspect
import os
import re
import sys
from py_utils import camel_case
def DiscoverModules(start_dir, top_level_dir, pattern='*'):
"""Discover all modules in |start_dir| which match |pattern|.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
pattern: Unix shell-style pattern for filtering the filenames to import.
Returns:
list of modules.
"""
# start_dir and top_level_dir must be consistent with each other.
start_dir = os.path.realpath(start_dir)
top_level_dir = os.path.realpath(top_level_dir)
modules = []
sub_paths = list(os.walk(start_dir))
# We sort the directories & file paths to ensure a deterministic ordering when
# traversing |top_level_dir|.
sub_paths.sort(key=lambda paths_tuple: paths_tuple[0])
for dir_path, _, filenames in sub_paths:
    # Sort the filenames so modules in each directory are imported in a deterministic order.
filenames.sort()
for filename in filenames:
# Filter out unwanted filenames.
if filename.startswith('.') or filename.startswith('_'):
continue
if os.path.splitext(filename)[1] != '.py':
continue
if not fnmatch.fnmatch(filename, pattern):
continue
# Find the module.
module_rel_path = os.path.relpath(
os.path.join(dir_path, filename), top_level_dir)
module_name = re.sub(r'[/\\]', '.', os.path.splitext(module_rel_path)[0])
# Import the module.
try:
# Make sure that top_level_dir is the first path in the sys.path in case
# there are naming conflict in module parts.
original_sys_path = sys.path[:]
sys.path.insert(0, top_level_dir)
module = __import__(module_name, fromlist=[True])
modules.append(module)
finally:
sys.path = original_sys_path
return modules
def AssertNoKeyConflicts(classes_by_key_1, classes_by_key_2):
for k in classes_by_key_1:
if k in classes_by_key_2:
assert classes_by_key_1[k] is classes_by_key_2[k], (
'Found conflicting classes for the same key: '
'key=%s, class_1=%s, class_2=%s' % (
k, classes_by_key_1[k], classes_by_key_2[k]))
# TODO(dtu): Normalize all discoverable classes to have corresponding module
# and class names, then always index by class name.
def DiscoverClasses(start_dir,
top_level_dir,
base_class,
pattern='*',
index_by_class_name=True,
directly_constructable=False):
"""Discover all classes in |start_dir| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
base_class: The base class to search for.
pattern: Unix shell-style pattern for filtering the filenames to import.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
directly_constructable: If True, will only return classes that can be
constructed without arguments
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
modules = DiscoverModules(start_dir, top_level_dir, pattern)
classes = {}
for module in modules:
new_classes = DiscoverClassesInModule(
module, base_class, index_by_class_name, directly_constructable)
# TODO(nednguyen): we should remove index_by_class_name once
# benchmark_smoke_unittest in chromium/src/tools/perf no longer relied
# naming collisions to reduce the number of smoked benchmark tests.
# crbug.com/548652
if index_by_class_name:
AssertNoKeyConflicts(classes, new_classes)
    classes.update(new_classes)
return classes
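# Hedged usage sketch, not part of the original module: discovering every
# subclass of a caller-supplied base class under a package root. The
# 'benchmarks' directory and '*_benchmark.py' pattern are assumptions for
# the example, not real catapult names.
def _ExampleDiscoverBenchmarks(package_root, base_class):
  return DiscoverClasses(start_dir=os.path.join(package_root, 'benchmarks'),
                         top_level_dir=package_root,
                         base_class=base_class,
                         pattern='*_benchmark.py',
                         index_by_class_name=True)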
# TODO(nednguyen): we should remove index_by_class_name once
# benchmark_smoke_unittest in chromium/src/tools/perf no longer relied
# naming collisions to reduce the number of smoked benchmark tests.
# crbug.com/548652
def DiscoverClassesInModule(module,
base_class,
index_by_class_name=False,
directly_constructable=False):
"""Discover all classes in |module| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
module: The module to search.
base_class: The base class to search for.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
classes = {}
for _, obj in inspect.getmembers(module):
# Ensure object is a class.
if not inspect.isclass(obj):
continue
# Include only subclasses of base_class.
if not issubclass(obj, base_class):
continue
# Exclude the base_class itself.
if obj is base_class:
continue
# Exclude protected or private classes.
if obj.__name__.startswith('_'):
continue
# Include only the module in which the class is defined.
# If a class is imported by another module, exclude those duplicates.
if obj.__module__ != module.__name__:
continue
if index_by_class_name:
key_name = camel_case.ToUnderscore(obj.__name__)
else:
key_name = module.__name__.split('.')[-1]
if not directly_constructable or IsDirectlyConstructable(obj):
if key_name in classes and index_by_class_name:
assert classes[key_name] is obj, (
'Duplicate key_name with different objs detected: '
'key=%s, obj1=%s, obj2=%s' % (key_name, classes[key_name], obj))
else:
classes[key_name] = obj
return classes
def IsDirectlyConstructable(cls):
"""Returns True if instance of |cls| can be construct without arguments."""
assert inspect.isclass(cls)
if not hasattr(cls, '__init__'):
# Case |class A: pass|.
return True
if cls.__init__ is object.__init__:
# Case |class A(object): pass|.
return True
# Case |class (object):| with |__init__| other than |object.__init__|.
args, _, _, defaults = inspect.getargspec(cls.__init__)
if defaults is None:
defaults = ()
# Return true if |self| is only arg without a default.
return len(args) == len(defaults) + 1
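# Illustrative sketch, not part of the original module: the kind of classes
# the check above accepts or rejects.
class _ExampleNoArgs(object):
  """Directly constructable: __init__ takes only self."""
  def __init__(self):
    pass

class _ExampleNeedsArg(object):
  """Not directly constructable: one argument lacks a default."""
  def __init__(self, required):
    self.required = required

# IsDirectlyConstructable(_ExampleNoArgs) -> True
# IsDirectlyConstructable(_ExampleNeedsArg) -> False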
_COUNTER = [0]
def _GetUniqueModuleName():
_COUNTER[0] += 1
return "module_" + str(_COUNTER[0])
| catapult-project/catapult-csm | common/py_utils/py_utils/discover.py | Python | bsd-3-clause | 6,734 |
'''
#;+
#; NAME:
#; radec
#; Version 1.1
#;
#; PURPOSE:
#; 2014 Written by JXP
#;-
#;------------------------------------------------------------------------------
'''
# Import libraries
import numpy as np
from astropy.table import QTable, Column, Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.units import Quantity
from xastropy.xutils import xdebug as xdb
# def stod1 :: Input one RA/DEC pair as strings and return RA/DEC in decimal degrees
# def to_coord :: Input RA/DEC in one of several formats and return SkyCoord
#### ###############################
# Main driver (return decimal values for inputted string)
def stod1(rads):
"""
Input RA/DEC as strings and return RA/DEC in decimal degrees
Parameters:
----------
rads:
tuple (RA, DEC as a string with colon format)
string (JXXXXXX.X+XXXXXX.X format)
Returns:
----------
rad: tuple (RA, DEC in decimal degrees with units)
"""
# JXXXXXXX+XXXXXX format
if len(rads) > 2:
i0 = rads.find('J')+1
isign = np.max( [rads.find('+'), rads.find('-')] )
# Parse
ra = np.array( [rads[i0:i0+2], rads[i0+2:i0+4], rads[i0+4:isign]], dtype='float')
dec = np.array( [rads[isign:isign+3], rads[isign+3:isign+5], rads[isign+5:]], dtype='float')
flg_neg = rads[isign] == '-'
else:
# Look for a colon
if rads[0].find(':') == -1:
# No colons
ra = np.array(rads[0].split(' '),dtype='float')
dec = np.array(rads[1].split(' '),dtype='float')
else:
ra = np.array(rads[0].split(':'),dtype='float')
dec = np.array(rads[1].split(':'),dtype='float')
# Goofy length
if len(ra) == 2:
ra = np.append(ra, [0.])
# Sign
#flg_neg = (rads[1].strip())[0] == '-'
flg_neg = '-' in rads[1].strip() # Handles $-$ too
#xdb.set_trace()
# RA
rad = (360./24.)*(ra[0] + ra[1]/60. + ra[2]/3600.)
# DEC
decd = abs(dec[0]) + dec[1]/60. + dec[2]/3600.
# Deal with negative sign
if flg_neg:
decd = -1. * decd
return rad*u.degree, decd*u.degree
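# Worked example, illustrative only (not part of the original module): the
# colon-delimited pair below converts to (150 deg, -30.5 deg), since
# RA = 15 * (10 + 0/60. + 0/3600.) and DEC = -(30 + 30/60. + 0/3600.)
def _example_stod1():
    return stod1(('10:00:00', '-30:30:00'))   # (150.0 deg, -30.5 deg)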
#### ###############################
# Decimal degress or SkyCoord to string
def dtos1(irad, fmt=0):
'''
    Converts a tuple of RA/DEC (or a SkyCoord) into formatted strings
    Parameters
    ----------
    rad: tuple (RA, DEC in decimal degrees [with units!]) or SkyCoord
    fmt: int (0)
        0: colon delimited, e.g. '11:23:21.23', '+23:11:45.0'
        1: J name, e.g. 'J112321.23+231145.0'
'''
# Get to SkyCoord
if type(irad) is SkyCoord:
coord = irad
else:
rad = list(irad)
for ii in range(2):
try:
rad[ii].to('degree')
except AttributeError:
rad[ii] = rad[ii] * u.degree
coord = to_coord(rad)
# String
if fmt == 0:
ras = coord.ra.to_string(unit=u.hour,sep=':',pad=True, precision=2)
decs = coord.dec.to_string(sep=':',pad=True, alwayssign=True, precision=1)
return str(ras), str(decs)
elif fmt == 1:
ras = coord.ra.to_string(unit=u.hour,sep='',pad=True, precision=2)
decs = coord.dec.to_string(sep='',pad=True, alwayssign=True, precision=1)
return str('J'+ras+decs)
#### ###############################
# Add RA, DEC to a Table
def stod_table(table):
''' Converts RAS, DECS columns in a QTable to RA, DEC
Parameters
----------
table: QTable (needs to handle units)
'''
if not isinstance(table,QTable):
raise TypeError('radec.stod_table: table needs to be a QTable')
# Generate Columns (as needed)
for card in ['RA','DEC']:
if card not in table.dtype.names:
table.add_column( Column( np.zeros(len(table)), name=card, unit=u.degree) )
# Loop on rows
for k,row in enumerate(table):
try:
rad, decd = stod1( (row['RAS'], row['DECS']) )
except KeyError:
rad, decd = stod1( (row['RA'], row['DEC']) )
table['RA'][k] = rad
table['DEC'][k] = decd
#### ###############################
# String to decimal degress
def stod(in_rads, radec=None):
import radec as x_r
from astropy.table import Table
x_r.stod_table(in_rads)
#### ###############################
# Main conversion
def to_coord(irad):
"""Input RA/DEC as a tuple, str or SkyCoord and return a SkyCoord
Parameters:
--------------
irad: tuple, str, or SkyCoord
Input RA/DEC
tuple: (float,float), (deg,deg), or (str,str)
e.g. (213.221,45.222), (213.221*u.deg,45.222*u.deg),
('00:23:23.1','-23:11:02.3')
Returns:
---------
SkyCoord object of that RA/DEC
"""
# SkyCoord
if type(irad) is SkyCoord:
return irad
if not type(irad) in [tuple,list]:
if len(irad) < 10:
raise TypeError('radec.to_coord: Expecting JXXXXXX.X+XXXXXX.X format')
else:
if len(irad) != 2:
raise TypeError('radec.to_coord: Requires length two (RA,DEC)')
# String?
if isinstance(irad[0],basestring):
rad = stod1(irad)
elif type(irad[0]) is Quantity:
rad = irad
else: # Assuming two floats
rad = [iirad*u.degree for iirad in irad]
# Return
return SkyCoord(ra=rad[0], dec=rad[1])
#### ###############################
# Offsets
def offsets(irad1, irad2, verbose=True):
"""
Input a pair of RA/DEC and calculate the RA/DEC offsets between them
Parameters:
----------
irad1 : RA/DEC of source 1 (origin)
irad2 : RA/DEC of source 2 (destination)
verbose: bool, optional
Returns:
-------
offsets, PA : Tuple of offsets (itself a Tuple in arcsec) and Position Angle (degrees)
"""
# Convert to SkyCoord
coord1 = to_coord(irad1)
coord2 = to_coord(irad2)
# Angular separation
sep = coord1.separation(coord2).to('arcsec')
# PA
PA = coord1.position_angle(coord2)
# RA/DEC
dec_off = np.cos(PA) * sep # arcsec
ra_off = np.sin(PA) * sep # arcsec (East is *higher* RA)
# Print
if verbose:
print('RA Offset from 1 to 2 is {:g}'.format(ra_off))
print('DEC Offset from 1 to 2 is {:g}'.format(dec_off))
print('PA = {:g}'.format(PA.degree*u.degree))
# Return
return (ra_off, dec_off), PA.degree * u.degree
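# Hedged usage sketch, not part of the original module: offsets between two
# positions one arcminute apart in declination; the coordinate values are
# illustrative only.
def _example_offsets():
    c1 = ('10:00:00', '+30:00:00')
    c2 = ('10:00:00', '+30:01:00')
    (ra_off, dec_off), pa = offsets(c1, c2, verbose=False)
    return ra_off, dec_off, pa   # ~0 arcsec in RA, ~60 arcsec in DEC, PA ~0 deg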
| profxj/xastropy | xastropy/obs/radec.py | Python | bsd-3-clause | 6,434 |
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from contextlib import contextmanager
import os
import sys
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from rospkg import RosPack, RosStack
def get_test_dir():
return os.path.abspath(os.path.dirname(__file__))
def get_cache_dir():
p = os.path.join(get_test_dir(), 'sources_cache')
assert os.path.isdir(p)
return p
def create_test_SourcesListLoader():
from rosdep2.sources_list import SourcesListLoader
return SourcesListLoader.create_default(sources_cache_dir=get_cache_dir(), verbose=True)
def get_test_tree_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'tree'))
def get_test_rospkgs():
test_dir = get_test_tree_dir()
ros_root = os.path.join(test_dir, 'ros')
ros_package_path = os.path.join(test_dir, 'stacks')
ros_paths = [ros_root, ros_package_path]
rospack = RosPack(ros_paths=ros_paths)
rosstack = RosStack(ros_paths=ros_paths)
return rospack, rosstack
def test_InstallerContext_ctor():
from rosdep2.installers import InstallerContext
from rospkg.os_detect import OsDetect
context = InstallerContext()
assert context.get_os_detect() is not None
assert isinstance(context.get_os_detect(), OsDetect)
detect = OsDetect()
context = InstallerContext(detect)
assert context.get_os_detect() == detect
assert len(context.get_installer_keys()) == 0
assert len(context.get_os_keys()) == 0
context.verbose = True
assert context.get_os_detect() == detect
assert len(context.get_installer_keys()) == 0
assert len(context.get_os_keys()) == 0
def test_InstallerContext_get_os_version_type():
from rospkg.os_detect import OS_UBUNTU, OsDetect
from rosdep2.installers import InstallerContext
context = InstallerContext()
try:
context.set_os_version_type(OS_UBUNTU, 'bad')
assert False, 'should check type'
except ValueError:
pass
assert OsDetect.get_version == context.get_os_version_type(OS_UBUNTU)
context.set_os_version_type(OS_UBUNTU, OsDetect.get_codename)
assert OsDetect.get_codename == context.get_os_version_type(OS_UBUNTU)
def test_InstallerContext_os_version_and_name():
from rospkg.os_detect import OsDetect
from rosdep2.installers import InstallerContext
context = InstallerContext()
context.set_verbose(True)
os_name, os_version = context.get_os_name_and_version()
assert os_name is not None
assert os_version is not None
val = ('fakeos', 'blah')
context.set_os_override(*val)
assert val == context.get_os_name_and_version()
from mock import Mock
os_detect_mock = Mock(spec=OsDetect)
os_detect_mock.get_name.return_value = 'fakeos'
os_detect_mock.get_version.return_value = 'fakeos-version'
os_detect_mock.get_codename.return_value = 'fakeos-codename'
context = InstallerContext(os_detect_mock)
context.set_os_version_type('fakeos', os_detect_mock.get_codename)
os_name, os_version = context.get_os_name_and_version()
assert os_name == 'fakeos', os_name
assert os_version == 'fakeos-codename', os_version
context.set_os_version_type('fakeos', os_detect_mock.get_version)
os_name, os_version = context.get_os_name_and_version()
assert os_name == 'fakeos', os_name
assert os_version == 'fakeos-version', os_version
def test_InstallerContext_installers():
from rosdep2.installers import InstallerContext, Installer
from rospkg.os_detect import OsDetect
detect = OsDetect()
context = InstallerContext(detect)
context.verbose = True
key = 'fake-apt'
try:
installer = context.get_installer(key)
assert False, 'should have raised: %s' % (installer)
except KeyError:
pass
class Foo:
pass
class FakeInstaller(Installer):
pass
class FakeInstaller2(Installer):
pass
# test TypeError on set_installer
try:
context.set_installer(key, 1)
assert False, 'should have raised'
except TypeError:
pass
try:
context.set_installer(key, Foo())
assert False, 'should have raised'
except TypeError:
pass
try:
# must be instantiated
context.set_installer(key, FakeInstaller)
assert False, 'should have raised'
except TypeError:
pass
installer = FakeInstaller()
installer2 = FakeInstaller2()
context.set_installer(key, installer)
assert context.get_installer(key) == installer
assert list(context.get_installer_keys()) == [key]
# repeat with same args
context.set_installer(key, installer)
assert context.get_installer(key) == installer
assert list(context.get_installer_keys()) == [key]
# repeat with new installer
context.set_installer(key, installer2)
assert context.get_installer(key) == installer2
assert list(context.get_installer_keys()) == [key]
# repeat with new key
key2 = 'fake-port'
context.set_installer(key2, installer2)
assert context.get_installer(key2) == installer2
assert set(context.get_installer_keys()) == set([key, key2])
# test installer deletion
key3 = 'fake3'
context.set_installer(key3, installer2)
assert context.get_installer(key3) == installer2
assert set(context.get_installer_keys()) == set([key, key2, key3])
context.set_installer(key3, None)
try:
context.get_installer(key3)
assert False
except KeyError:
pass
assert set(context.get_installer_keys()) == set([key, key2])
def test_InstallerContext_os_installers():
from rosdep2.installers import InstallerContext, Installer
from rospkg.os_detect import OsDetect
detect = OsDetect()
context = InstallerContext(detect)
context.verbose = True
os_key = 'ubuntu'
try:
context.get_os_installer_keys(os_key)
assert False, 'should have raised'
except KeyError:
pass
try:
context.get_default_os_installer_key(os_key)
assert False, 'should have raised'
except KeyError:
pass
try:
context.add_os_installer_key(os_key, 'fake-key')
assert False, 'should have raised'
except KeyError:
pass
try:
context.set_default_os_installer_key(os_key, 'non-method')
assert False, 'should have raised'
except KeyError:
pass
try:
context.set_default_os_installer_key(os_key, lambda self: 'fake-key')
assert False, 'should have raised'
except KeyError:
pass
try:
context.get_default_os_installer_key('bad-os')
assert False, 'should have raised'
except KeyError:
pass
installer_key1 = 'fake1'
installer_key2 = 'fake2'
class FakeInstaller(Installer):
pass
class FakeInstaller2(Installer):
pass
# configure our context with two valid installers
context.set_installer(installer_key1, FakeInstaller())
context.set_installer(installer_key2, FakeInstaller2())
# start adding installers for os_key
context.add_os_installer_key(os_key, installer_key1)
assert context.get_os_installer_keys(os_key) == [installer_key1]
# retest set_default_os_installer_key, now with installer_key not configured on os
try:
context.set_default_os_installer_key(os_key, lambda self: installer_key2)
assert False, 'should have raised'
except KeyError as e:
assert 'add_os_installer' in str(e), e
# now properly add in key2
context.add_os_installer_key(os_key, installer_key2)
assert set(context.get_os_installer_keys(os_key)) == set([installer_key1, installer_key2])
# test default
assert context.get_default_os_installer_key(os_key) is None
context.set_default_os_installer_key(os_key, lambda self: installer_key1)
assert installer_key1 == context.get_default_os_installer_key(os_key)
context.set_default_os_installer_key(os_key, lambda self: installer_key2)
assert installer_key2 == context.get_default_os_installer_key(os_key)
# retest set_default_os_installer_key, now with invalid os
try:
context.set_default_os_installer_key('bad-os', lambda self: installer_key1)
assert False, 'should have raised'
except KeyError:
pass
def test_Installer_tripwire():
from rosdep2.installers import Installer
try:
Installer().is_installed('foo')
assert False
except NotImplementedError:
pass
try:
Installer().get_install_command('foo')
assert False
except NotImplementedError:
pass
try:
Installer().resolve({})
assert False
except NotImplementedError:
pass
try:
Installer().unique([])
assert False
except NotImplementedError:
pass
assert Installer().get_depends({}) == []
def detect_fn_empty(packages):
return []
def detect_fn_all(packages):
return packages
# return any packages that are string length 1
def detect_fn_single(packages):
return [p for p in packages if len(p) == 1]
def test_PackageManagerInstaller():
from rosdep2.installers import PackageManagerInstaller
try:
PackageManagerInstaller(detect_fn_all).get_install_command(['foo'])
assert False
except NotImplementedError:
pass
def test_PackageManagerInstaller_resolve():
from rosdep2 import InvalidData
from rosdep2.installers import PackageManagerInstaller
installer = PackageManagerInstaller(detect_fn_all)
assert ['baz'] == installer.resolve(dict(depends=['foo', 'bar'], packages=['baz']))
assert ['baz', 'bar'] == installer.resolve(dict(packages=['baz', 'bar']))
# test string logic
assert ['baz'] == installer.resolve(dict(depends=['foo', 'bar'], packages='baz'))
assert ['baz', 'bar'] == installer.resolve(dict(packages='baz bar'))
assert ['baz'] == installer.resolve('baz')
assert ['baz', 'bar'] == installer.resolve('baz bar')
# test list logic
assert ['baz'] == installer.resolve(['baz'])
assert ['baz', 'bar'] == installer.resolve(['baz', 'bar'])
# test invalid data
try:
installer.resolve(0)
assert False, 'should have raised'
except InvalidData:
pass
def test_PackageManagerInstaller_depends():
from rosdep2.installers import PackageManagerInstaller
installer = PackageManagerInstaller(detect_fn_all, supports_depends=True)
assert ['foo', 'bar'] == installer.get_depends(dict(depends=['foo', 'bar'], packages=['baz']))
installer = PackageManagerInstaller(detect_fn_all, supports_depends=False)
assert [] == installer.get_depends(dict(depends=['foo', 'bar'], packages=['baz']))
def test_PackageManagerInstaller_unique():
from rosdep2.installers import PackageManagerInstaller
installer = PackageManagerInstaller(detect_fn_all)
assert [] == installer.unique()
assert [] == installer.unique([])
assert [] == installer.unique([], [])
assert ['a'] == installer.unique([], [], ['a'])
assert ['a'] == installer.unique(['a'], [], ['a'])
assert set(['a', 'b', 'c']) == set(installer.unique(['a', 'b', 'c'], ['a', 'b', 'c']))
assert set(['a', 'b', 'c']) == set(installer.unique(['a'], ['b'], ['c']))
assert set(['a', 'b', 'c']) == set(installer.unique(['a', 'b'], ['c']))
assert set(['a', 'b', 'c']) == set(installer.unique(['a', 'b'], ['c', 'a']))
def test_PackageManagerInstaller_is_installed():
from rosdep2.installers import PackageManagerInstaller
installer = PackageManagerInstaller(detect_fn_all)
for r in ['a', 'b', 'c']:
assert installer.is_installed(r), installer.is_installed(r)
installer = PackageManagerInstaller(detect_fn_empty)
for r in ['a', 'b', 'c']:
assert not installer.is_installed(r), installer.is_installed(r)
def test_PackageManagerInstaller_get_packages_to_install():
from rosdep2.installers import PackageManagerInstaller
installer = PackageManagerInstaller(detect_fn_all)
assert [] == installer.get_packages_to_install([])
assert [] == installer.get_packages_to_install(['a', 'b', 'c'])
assert set(['a', 'b', 'c']) == set(installer.get_packages_to_install(['a', 'b', 'c'], reinstall=True))
installer = PackageManagerInstaller(detect_fn_empty)
assert set(['a', 'b', 'c']) == set(installer.get_packages_to_install(['a', 'b', 'c']))
assert set(['a', 'b', 'c']) == set(installer.get_packages_to_install(['a', 'b', 'c'], reinstall=True))
installer = PackageManagerInstaller(detect_fn_single)
assert set(['baba', 'cada']) == set(installer.get_packages_to_install(['a', 'baba', 'b', 'cada', 'c']))
def test_RosdepInstaller_ctor():
# tripwire/coverage
from rosdep2 import create_default_installer_context
from rosdep2.lookup import RosdepLookup
from rosdep2.installers import RosdepInstaller
lookup = RosdepLookup.create_from_rospkg()
context = create_default_installer_context()
installer = RosdepInstaller(context, lookup)
assert lookup == installer.lookup
assert context == installer.installer_context
def test_RosdepInstaller_get_uninstalled():
from rosdep2 import create_default_installer_context
from rosdep2.lookup import RosdepLookup
from rosdep2.installers import RosdepInstaller
from rosdep2.platforms.debian import APT_INSTALLER
rospack, rosstack = get_test_rospkgs()
# create our test fixture. use most of the default toolchain, but
# replace the apt installer with one that we can have more fun
# with. we will do all tests with ubuntu lucid keys -- other
# tests should cover different resolution cases.
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack, sources_loader=sources_loader)
context = create_default_installer_context()
context.set_os_override('ubuntu', 'lucid')
installer = RosdepInstaller(context, lookup)
# in this first test, detect_fn detects everything as installed
fake_apt = get_fake_apt(lambda x: x)
context.set_installer(APT_INSTALLER, fake_apt)
for verbose in [True, False]:
tests = [['roscpp_fake'], ['roscpp_fake', 'rospack_fake'], ['empty_package'],
['roscpp_fake', 'rospack_fake', 'empty_package'],
['roscpp_fake', 'rospack_fake'],
]
for test in tests:
uninstalled, errors = installer.get_uninstalled(test, verbose)
assert not uninstalled, uninstalled
assert not errors, errors
# in this second test, detect_fn detects nothing as installed
fake_apt = get_fake_apt(lambda x: [])
context.set_installer(APT_INSTALLER, fake_apt)
for verbose in [True, False]:
uninstalled, errors = installer.get_uninstalled(['empty'], verbose)
assert not uninstalled, uninstalled
assert not errors
expected = set(['libltdl-dev', 'libboost1.40-all-dev', 'libtool'])
uninstalled, errors = installer.get_uninstalled(['roscpp_fake'], verbose)
keys, values = zip(*uninstalled)
apt_uninstalled = []
for k, v in uninstalled:
if k == APT_INSTALLER:
apt_uninstalled.extend(v)
assert list(set(keys)) == [APT_INSTALLER]
assert set(apt_uninstalled) == expected
assert not errors
expected = ['libtinyxml-dev']
uninstalled, errors = installer.get_uninstalled(['rospack_fake'], verbose)
keys, values = zip(*uninstalled)
apt_uninstalled = []
for k, v in uninstalled:
if k == APT_INSTALLER:
apt_uninstalled.extend(v)
assert list(set(keys)) == [APT_INSTALLER]
assert apt_uninstalled == expected, uninstalled
assert not errors
def get_fake_apt(detect_fn):
# mainly did this to keep coverage results
from rosdep2.installers import PackageManagerInstaller
class FakeAptInstaller(PackageManagerInstaller):
"""
An implementation of the Installer for use on debian style
systems.
"""
def __init__(self):
super(FakeAptInstaller, self).__init__(detect_fn)
def get_install_command(self, resolved, interactive=True, reinstall=False):
return [[resolved, interactive, reinstall]]
return FakeAptInstaller()
def test_RosdepInstaller_get_uninstalled_unconfigured():
from rosdep2 import create_default_installer_context, RosdepInternalError
from rosdep2.lookup import RosdepLookup, ResolutionError
from rosdep2.installers import RosdepInstaller, PackageManagerInstaller
from rosdep2.platforms.debian import APT_INSTALLER
rospack, rosstack = get_test_rospkgs()
sources_loader = create_test_SourcesListLoader()
# create our test fixture. we want to setup a fixture that cannot resolve the rosdep data in order to test error conditions
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack, sources_loader=sources_loader)
context = create_default_installer_context()
context.set_os_override('ubuntu', 'lucid')
installer = RosdepInstaller(context, lookup)
# - delete the apt installer
context.set_installer(APT_INSTALLER, None)
for verbose in [True, False]:
uninstalled, errors = installer.get_uninstalled(['empty'], verbose)
assert not uninstalled, uninstalled
assert not errors
# make sure there is an error when we lookup something that resolves to an apt depend
uninstalled, errors = installer.get_uninstalled(['roscpp_fake'], verbose)
assert not uninstalled, uninstalled
assert list(errors.keys()) == ['roscpp_fake']
uninstalled, errors = installer.get_uninstalled(['roscpp_fake', 'stack1_p1'], verbose)
assert not uninstalled, uninstalled
assert set(errors.keys()) == set(['roscpp_fake', 'stack1_p1'])
print(errors)
assert isinstance(errors['roscpp_fake'], ResolutionError), errors['roscpp_fake'][0]
# fake/bad installer to test that we re-cast general installer issues
class BadInstaller(PackageManagerInstaller):
def __init__(self):
super(BadInstaller, self).__init__(lambda x: x)
def get_packages_to_install(*args):
raise Exception('deadbeef')
context.set_installer(APT_INSTALLER, BadInstaller())
try:
installer.get_uninstalled(['roscpp_fake'])
assert False, 'should have raised'
except RosdepInternalError as e:
assert 'apt' in str(e)
# annoying mock to test generally impossible error condition
from mock import Mock
lookup = Mock(spec=RosdepLookup)
lookup.resolve_all.return_value = ([('bad-key', ['stuff'])], [])
installer = RosdepInstaller(context, lookup)
try:
installer.get_uninstalled(['roscpp_fake'])
assert False, 'should have raised'
except RosdepInternalError:
pass
@contextmanager
def fakeout():
realstdout = sys.stdout
realstderr = sys.stderr
fakestdout = StringIO()
fakestderr = StringIO()
sys.stdout = fakestdout
sys.stderr = fakestderr
yield fakestdout, fakestderr
sys.stdout = realstdout
sys.stderr = realstderr
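# Hedged usage sketch (not part of the original tests): capturing arbitrary
# output with the fakeout() helper defined above.
def _example_fakeout_usage():
    with fakeout() as (out, err):
        print('captured')
    return out.getvalue().strip()   # -> 'captured'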
def test_RosdepInstaller_install_resolved():
from rosdep2 import create_default_installer_context
from rosdep2.lookup import RosdepLookup
from rosdep2.installers import RosdepInstaller
from rosdep2.platforms.debian import APT_INSTALLER
rospack, rosstack = get_test_rospkgs()
# create our test fixture. use most of the default toolchain, but
# replace the apt installer with one that we can have more fun
# with. we will do all tests with ubuntu lucid keys -- other
# tests should cover different resolution cases.
sources_loader = create_test_SourcesListLoader()
lookup = RosdepLookup.create_from_rospkg(rospack=rospack, rosstack=rosstack, sources_loader=sources_loader)
context = create_default_installer_context()
context.set_os_override('ubuntu', 'lucid')
installer = RosdepInstaller(context, lookup)
with fakeout() as (stdout, stderr):
installer.install_resolved(APT_INSTALLER, [], simulate=True, verbose=False)
with fakeout() as (stdout, stderr):
installer.install_resolved(APT_INSTALLER, [], simulate=True, verbose=True)
assert stdout.getvalue().strip() == '#No packages to install'
with fakeout() as (stdout, stderr):
try:
installer.install_resolved(APT_INSTALLER, ['rosdep-fake1', 'rosdep-fake2'], simulate=True, verbose=True)
except OSError as e:
if str(e).count('[Errno 2] No such file or directory') == 0:
raise
return True
stdout_lines = [x.strip() for x in stdout.getvalue().split('\n') if x.strip()]
assert len(stdout_lines) == 3
assert stdout_lines[0] == '#[apt] Installation commands:'
assert 'sudo -H apt-get install rosdep-fake1' in stdout_lines, 'stdout_lines: %s' % stdout_lines
assert 'sudo -H apt-get install rosdep-fake2' in stdout_lines, 'stdout_lines: %s' % stdout_lines
| allenh1/rosdep | test/test_rosdep_installers.py | Python | bsd-3-clause | 22,802 |
"""contains a cache to store router's certificates
Two lists are kept:
- ApprovedCert which contains the certificate of router that provided a valid Certification Path
- TBApprovedCert that temporarily store certificates that can't be yet verified
"""
from __future__ import with_statement
import tempfile
import os
import hashlib
from subprocess import Popen, PIPE
from threading import RLock
from NDprotector.Log import warn
import NDprotector
import NDprotector.Cleanup
from scapy6send.cert import *
# after this period, an unverifiable Certificate Path is discarded
TIMEOUT = 60
def test_CertCache():
# verifying the certificate validation process
NDprotector.trustanchors = [ "examples/test/cacert.pem" ]
# we consider that we have the X.509 extensions
# for IP addresses
NDprotector.x509_ipextension = True
# we do not test the ECC support
NDprotector.ECCsupport = False
# certificate's Public Key
for ta in NDprotector.trustanchors:
with open("examples/test/level1-cert.pem") as f:
certl1 = f.read()
with open("examples/test/level2-cert.pem") as f:
certl2 = f.read()
certl2 = Cert(certl2).output("DER")
cc = CertCache()
cc2 = CertCache()
cc.storecert(1,certl1)
cc.storecert(1,certl2)
# no certificate have been verified yet
assert len(cc.ApprovedCert) == 0
# this certificate path is correct and
# triggers an entry creation in the Approved Certificate list
cc.checkcertpath(1)
assert len(cc.ApprovedCert) == 1
# this certificate path is uncompleted and
# does not modify anything
cc.storecert(2,certl2)
cc.checkcertpath(2)
assert len(cc.ApprovedCert) == 1
certl2 = Cert(certl2)
# prefix authorized by the cert
prefixes = ["2001:aaaa:bbbb::"] # /48
pk_hash = sha.sha(certl2.key.derkey).digest()[:16]
# the 0 means RSA/SHA-1
assert cc.trustable_hash(pk_hash, prefixes, 0)
prefixes = ["2001:aaaa:cccc::"] # /48
assert not cc.trustable_hash(pk_hash, prefixes, 0)
cc.storeid(4242)
assert cc2.id_match(4242)
assert cc.id_match(2424) == False
def test_helper_func():
addr1_int = addr_to_int("2001:660::")
addr2_int = addr_to_int("2001:600::")
addr3_int = addr_to_int("2001::")
assert (addr1_int & prefix_mask(16)) == (addr2_int & prefix_mask(16))
assert (addr1_int & prefix_mask(19)) == (addr2_int & prefix_mask(19))
assert (addr2_int & prefix_mask(22)) != (addr3_int & prefix_mask(22))
def addr_to_int(addr_str):
"""An help function that convert a printable address into
the int number corresponding to that address"""
addr_net = socket.inet_pton(socket.AF_INET6,addr_str)
addr_int = 0
for s in addr_net:
addr_int = addr_int * 256 + ord(s)
return addr_int
def prefix_mask(length):
    """return a bit mask that keeps the first "length" bits of an IPv6 address"""
    return (2**128-1)-(2**(128-length) -1)
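# Illustrative sketch, not part of the original module: how the two helpers
# above combine to test that an address falls inside a /48 prefix.
def _example_prefix_match():
    prefix = addr_to_int("2001:aaaa:bbbb::")
    addr = addr_to_int("2001:aaaa:bbbb:1234::1")
    return (addr & prefix_mask(48)) == (prefix & prefix_mask(48))   # True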
class CertCache(object):
"""a certificate cache (using the DP borg)"""
__shared_state = {}
def __init__(self):
# DP borg: all instances share the same variables
self.__dict__ = self.__shared_state
if not hasattr(self,"ApprovedCert"):
self.ApprovedCert = []
if not hasattr(self,"ApprLock"):
self.ApprLock= RLock()
if not hasattr(self,"TBApprovedCert"):
self.TBApprovedCert = {}
if not hasattr(self,"TBApprLock"):
self.TBApprLock = RLock()
if not hasattr(self,"Id"):
self.Id = {}
if not hasattr(self,"IdLock"):
self.IdLock = RLock()
# control the TBapprovedCert cache cleaning
if not hasattr(self,"clean"):
self.clean = True
def storeid(self,id):
"""store the Identifier when sending a CPS"""
warn("storing ID %d for a new CPS\n" % id)
with self.IdLock:
self.Id[id] = TIMEOUT
def id_match(self,id):
"""verifies that a Identifier carried in a CPA
matches a sent CPS"""
warn("checking ID %d against a previously sent CPS\n" % id)
with self.IdLock:
return id in self.Id
def storecert(self,id,cert):
"""temporarly store certs, they are sorted by their ID"""
warn("storing on cert for Certificate Path #%d\n" % id)
with self.TBApprLock:
certpath = []
ttl = TIMEOUT
try:
(certpath, oldttl) = self.TBApprovedCert[id]
except KeyError:
pass
certpath.append(Cert(cert))
self.TBApprovedCert[id] = (certpath, ttl)
def checkcertpath(self,id):
"""check if a complete cert path is valid,
if it is, the last cert is moved to the ApprovedCert list
if it isn't, it is discarded"""
warn("Verifying certification path for #%d\n" % id)
with self.TBApprLock:
try:
valid_path = False
certs, _ = self.TBApprovedCert[id]
# removes everything if the last certificate in the chain
# is already trusted
already_trusted = False
with self.ApprLock:
for accepted_cert in [ c.output("DER") for c in self.ApprovedCert ] :
if accepted_cert == certs[-1].output("DER"):
warn("The Certificate Path we received is already trusted\n")
already_trusted = True
if not already_trusted:
# we concat all the cert we got in a new file
cert_desc, certfilename = tempfile.mkstemp()
valid_IPExt = True
if NDprotector.x509_ipextension:
# we check that each certificate includes the previous one
# each certificate are expected to carry an IP extension
# address with only 1s
(prev_addr,prev_preflen) = certs[0].IPAddrExt
prev_addr = addr_to_int(prev_addr)
try:
for cert in certs:
(addr,preflen) = cert.IPAddrExt
addr = addr_to_int(addr)
if (addr & prefix_mask(prev_preflen)) == \
(prev_addr & prefix_mask(prev_preflen)) and \
prev_preflen <= preflen :
prev_addr = addr
prev_preflen = preflen
# this prefix is not contained inside its parent's certificate
else:
warn("Certificate's IP extension does not"
" match its parent's Certificate\n")
valid_IPExt = False
break
# if we get in there, it probably means that one certificate is lacking
# of IP address extension
except TypeError:
warn("At least one certificate in the chain seems "
"to lack of the X.509 Extensions for IP Address\n")
valid_IPExt = False
if valid_IPExt:
for cert in certs:
os.write(cert_desc,cert.output(fmt="PEM"))
os.close(cert_desc)
for ta in NDprotector.trustanchors:
tacert = Cert(ta)
# we copy the TA in a temporary file
ca_desc, cafilename = tempfile.mkstemp()
os.write(ca_desc, tacert.output(fmt="PEM"))
os.close(ca_desc)
# XXX double check this command
# we ask openssl to check the certificate for us
cmd = "openssl verify -CAfile %s %s" % (cafilename,certfilename)
res = Popen(cmd, stdout=PIPE, shell=True)
output = res.stdout.read()
# we clean all the temporary files
os.unlink(cafilename)
if "OK" in output:
valid_path = True
break
os.unlink(certfilename)
if valid_path:
warn("We received a complete and valid Certification Path\n")
with self.ApprLock:
# only the last certificate from the chain is valuable
self.ApprovedCert.append(certs[-1])
# either way, we remove the cert path that has been processed
del self.TBApprovedCert[id]
except KeyError:
pass
with self.IdLock:
try:
del self.Id[id]
except KeyError:
pass
def trustable_hash(self,hash_cert, prefixes, sigtypeID):
"""check if a hash contained in a RSA signature option corresponds to a trustable certificate
Also check if the certificate's IP Addr extension matches to the advertised prefixes"""
hashfunc = getattr(hashlib, SigTypeHashfunc[sigtypeID])
try:
with self.ApprLock:
# case #1: we have accepted a certificate for these prefixes
for cert in self.ApprovedCert:
if NDprotector.x509_ipextension:
(addr,preflen) = cert.IPAddrExt
addr = addr_to_int(addr)
if hash_cert == hashfunc(cert.key.derkey).digest()[:16]:
if NDprotector.x509_ipextension:
for prefix in\
(((addr_to_int(p) & prefix_mask(preflen)) for p in prefixes)):
if prefix != (addr & prefix_mask(preflen)):
return False
else:
return True
elif not NDprotector.x509_ipextension:
return True
# case #2: the certificate linked to the messages
# is directly a trust anchor
for certfile in NDprotector.trustanchors:
cert = Cert(certfile)
if NDprotector.x509_ipextension:
(addr,preflen) = cert.IPAddrExt
addr = addr_to_int(addr)
                if hash_cert == hashfunc(cert.key.derkey).digest()[:16]:
if NDprotector.x509_ipextension:
for prefix in ((addr_to_int(p) & prefix_mask(preflen) for p in prefixes)):
if prefix != (addr & prefix_mask(preflen)):
return False
else:
return True
elif not NDprotector.x509_ipextension:
return True
# likely due to a missing IP Addr extension
except TypeError:
warn("The verified certificate most likely "
"does not have any IP addresses extension field\n")
return False
def close_cleaning_thread(self):
"""when the program is exiting, we need to close the cleaning thread"""
self.clean = False
def cleaning_certcache():
"""a thread that cleans the Certificate cache regularly"""
cc = CertCache()
with cc.IdLock:
for id, value in cc.Id.items():
cc.Id[id] = value - 1
if cc.Id[id] <= 0:
# TTL has reached its limit, remove the entry
del cc.Id[id]
with cc.TBApprLock:
for id, misc in cc.TBApprovedCert.items():
(certs, ttl) = misc
if ttl <=0:
del cc.TBApprovedCert[id]
else:
cc.TBApprovedCert[id] = (certs, ttl - 1)
def CertCacheStart():
# start the cleaning Thread for the CC
NDprotector.Cleanup.cleanup_thread_subscribe(cleaning_certcache)
# to ensure default values are initialized
CertCache()
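# Hedged usage sketch, not part of the original module: CertCache is a Borg
# (all instances share one __dict__), so state stored through one handle is
# visible through any other.
def _example_shared_state():
    cc_a = CertCache()
    cc_b = CertCache()
    cc_a.storeid(1234)
    return cc_b.id_match(1234)   # True: both handles share the same Id dict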
| daveti/NDprotector | NDprotector/CertCache.py | Python | bsd-3-clause | 12,772 |
# Django settings for tumblelog project.
import sys
import os
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.append(os.path.join(PROJECT_ROOT, '../../../'))
DEBUG = True
TEMPLATE_DEBUG = False
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# MongoDB Databases
MONGODB_DATABASES = {
'default': {'name': 'django_mongoengine_test'}
}
DATABASES = {
'default': {
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'd2h8yt+x2g0$+e#9$z5z$auy%v0axov(wt3o*bj1#h^1+x^n(!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.common.CommonMiddleware'
)
ROOT_URLCONF = ''
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tumblelog.wsgi.application'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests.views'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| seraphlnWu/django-mongoengine | tests/settings.py | Python | bsd-3-clause | 4,488 |
from ctypes import *
from ctypes.test import need_symbol, xfail
import unittest
import os
import _ctypes_test
class BITS(Structure):
_fields_ = [("A", c_int, 1),
("B", c_int, 2),
("C", c_int, 3),
("D", c_int, 4),
("E", c_int, 5),
("F", c_int, 6),
("G", c_int, 7),
("H", c_int, 8),
("I", c_int, 9),
("M", c_short, 1),
("N", c_short, 2),
("O", c_short, 3),
("P", c_short, 4),
("Q", c_short, 5),
("R", c_short, 6),
("S", c_short, 7)]
func = CDLL(_ctypes_test.__file__).unpack_bitfields
func.argtypes = POINTER(BITS), c_char
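# Illustrative call (the return value is produced by the _ctypes_test extension
# bundled with CPython; "I" is one of the 9-bit fields declared above):
#   b = BITS()
#   b.I = 100
#   func(byref(b), b'I')   # -> 100, the value read back from the bit field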
##for n in "ABCDEFGHIMNOPQRS":
## print n, hex(getattr(BITS, n).size), getattr(BITS, n).offset
class C_Test(unittest.TestCase):
def test_ints(self):
for i in range(512):
for name in "ABCDEFGHI":
b = BITS()
setattr(b, name, i)
self.assertEqual(getattr(b, name), func(byref(b), name.encode('ascii')))
def test_shorts(self):
for i in range(256):
for name in "MNOPQRS":
b = BITS()
setattr(b, name, i)
self.assertEqual(getattr(b, name), func(byref(b), name.encode('ascii')))
signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types
class BitFieldTest(unittest.TestCase):
def test_longlong(self):
class X(Structure):
_fields_ = [("a", c_longlong, 1),
("b", c_longlong, 62),
("c", c_longlong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
x.a, x.b, x.c = -1, 7, -1
self.assertEqual((x.a, x.b, x.c), (-1, 7, -1))
def test_ulonglong(self):
class X(Structure):
_fields_ = [("a", c_ulonglong, 1),
("b", c_ulonglong, 62),
("c", c_ulonglong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
self.assertEqual((x.a, x.b, x.c), (0, 0, 0))
x.a, x.b, x.c = 7, 7, 7
self.assertEqual((x.a, x.b, x.c), (1, 7, 1))
def test_signed(self):
for c_typ in signed_int_types:
class X(Structure):
_fields_ = [("dummy", c_typ),
("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ)*2)
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, -1, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, -1, 0))
def test_unsigned(self):
for c_typ in unsigned_int_types:
class X(Structure):
_fields_ = [("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 7, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 7, 0))
def fail_fields(self, *fields):
return self.get_except(type(Structure), "X", (),
{"_fields_": fields})
def test_nonint_types(self):
# bit fields are not allowed on non-integer types.
result = self.fail_fields(("a", c_char_p, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
result = self.fail_fields(("a", c_void_p, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
if c_int != c_long:
result = self.fail_fields(("a", POINTER(c_int), 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
result = self.fail_fields(("a", c_char, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
class Dummy(Structure):
_fields_ = []
result = self.fail_fields(("a", Dummy, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
@need_symbol('c_wchar')
def test_c_wchar(self):
result = self.fail_fields(("a", c_wchar, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
def test_single_bitfield_size(self):
for c_typ in int_types:
result = self.fail_fields(("a", c_typ, -1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
result = self.fail_fields(("a", c_typ, 0))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
class X(Structure):
_fields_ = [("a", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
class X(Structure):
_fields_ = [("a", c_typ, sizeof(c_typ)*8)]
self.assertEqual(sizeof(X), sizeof(c_typ))
result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
def test_multi_bitfields_size(self):
class X(Structure):
_fields_ = [("a", c_short, 1),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short))
class X(Structure):
_fields_ = [("a", c_short, 1),
("a1", c_short),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, 0)
self.assertEqual(X.a1.offset, sizeof(c_short))
self.assertEqual(X.b.offset, sizeof(c_short)*2)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
class X(Structure):
_fields_ = [("a", c_short, 3),
("b", c_short, 14),
("c", c_short, 14)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, sizeof(c_short)*0)
self.assertEqual(X.b.offset, sizeof(c_short)*1)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
def get_except(self, func, *args, **kw):
try:
func(*args, **kw)
except Exception as detail:
return detail.__class__, str(detail)
def test_mixed_1(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 4)]
if os.name == "nt":
self.assertEqual(sizeof(X), sizeof(c_int)*2)
else:
self.assertEqual(sizeof(X), sizeof(c_int))
def test_mixed_2(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 32)]
self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int))
def test_mixed_3(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
self.assertEqual(sizeof(X), sizeof(c_byte))
def test_mixed_4(self):
class X(Structure):
_fields_ = [("a", c_short, 4),
("b", c_short, 4),
("c", c_int, 24),
("d", c_short, 4),
("e", c_short, 4),
("f", c_int, 24)]
# MSVC does NOT combine c_short and c_int into one field, GCC
# does (unless GCC is run with '-mms-bitfields' which
# produces code compatible with MSVC).
if os.name == "nt":
self.assertEqual(sizeof(X), sizeof(c_int) * 4)
else:
self.assertEqual(sizeof(X), sizeof(c_int) * 2)
def test_anon_bitfields(self):
# anonymous bit-fields gave a strange error message
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
class Y(Structure):
_anonymous_ = ["_"]
_fields_ = [("_", X)]
@need_symbol('c_uint32')
def test_uint32(self):
class X(Structure):
_fields_ = [("a", c_uint32, 32)]
x = X()
x.a = 10
self.assertEqual(x.a, 10)
x.a = 0xFDCBA987
self.assertEqual(x.a, 0xFDCBA987)
@need_symbol('c_uint64')
def test_uint64(self):
class X(Structure):
_fields_ = [("a", c_uint64, 64)]
x = X()
x.a = 10
self.assertEqual(x.a, 10)
x.a = 0xFEDCBA9876543211
self.assertEqual(x.a, 0xFEDCBA9876543211)
@need_symbol('c_uint32')
def test_uint32_swap_little_endian(self):
# Issue #23319
class Little(LittleEndianStructure):
_fields_ = [("a", c_uint32, 24),
("b", c_uint32, 4),
("c", c_uint32, 4)]
b = bytearray(4)
x = Little.from_buffer(b)
x.a = 0xabcdef
x.b = 1
x.c = 2
self.assertEqual(b, b'\xef\xcd\xab\x21')
@need_symbol('c_uint32')
@xfail
def test_uint32_swap_big_endian(self):
# Issue #23319
class Big(BigEndianStructure):
_fields_ = [("a", c_uint32, 24),
("b", c_uint32, 4),
("c", c_uint32, 4)]
b = bytearray(4)
x = Big.from_buffer(b)
x.a = 0xabcdef
x.b = 1
x.c = 2
self.assertEqual(b, b'\xab\xcd\xef\x12')
if __name__ == "__main__":
unittest.main()
| yotchang4s/cafebabepy | src/main/python/ctypes/test/test_bitfields.py | Python | bsd-3-clause | 10,212 |
import gc
import pytest
from pluginbase import PluginBase
@pytest.fixture(scope='function')
def base():
return PluginBase(package='dummy.plugins')
@pytest.fixture(scope='function')
def dummy_internal_name():
return 'pluginbase._internalspace._sp7bb7d8da1d24ae5a5205609c951b8be4'
@pytest.fixture(scope='function')
def source(base):
return base.make_plugin_source(searchpath=['./plugins'],
identifier='demo')
@pytest.yield_fixture(scope='function', autouse=True)
def run_garbage_collection():
gc.collect()
try:
yield
finally:
gc.collect()
| bailaohe/pluginbase | tests/conftest.py | Python | bsd-3-clause | 621 |
from django.urls import include, path, reverse
from django.utils.translation import gettext_lazy as _
from wagtail.admin.menu import MenuItem
from wagtail.contrib.forms import urls
from wagtail.contrib.forms.utils import get_forms_for_user
from wagtail.core import hooks
@hooks.register("register_admin_urls")
def register_admin_urls():
return [
path("forms/", include(urls, namespace="wagtailforms")),
]
class FormsMenuItem(MenuItem):
def is_shown(self, request):
# show this only if the user has permission to retrieve submissions for at least one form
return get_forms_for_user(request.user).exists()
@hooks.register("register_admin_menu_item")
def register_forms_menu_item():
return FormsMenuItem(
_("Forms"),
reverse("wagtailforms:index"),
name="forms",
icon_name="form",
order=700,
)
| wagtail/wagtail | wagtail/contrib/forms/wagtail_hooks.py | Python | bsd-3-clause | 881 |
from django.test import TestCase
from django.contrib.auth.models import User
from enroll.backends import ModelBackend
class TestModelBackend(TestCase):
def test_authenticate(self):
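        # The backend should accept either the username or the e-mail address as
        # the identifier, and reject an incorrect password.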
User.objects.create_user('bob', '[email protected]', 'secret')
backend = ModelBackend()
self.assertIsInstance(backend.authenticate('bob', 'secret'), User)
self.assertIsInstance(backend.authenticate('[email protected]', 'secret'), User)
self.assertIsNone(backend.authenticate('bob', 'invald_password'))
| farin/django-enroll | tests/testproject/test_app/tests/backends.py | Python | bsd-3-clause | 525 |
from __future__ import absolute_import
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from sentry.models import (
EventMapping, Group, GroupBookmark, GroupSeen, GroupStatus
)
from sentry.testutils import APITestCase
from sentry.testutils.helpers import parse_link_header
class GroupListTest(APITestCase):
def _parse_links(self, header):
# links come in {url: {...attrs}}, but we need {rel: {...attrs}}
links = {}
for url, attrs in parse_link_header(header).iteritems():
links[attrs['rel']] = attrs
attrs['href'] = url
return links
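    # Illustrative transformation (header contents made up): an input of
    #   {'http://testserver/?cursor=abc': {'rel': 'next', 'results': 'true'}}
    # is returned as
    #   {'next': {'rel': 'next', 'results': 'true',
    #             'href': 'http://testserver/?cursor=abc'}}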
def test_simple_pagination(self):
project = self.project
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(seconds=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?sort_by=date&limit=1', format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['next']['cursor'])
response = self.client.get(links['next']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group1.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'true'
assert links['next']['results'] == 'false'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 0
group3 = self.create_group(
checksum='c' * 32,
last_seen=now + timedelta(seconds=1),
)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group3.id)
def test_stats_period(self):
# TODO(dcramer): this test really only checks if validation happens
# on statsPeriod
project = self.project
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(seconds=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?statsPeriod=24h', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=14d', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=48h', format='json')
assert response.status_code == 400
def test_auto_resolved(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(days=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
def test_lookup_by_event_id(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group = self.create_group(checksum='a' * 32)
self.create_group(checksum='b' * 32)
EventMapping.objects.create(
event_id='c' * 32,
project=group.project,
group=group,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?query=' + ('c' * 32), format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group.id)
def test_lookup_by_unknown_event_id(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group = self.create_group(checksum='a' * 32)
self.create_group(checksum='b' * 32)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?query=' + ('c' * 32), format='json')
assert response.status_code == 200
assert len(response.data) == 0
class GroupUpdateTest(APITestCase):
def test_global_resolve(self):
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.put(url + '?status=unresolved', data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200, response.data
assert response.data == {
'status': 'resolved',
}
# the previously resolved entry should not be included
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.status == GroupStatus.RESOLVED
assert new_group1.resolved_at is None
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.status == GroupStatus.RESOLVED
assert new_group2.resolved_at is not None
# the muted entry should not be included
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.status == GroupStatus.MUTED
assert new_group3.resolved_at is None
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.status == GroupStatus.UNRESOLVED
assert new_group4.resolved_at is None
def test_selective_status_update(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200
assert response.data == {
'status': 'resolved',
}
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.resolved_at is None
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.resolved_at is not None
assert new_group2.status == GroupStatus.RESOLVED
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.resolved_at is None
assert new_group3.status == GroupStatus.MUTED
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.resolved_at is None
assert new_group4.status == GroupStatus.UNRESOLVED
def test_set_bookmarked(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'isBookmarked': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isBookmarked': True,
}
bookmark1 = GroupBookmark.objects.filter(group=group1, user=self.user)
assert bookmark1.exists()
bookmark2 = GroupBookmark.objects.filter(group=group2, user=self.user)
assert bookmark2.exists()
bookmark3 = GroupBookmark.objects.filter(group=group3, user=self.user)
assert not bookmark3.exists()
bookmark4 = GroupBookmark.objects.filter(group=group4, user=self.user)
assert not bookmark4.exists()
def test_set_public(self):
group1 = self.create_group(checksum='a' * 32, is_public=False)
group2 = self.create_group(checksum='b' * 32, is_public=False)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
)
response = self.client.put(url, data={
'isPublic': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isPublic': True,
}
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.is_public
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.is_public
def test_set_private(self):
group1 = self.create_group(checksum='a' * 32, is_public=True)
group2 = self.create_group(checksum='b' * 32, is_public=True)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
)
response = self.client.put(url, data={
'isPublic': 'false',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isPublic': False,
}
new_group1 = Group.objects.get(id=group1.id)
assert not new_group1.is_public
new_group2 = Group.objects.get(id=group2.id)
assert not new_group2.is_public
def test_set_has_seen(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'hasSeen': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'hasSeen': True,
}
r1 = GroupSeen.objects.filter(group=group1, user=self.user)
assert r1.exists()
r2 = GroupSeen.objects.filter(group=group2, user=self.user)
assert r2.exists()
r3 = GroupSeen.objects.filter(group=group3, user=self.user)
assert not r3.exists()
r4 = GroupSeen.objects.filter(group=group4, user=self.user)
assert not r4.exists()
@patch('sentry.api.endpoints.project_group_index.merge_group')
def test_merge(self, merge_group):
project = self.project
group1 = self.create_group(checksum='a' * 32, times_seen=1)
group2 = self.create_group(checksum='b' * 32, times_seen=50)
group3 = self.create_group(checksum='c' * 32, times_seen=2)
group4 = self.create_group(checksum='d' * 32)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&id={group3.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group3=group3,
)
response = self.client.put(url, data={
'merge': '1',
}, format='json')
assert response.status_code == 200
assert response.data == {
'merge': True,
}
assert len(merge_group.mock_calls) == 2
merge_group.delay.assert_any_call(from_object_id=group1.id, to_object_id=group2.id)
merge_group.delay.assert_any_call(from_object_id=group3.id, to_object_id=group2.id)
class GroupDeleteTest(APITestCase):
def test_global_is_forbidden(self):
project = self.project
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.delete(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 400
def test_delete_by_id(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
with self.tasks():
response = self.client.delete(url, format='json')
assert response.status_code == 204
new_group1 = Group.objects.filter(id=group1.id)
assert not new_group1.exists()
new_group2 = Group.objects.filter(id=group2.id)
assert not new_group2.exists()
new_group3 = Group.objects.filter(id=group3.id)
assert new_group3.exists()
new_group4 = Group.objects.filter(id=group4.id)
assert new_group4.exists()
| hongliang5623/sentry | tests/sentry/api/endpoints/test_project_group_index.py | Python | bsd-3-clause | 18,112 |
from __future__ import absolute_import
import socket
from mock import patch
from kombu import common
from kombu.common import (
Broadcast, maybe_declare,
send_reply, isend_reply, collect_replies,
declaration_cached, ignore_errors,
QoS, PREFETCH_COUNT_MAX,
entry_to_queue,
)
from kombu.exceptions import StdChannelError
from .utils import TestCase
from .utils import ContextMock, Mock, MockPool
class test_ignore_errors(TestCase):
def test_ignored(self):
connection = Mock()
connection.channel_errors = (KeyError, )
connection.connection_errors = (KeyError, )
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = \
()
with self.assertRaises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached(TestCase):
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
self.assertTrue(declaration_cached('foo', chan))
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
self.assertFalse(declaration_cached('foo', chan))
class test_Broadcast(TestCase):
def test_arguments(self):
q = Broadcast(name='test_Broadcast')
self.assertTrue(q.name.startswith('bcast.'))
self.assertEqual(q.alias, 'test_Broadcast')
self.assertTrue(q.auto_delete)
self.assertEqual(q.exchange.name, 'test_Broadcast')
self.assertEqual(q.exchange.type, 'fanout')
q = Broadcast('test_Broadcast', 'explicit_queue_name')
self.assertEqual(q.name, 'explicit_queue_name')
self.assertEqual(q.exchange.name, 'test_Broadcast')
class test_maybe_declare(TestCase):
def test_cacheable(self):
channel = Mock()
client = channel.connection.client = Mock()
client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.auto_delete = False
entity.is_bound = True
entity.channel = channel
maybe_declare(entity, channel)
self.assertEqual(entity.declare.call_count, 1)
self.assertIn(entity, channel.connection.client.declared_entities)
maybe_declare(entity, channel)
self.assertEqual(entity.declare.call_count, 1)
entity.channel.connection = None
with self.assertRaises(StdChannelError):
maybe_declare(entity)
def test_binds_entities(self):
channel = Mock()
channel.connection.client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.is_bound = False
entity.bind.return_value = entity
entity.bind.return_value.channel = channel
maybe_declare(entity, channel)
entity.bind.assert_called_with(channel)
def test_with_retry(self):
channel = Mock()
entity = Mock()
entity.can_cache_declaration = True
entity.is_bound = True
entity.channel = channel
maybe_declare(entity, channel, retry=True)
self.assertTrue(channel.connection.client.ensure.call_count)
class test_replies(TestCase):
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
self.assertTrue(producer.publish.call_count)
args = producer.publish.call_args
self.assertDictEqual(args[0][0], {'hello': 'world'})
self.assertDictEqual(args[1], {'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json'})
exchange.declare.assert_called_with()
@patch('kombu.common.ipublish')
def test_isend_reply(self, ipublish):
pool, exchange, req, msg, props = (Mock(), Mock(), Mock(),
Mock(), Mock())
isend_reply(pool, exchange, req, msg, props)
ipublish.assert_called_with(pool, send_reply,
(exchange, req, msg), props)
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
self.assertIs(m, body)
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with self.assertRaises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
self.assertIs(m, body)
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
self.assertFalse(message.ack.called)
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with self.assertRaises(StopIteration):
next(it)
self.assertFalse(channel.after_reply_message_received.called)
class test_insured(TestCase):
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
self.assertTrue(logger.error.called)
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def test_revive_producer(self):
on_revive = Mock()
channel = Mock()
common.revive_producer(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_producer(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
self.assertEqual(ret, 'works')
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
self.assertTrue(insured.called)
i_args, i_kwargs = insured.call_args
self.assertTupleEqual(i_args, (2, 2))
self.assertDictEqual(i_kwargs, {'foo': 'bar',
'connection': conn})
self.assertTrue(conn.autoretry.called)
ar_args, ar_kwargs = conn.autoretry.call_args
self.assertTupleEqual(ar_args, (fun, conn.default_channel))
self.assertTrue(ar_kwargs.get('on_revive'))
self.assertTrue(ar_kwargs.get('errback'))
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
def get_ipublish_args(self, ensure_returns=None):
producer = ContextMock()
pool = MockPool(producer)
fun = Mock()
ensure_returns = ensure_returns or Mock()
producer.connection.ensure.return_value = ensure_returns
return producer, pool, fun, ensure_returns
def test_ipublish(self):
producer, pool, fun, ensure_returns = self.get_ipublish_args()
ensure_returns.return_value = 'works'
ret = common.ipublish(pool, fun, (2, 2), {'foo': 'bar'})
self.assertEqual(ret, 'works')
self.assertTrue(producer.connection.ensure.called)
e_args, e_kwargs = producer.connection.ensure.call_args
self.assertTupleEqual(e_args, (producer, fun))
self.assertTrue(e_kwargs.get('on_revive'))
self.assertEqual(e_kwargs.get('errback'), common._ensure_errback)
ensure_returns.assert_called_with(2, 2, foo='bar', producer=producer)
def test_ipublish_with_custom_errback(self):
producer, pool, fun, _ = self.get_ipublish_args()
errback = Mock()
common.ipublish(pool, fun, (2, 2), {'foo': 'bar'}, errback=errback)
_, e_kwargs = producer.connection.ensure.call_args
self.assertEqual(e_kwargs.get('errback'), errback)
class MockConsumer(object):
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(self, *exc_info):
self.consumers.discard(self)
class test_itermessages(TestCase):
class MockConnection(object):
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
it = common.itermessages(conn, channel, 'q', limit=1,
Consumer=MockConsumer)
ret = next(it)
self.assertTupleEqual(ret, ('body', 'message'))
with self.assertRaises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
it = common.itermessages(conn, channel, 'q', limit=1,
Consumer=MockConsumer)
with self.assertRaises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
it = common.itermessages(conn, channel, 'q', limit=1,
Consumer=MockConsumer)
with self.assertRaises(StopIteration):
next(it)
class test_entry_to_queue(TestCase):
def test_calls_Queue_from_dict(self):
with patch('kombu.common.Queue') as Queue:
entry_to_queue('name', exchange='bar')
Queue.from_dict.assert_called_with('name', exchange='bar')
class test_QoS(TestCase):
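    # QoS wraps a prefetch-count callback: increment_eventually()/decrement_eventually()
    # adjust the target count lazily (and are no-ops while the count is 0), while
    # set()/update() push the value to the callback; a value that does not fit in
    # 16 bits is sent as 0.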
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on OSX Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
self.assertTrue(logger.warn.called)
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
self.assertEqual(qos.increment_eventually(), 11)
self.assertEqual(qos.increment_eventually(3), 14)
self.assertEqual(qos.increment_eventually(-30), 14)
self.assertEqual(qos.decrement_eventually(7), 7)
self.assertEqual(qos.decrement_eventually(), 6)
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
self.assertEqual(qos.increment_eventually(), 0)
self.assertEqual(qos.increment_eventually(3), 0)
self.assertEqual(qos.increment_eventually(-30), 0)
self.assertEqual(qos.decrement_eventually(7), 0)
self.assertEqual(qos.decrement_eventually(), 0)
self.assertEqual(qos.decrement_eventually(10), 0)
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
self.assertEqual(qos.value, 2010)
qos.value = 1000
threaded([add, sub]) # n = 2
self.assertEqual(qos.value, 1000)
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
qos.increment_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.increment_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1)
qos.decrement_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.decrement_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
self.assertEqual(qos.value, 10)
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
self.assertEqual(qos.value, 9)
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
self.assertEqual(qos.value, 8)
mconsumer.qos.assert_called_with(prefetch_count=9)
self.assertIn({'prefetch_count': 9}, mconsumer.qos.call_args)
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
self.assertEqual(qos.value, 0)
qos.increment_eventually()
self.assertEqual(qos.value, 0)
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
self.assertEqual(qos.value, 9)
qos.value = 0
qos.decrement_eventually()
self.assertEqual(qos.value, 0)
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
self.assertEqual(qos.prev, 12)
qos.set(qos.prev)
| mathom/kombu | kombu/tests/test_common.py | Python | bsd-3-clause | 15,811 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the BSD 3-clause license.
"""The Project class."""
import os
import textwrap
import pprint
import logging
import json
from ConfigParser import RawConfigParser
import os.path as op
import yaml
import pandas as pd
import numpy as np
from pysemantic.validator import SchemaValidator, DataFrameValidator, \
ParseErrorHandler
from pysemantic.errors import MissingProject, MissingConfigError, \
ParserArgumentError
from pysemantic.loggers import setup_logging
from pysemantic.utils import TypeEncoder
from pysemantic.exporters import AerospikeExporter
try:
from yaml import CDumper as Dumper
from yaml import CLoader as Loader
except ImportError:
from yaml import Dumper
from yaml import Loader
CONF_FILE_NAME = os.environ.get("PYSEMANTIC_CONFIG", "pysemantic.conf")
logger = logging.getLogger(__name__)
def locate_config_file():
"""Locates the configuration file used by semantic.
:return: Path of the pysemantic config file.
:rtype: str
:Example:
>>> locate_config_file()
'/home/username/pysemantic.conf'
"""
paths = [op.join(os.getcwd(), CONF_FILE_NAME),
op.join(op.expanduser('~'), CONF_FILE_NAME)]
for path in paths:
if op.exists(path):
logger.info("Config file found at {0}".format(path))
return path
    raise MissingConfigError("No pysemantic configuration file was found at"
" {0} or {1}".format(*paths))
def get_default_specfile(project_name):
"""Returns the specifications file used by the given project. The \
configuration file is searched for first in the current directory \
and then in the home directory.
    :param project_name: Name of the project for which to get the specfile.
:type project_name: str
:return: Path to the data dictionary of the project.
:rtype: str
:Example:
>>> get_default_specfile('skynet')
'/home/username/projects/skynet/schema.yaml'
"""
path = locate_config_file()
parser = RawConfigParser()
parser.read(path)
return parser.get(project_name, 'specfile')
def add_project(project_name, specfile):
"""Add a project to the global configuration file.
:param project_name: Name of the project
:param specfile: path to the data dictionary used by the project.
:type project_name: str
:type specfile: str
:return: None
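    :Example:
    (Illustrative; the project name and path below are placeholders.)
    >>> add_project('skynet', '/home/username/projects/skynet/schema.yaml')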
"""
if not op.isabs(specfile):
raise ValueError("Path to the schema should be absolute.")
path = locate_config_file()
parser = RawConfigParser()
parser.read(path)
parser.add_section(project_name)
parser.set(project_name, "specfile", specfile)
with open(path, "w") as f:
parser.write(f)
def add_dataset(project_name, dataset_name, dataset_specs):
"""Add a dataset to a project.
:param project_name: Name of the project to which the dataset is to be \
added.
:param dataset_name: Name of the dataset to be added.
:param dataset_specs: Specifications of the dataset.
:type project_name: str
:type dataset_name: str
:type dataset_specs: dict
:return: None
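    :Example:
    (Illustrative; the dataset name and specs below are placeholders.)
    >>> add_dataset('skynet', 't800', {'path': '/path/to/t800.csv', 'delimiter': ','})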
"""
data_dict = get_default_specfile(project_name)
with open(data_dict, "r") as f:
spec = yaml.load(f, Loader=Loader)
spec[dataset_name] = dataset_specs
with open(data_dict, "w") as f:
yaml.dump(spec, f, Dumper=Dumper, default_flow_style=False)
def remove_dataset(project_name, dataset_name):
"""Removes a dataset from a project.
:param project_name: Name of the project
:param dataset_name: Name of the dataset to remove
:type project_name: str
:type dataset_name: str
:return: None
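    :Example:
    (Illustrative; assumes the dataset 't800' is registered under 'skynet'.)
    >>> remove_dataset('skynet', 't800')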
"""
data_dict = get_default_specfile(project_name)
with open(data_dict, "r") as f:
spec = yaml.load(f, Loader=Loader)
del spec[dataset_name]
with open(data_dict, "w") as f:
yaml.dump(spec, f, Dumper=Dumper, default_flow_style=False)
def get_datasets(project_name=None):
"""Get names of all datasets registered under the project `project_name`.
    :param project_name: name of the project to list the datasets from. If \
`None` (default), datasets under all projects are returned.
    :type project_name: str
    :return: List of datasets listed under `project_name`, or if \
`project_name` is `None`, a dictionary such that \
{project_name: [list of datasets]}
:rtype: dict or list
:Example:
>>> get_datasets('skynet')
['sarah_connor', 'john_connor', 'kyle_reese']
>>> get_datasets()
{'skynet': ['sarah_connor', 'john_connor', 'kyle_reese'],
'south park': ['stan', 'kyle', 'cartman', 'kenny']}
"""
if project_name is not None:
specs = get_schema_specs(project_name)
return specs.keys()
else:
dataset_names = {}
projects = get_projects()
for project_name, _ in projects:
dataset_names[project_name] = get_datasets(project_name)
return dataset_names
def set_schema_fpath(project_name, schema_fpath):
"""Set the schema path for a given project.
:param project_name: Name of the project
:param schema_fpath: path to the yaml file to be used as the schema for \
the project.
:type project_name: str
:type schema_fpath: str
:return: True, if setting the schema path was successful.
:Example:
>>> set_schema_fpath('skynet', '/path/to/new/schema.yaml')
True
"""
path = locate_config_file()
parser = RawConfigParser()
parser.read(path)
if project_name in parser.sections():
if not parser.remove_option(project_name, "specfile"):
raise MissingProject
else:
parser.set(project_name, "specfile", schema_fpath)
with open(path, "w") as f:
parser.write(f)
return True
raise MissingProject
def get_projects():
"""Get the list of projects currently registered with pysemantic as a
list.
:return: List of tuples, such that each tuple is (project_name, \
location_of_specfile)
:rtype: list
:Example:
>>> get_projects()
    [('skynet', '/path/to/skynet.yaml'), ('south park', '/path/to/south_park.yaml')]
"""
path = locate_config_file()
parser = RawConfigParser()
parser.read(path)
projects = []
for section in parser.sections():
project_name = section
specfile = parser.get(section, "specfile")
projects.append((project_name, specfile))
return projects
def get_schema_specs(project_name, dataset_name=None):
"""Get the specifications of a dataset as specified in the schema.
:param project_name: Name of project
:param dataset_name: name of the dataset for which to get the schema. If \
None (default), schema for all datasets is returned.
:type project_name: str
:type dataset_name: str
:return: schema for dataset
:rtype: dict
:Example:
>>> get_schema_specs('skynet')
{'sarah connor': {'path': '/path/to/sarah_connor.csv',
'delimiter': ','},
'kyle reese': {'path': '/path/to/kyle_reese.tsv',
    'delimiter': '\t'},
    'john connor': {'path': '/path/to/john_connor.txt',
    'delimiter': ' '}
}
"""
schema_file = get_default_specfile(project_name)
with open(schema_file, "r") as f:
specs = yaml.load(f, Loader=Loader)
if dataset_name is not None:
return specs[dataset_name]
return specs
def set_schema_specs(project_name, dataset_name, **kwargs):
"""Set the schema specifications for a dataset.
:param project_name: Name of the project containing the dataset.
:param dataset_name: Name of the dataset of which the schema is being set.
:param kwargs: Schema fields that are dumped into the schema files.
:type project_name: str
:type dataset_name: str
:return: None
:Example:
>>> set_schema_specs('skynet', 'kyle reese',
path='/path/to/new/file.csv', delimiter='\t')
"""
schema_file = get_default_specfile(project_name)
with open(schema_file, "r") as f:
specs = yaml.load(f, Loader=Loader)
for key, value in kwargs.iteritems():
specs[dataset_name][key] = value
with open(schema_file, "w") as f:
yaml.dump(specs, f, Dumper=Dumper, default_flow_style=False)
def view_projects():
"""View a list of all projects currently registered with pysemantic.
:Example:
>>> view_projects()
Project skynet with specfile at /path/to/skynet.yaml
Project south park with specfile at /path/to/south_park.yaml
"""
projects = get_projects()
if len(projects) > 0:
for project_name, specfile in projects:
print "Project {0} with specfile at {1}".format(project_name,
specfile)
else:
msg = textwrap.dedent("""\
No projects found. You can add projects using the
        $ semantic add
command.
""")
print msg
def remove_project(project_name):
"""Remove a project from the global configuration file.
:param project_name: Name of the project to remove.
:type project_name: str
:return: True if the project existed
:rtype: bool
:Example:
>>> view_projects()
Project skynet with specfile at /path/to/skynet.yaml
Project south park with specfile at /path/to/south_park.yaml
>>> remove_project('skynet')
>>> view_projects()
Project south park with specfile at /path/to/south_park.yaml
"""
path = locate_config_file()
parser = RawConfigParser()
parser.read(path)
result = parser.remove_section(project_name)
if result:
with open(path, "w") as f:
parser.write(f)
return result
class Project(object):
"""The Project class, the entry point for most things in this module."""
def __init__(self, project_name=None, parser=None, schema=None):
"""The Project class.
:param project_name: Name of the project as specified in the \
pysemantic configuration file. If this is ``None``, then the
``schema`` parameter is expected to contain the schema
dictionary. (see below)
:param parser: The parser to be used for reading dataset files. The \
default is `pandas.read_table`.
:param schema: Dictionary containing the schema for the project. When
this argument is supplied (not ``None``), the ``project_name`` is
ignored, no specfile is read, and all the specifications for the data
are inferred from this dictionary.
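        :Example:
        (Illustrative; a minimal schema supplied at runtime instead of a specfile.)
        >>> schema = {'iris': {'path': '/path/to/iris.csv', 'delimiter': ','}}
        >>> project = Project(schema=schema)
        >>> iris = project.load_dataset('iris')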
"""
if project_name is not None:
setup_logging(project_name)
self.project_name = project_name
self.specfile = get_default_specfile(self.project_name)
logger.info(
"Schema for project {0} found at {1}".format(project_name,
self.specfile))
else:
setup_logging("no_name")
logger.info("Schema defined by user at runtime. Not reading any "
"specfile.")
self.specfile = None
self.validators = {}
if parser is not None:
self.user_specified_parser = True
else:
self.user_specified_parser = False
self.parser = parser
if self.specfile is not None:
with open(self.specfile, 'r') as f:
specifications = yaml.load(f, Loader=Loader)
else:
specifications = schema
self.column_rules = {}
self.df_rules = {}
for name, specs in specifications.iteritems():
self.column_rules[name] = specs.get('column_rules', {})
self.df_rules[name] = specs.get('dataframe_rules', {})
self.specifications = specifications
def export_dataset(self, dataset_name, dataframe=None, outpath=None):
"""Export a dataset to an exporter defined in the schema. If nothing is
        specified in the schema, simply export to a CSV file named
        <dataset_name>.csv.
        :param dataset_name: Name of the dataset to export.
:param dataframe: Pandas dataframe to export. If None (default), this \
dataframe is loaded using the `load_dataset` method.
        :type dataset_name: str
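        :Example:
        (Illustrative; assumes the 'skynet' project and its dataset exist.)
        >>> project = Project('skynet')
        >>> project.export_dataset('sarah connor', outpath='/tmp/sarah_connor.csv')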
"""
if dataframe is None:
dataframe = self.load_dataset(dataset_name)
config = self.specifications[dataset_name].get('exporter')
if outpath is None:
outpath = dataset_name + ".csv"
if config is not None:
if config['kind'] == "aerospike":
config['namespace'] = self.project_name
config['set'] = dataset_name
exporter = AerospikeExporter(config, dataframe)
exporter.run()
else:
suffix = outpath.split('.')[-1]
if suffix in ("h5", "hdf"):
group = r'/{0}/{1}'.format(self.project_name, dataset_name)
dataframe.to_hdf(outpath, group)
elif suffix == "csv":
dataframe.to_csv(outpath, index=False)
def reload_data_dict(self):
"""Reload the data dictionary and re-populate the schema."""
with open(self.specfile, "r") as f:
specifications = yaml.load(f, Loader=Loader)
self.validators = {}
self.column_rules = {}
self.df_rules = {}
logger.info("Reloading project information.")
self.specifications = specifications
for name, specs in specifications.iteritems():
logger.info("Schema for dataset {0}:".format(name))
logger.info(json.dumps(specs, cls=TypeEncoder))
self._init_validate(name)
self.column_rules[name] = specs.get('column_rules', {})
self.df_rules[name] = specs.get('dataframe_rules', {})
@property
def datasets(self):
""""List the datasets registered under the parent project.
:Example:
>>> project = Project('skynet')
>>> project.datasets
['sarah connor', 'john connor', 'kyle reese']
"""
return self.specifications.keys()
def _init_validate(self, dataset_name):
"""Given a dataset name, create a SchemaValidator object and add to the
cache.
:param dataset_name: Name of the dataset
"""
specs = self.specifications.get(dataset_name)
is_pickled = specs.get("pickle", False)
if self.specfile is not None:
validator = SchemaValidator.from_specfile(specfile=self.specfile,
name=dataset_name,
is_pickled=is_pickled)
else:
validator = SchemaValidator(specification=specs,
name=dataset_name,
is_pickled=is_pickled)
self.validators[dataset_name] = validator
def get_dataset_specs(self, dataset_name):
"""Returns the specifications for the specified dataset in the project.
:param dataset_name: Name of the dataset
:type dataset_name: str
:return: Parser arguments required to import the dataset in pandas.
:rtype: dict
"""
if dataset_name not in self.validators:
self._init_validate(dataset_name)
return self.validators[dataset_name].get_parser_args()
def get_project_specs(self):
"""Returns a dictionary containing the schema for all datasets listed
under this project.
:return: Parser arguments for all datasets listed under the project.
:rtype: dict
"""
specs = {}
for name, basespecs in self.specifications.iteritems():
if name not in self.validators:
self._init_validate(name)
validator = self.validators[name]
specs[name] = validator.get_parser_args()
return specs
def view_dataset_specs(self, dataset_name):
"""Pretty print the specifications for a dataset.
:param dataset_name: Name of the dataset
:type dataset_name: str
"""
specs = self.get_dataset_specs(dataset_name)
pprint.pprint(specs)
def update_dataset(self, dataset_name, dataframe, path=None, **kwargs):
"""This is tricky."""
org_specs = self.get_dataset_specs(dataset_name)
if path is None:
path = org_specs['filepath_or_buffer']
sep = kwargs.get('sep', org_specs['sep'])
index = kwargs.get('index', False)
dataframe.to_csv(path, sep=sep, index=index)
dtypes = {}
for col in dataframe:
dtype = dataframe[col].dtype
if dtype == np.dtype('O'):
dtypes[col] = str
elif dtype == np.dtype('float'):
dtypes[col] = float
elif dtype == np.dtype('int'):
dtypes[col] = int
else:
dtypes[col] = dtype
new_specs = {'path': path, 'delimiter': sep, 'dtypes': dtypes}
with open(self.specfile, "r") as fid:
specs = yaml.load(fid, Loader=Loader)
dataset_specs = specs[dataset_name]
dataset_specs.update(new_specs)
if "column_rules" in dataset_specs:
col_rules = dataset_specs['column_rules']
cols_to_remove = []
for colname in col_rules.iterkeys():
if colname not in dataframe.columns:
cols_to_remove.append(colname)
for colname in cols_to_remove:
del col_rules[colname]
logger.info("Attempting to update schema for dataset {0} to:".format(
dataset_name))
logger.info(json.dumps(dataset_specs, cls=TypeEncoder))
with open(self.specfile, "w") as fid:
yaml.dump(specs, fid, Dumper=Dumper,
default_flow_style=False)
def _sql_read(self, parser_args):
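        """Read a SQL source in a single call via pandas.
        A 'query' in the parser arguments takes precedence over a plain
        'table_name' read."""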
if parser_args.get('table_name'):
if parser_args.get('query'):
return pd.read_sql_query(sql=parser_args.get('query'),
con=parser_args['con'])
return pd.read_sql_table(
table_name=parser_args.get('table_name'),
con=parser_args.get('con'),
columns=parser_args.get('use_columns'),
index_col=parser_args.get('index_col')
)
elif parser_args.get('query'):
return pd.read_sql_query(sql=parser_args.get('query'),
con=parser_args['con'])
def _sql_iterator(self, parser_args):
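        """Read a SQL source chunk by chunk and concatenate the chunks.
        Used when the schema specifies a ``chunksize`` for the SQL source."""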
dfs = []
if parser_args.get('table_name'):
if parser_args.get('query'):
iterator = pd.read_sql_query(sql=parser_args.get('query'),
con=parser_args['con'],
chunksize=parser_args['chunksize'])
else:
iterator = pd.read_sql_table(
table_name=parser_args.get('table_name'),
con=parser_args.get('con'),
chunksize=parser_args.get('chunksize'),
columns=parser_args.get('use_columns'),
index_col=parser_args.get('index_col')
)
else:
iterator = pd.read_sql_query(sql=parser_args.get('query'),
con=parser_args['con'],
chunksize=parser_args['chunksize'])
while True:
try:
dfs.append(iterator.next())
except StopIteration:
break
except Exception as err:
logger.debug("SQL iterator failed: {}".format(err))
break
dfs.append(None)
return pd.concat(dfs)
def load_dataset(self, dataset_name):
"""Load and return a dataset.
:param dataset_name: Name of the dataset
:type dataset_name: str
:return: A pandas DataFrame containing the dataset.
:rtype: pandas.DataFrame
:Example:
>>> demo_project = Project('pysemantic_demo')
>>> iris = demo_project.load_dataset('iris')
>>> type(iris)
        pandas.core.frame.DataFrame
"""
if dataset_name not in self.validators:
self._init_validate(dataset_name)
validator = self.validators[dataset_name]
column_rules = self.column_rules.get(dataset_name, {})
df_rules = self.df_rules.get(dataset_name, {})
parser_args = validator.get_parser_args()
df_rules.update(validator.df_rules)
logger.info("Attempting to load dataset {} with args:".format(
dataset_name))
if validator.is_spreadsheet:
parser_args.pop('usecols', None)
logger.info(json.dumps(parser_args, cls=TypeEncoder))
if isinstance(parser_args, dict):
if validator.is_mysql or validator.is_postgresql:
if not (
parser_args.get('table_name') or parser_args.get('query')):
raise ParserArgumentError(
"No table_name or query was provided for the "
"postgres configuration.")
elif validator.sql_validator.chunksize is not None:
df = self._sql_iterator(parser_args)
else:
df = self._sql_read(parser_args)
else:
with ParseErrorHandler(parser_args, self) as handler:
df = handler.load()
if df is None:
raise ParserArgumentError("No valid parser arguments were " +
"inferred from the schema.")
if validator.is_spreadsheet and isinstance(validator.sheetname,
list):
df = pd.concat(df.itervalues(), axis=0)
logger.info("Success!")
df_validator = DataFrameValidator(data=df, rules=df_rules,
column_rules=column_rules)
logger.info("Commence cleaning dataset:")
logger.info("DataFrame rules:")
logger.info(json.dumps(df_rules, cls=TypeEncoder))
logger.info("Column rules:")
logger.info(json.dumps(column_rules, cls=TypeEncoder))
return df_validator.clean()
else:
dfs = []
for argset in parser_args:
with ParseErrorHandler(argset, self) as handler:
_df = handler.load()
df_validator = DataFrameValidator(data=_df,
column_rules=column_rules)
dfs.append(df_validator.clean())
df = pd.concat(dfs, axis=0)
return df.set_index(np.arange(df.shape[0]))
def load_datasets(self):
"""Load and return all datasets.
:return: dictionary like {dataset_name: dataframe}
:rtype: dict
"""
datasets = {}
for name in self.specifications.iterkeys():
if name not in self.validators:
self._init_validate(name)
datasets[name] = self.load_dataset(name)
return datasets
| jaidevd/pysemantic | pysemantic/project.py | Python | bsd-3-clause | 23,738 |
from __future__ import print_function
import unittest
import abstract_rendering.util as util
class EmptyListTests(unittest.TestCase):
def test_without_length(self):
ls = util.EmptyList()
self.assertIsNone(ls[0])
self.assertIsNone(ls[-1])
self.assertIsNone(ls[30])
self.assertIsNone(ls[9048])
self.assertIsNone(ls[100398384])
self.assertIsNone(ls[3])
self.assertIsNone(ls[490])
def test_with_length(self):
ls = util.EmptyList(7)
self.assertIsNone(ls[0])
self.assertIsNone(ls[1])
self.assertIsNone(ls[2])
self.assertIsNone(ls[3])
self.assertIsNone(ls[4])
self.assertIsNone(ls[5])
self.assertIsNone(ls[6])
self.assertRaises(IndexError, ls.__getitem__, -1)
self.assertRaises(IndexError, ls.__getitem__, 8)
class ColorTest(unittest.TestCase):
def _test(self, r, g, b, a):
c = util.Color(r, g, b, a)
self.assertEqual(util.Color(r, g, b, a), c)
self.assertEqual(c, [r, g, b, a])
def test1(self): self._test(0, 0, 0, 0)
def test2(self): self._test(10, 30, 40, 20)
def test3(self): self._test(255, 255, 255, 255)
def test_RedError(self):
self.assertRaises(ValueError, util.Color, 256, 0, 0, 0)
self.assertRaises(ValueError, util.Color, -1, 0, 0, 0)
def test_GreenError(self):
self.assertRaises(ValueError, util.Color, 0, 256, 0, 0)
self.assertRaises(ValueError, util.Color, 0, -1, 0, 0)
def test_BlueError(self):
self.assertRaises(ValueError, util.Color, 0, 0, 256, 0)
self.assertRaises(ValueError, util.Color, 0, 0, -1, 0)
def test_AlphaError(self):
self.assertRaises(ValueError, util.Color, 0, 0, 0, 256)
self.assertRaises(ValueError, util.Color, 0, 0, 0, -1)
class ZoomFitTest(unittest.TestCase):
def test_scale(self):
self.assertEqual(util.zoom_fit((10, 10), (0, 0, 10, 10)),
[0., 0., 1., 1.])
self.assertEqual(util.zoom_fit((10, 10), (0, 0, 20, 20)),
[0., 0., .5, .5])
self.assertEqual(util.zoom_fit((10, 10), (0, 0, 5, 5)),
[0., 0., 2., 2.])
self.assertEqual(util.zoom_fit((10, 10), (0, 0, 10, 20)),
[0., 0., .5, .5])
self.assertEqual(util.zoom_fit((10, 10), (0, 0, 10, 20), False),
[0., 0., 1., .5])
def test_pan(self):
self.assertEqual(util.zoom_fit((10, 10), (0, 0, 10, 10)),
[0., 0., 1., 1.])
self.assertEqual(util.zoom_fit((10, 10), (5, 5, 10, 10)),
[-5., -5., 1., 1.])
self.assertEqual(util.zoom_fit((10, 10), (-4, -7, 10, 10)),
[4., 7., 1., 1.])
def test_pan_scale(self):
self.assertEqual(util.zoom_fit((10, 10), (0, 0, 20, 20)),
[0., 0., .5, .5])
self.assertEqual(util.zoom_fit((10, 10), (5, 5, 20, 20)),
[-2.5, -2.5, .5, .5])
self.assertEqual(util.zoom_fit((10, 10), (-4, -7, 20, 20)),
[2., 3.5, .5, .5])
if __name__ == '__main__':
unittest.main()
| kcompher/abstract_rendering | abstract_rendering/test/utilTests.py | Python | bsd-3-clause | 3,232 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from operator import or_
from copy import deepcopy
from itertools import combinations
from functools import reduce
from collections import defaultdict
import numpy as np
from scipy.stats import pearsonr
from future.builtins import zip
from six import StringIO
from skbio._base import SkbioObject
from skbio.stats.distance import DistanceMatrix
from ._exception import (NoLengthError, DuplicateNodeError, NoParentError,
MissingNodeError, TreeError)
def distance_from_r(m1, m2):
r"""Estimates distance as (1-r)/2: neg correl = max distance
Parameters
----------
m1 : DistanceMatrix
a distance matrix to compare
m2 : DistanceMatrix
a distance matrix to compare
Returns
-------
float
The distance between m1 and m2
"""
return (1-pearsonr(m1.data.flat, m2.data.flat)[0])/2
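# Worked sketch: perfectly correlated matrices give distance 0, perfectly
# anti-correlated ones give distance 1 (the matrix below is illustrative):
#     m1 = DistanceMatrix([[0, 1, 2], [1, 0, 3], [2, 3, 0]], ['a', 'b', 'c'])
#     distance_from_r(m1, m1)   # r = +1 -> (1 - 1) / 2 = 0.0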
class TreeNode(SkbioObject):
r"""Representation of a node within a tree
A `TreeNode` instance stores links to its parent and optional children
nodes. In addition, the `TreeNode` can represent a `length` (e.g., a
branch length) between itself and its parent. Within this object, the use
of "children" and "descendants" is frequent in the documentation. A child
is a direct descendant of a node, while descendants are all nodes that are
below a given node (e.g., grand-children, etc).
Parameters
----------
name : str or None
A node can have a name. It is common for tips in particular to have
names, for instance, in a phylogenetic tree where the tips correspond
to species.
length : float, int, or None
Distances between nodes can be used to represent evolutionary
distances, time, etc.
parent : TreeNode or None
Connect this node to a parent
children : list of TreeNode or None
Connect this node to existing children
Attributes
----------
name
length
parent
children
id
"""
default_write_format = 'newick'
_exclude_from_copy = set(['parent', 'children', '_tip_cache',
'_non_tip_cache'])
def __init__(self, name=None, length=None, parent=None, children=None):
self.name = name
self.length = length
self.parent = parent
self._tip_cache = {}
self._non_tip_cache = {}
self._registered_caches = set()
self.children = []
self.id = None
if children is not None:
self.extend(children)
def __repr__(self):
r"""Returns summary of the tree
Returns
-------
str
A summary of this node and all descendants
Notes
-----
This method returns the name of the node and a count of tips and the
number of internal nodes in the tree
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c, d)root;"))
>>> repr(tree)
'<TreeNode, name: root, internal node count: 1, tips count: 3>'
"""
nodes = [n for n in self.traverse(include_self=False)]
n_tips = sum([n.is_tip() for n in nodes])
n_nontips = len(nodes) - n_tips
classname = self.__class__.__name__
name = self.name if self.name is not None else "unnamed"
return "<%s, name: %s, internal node count: %d, tips count: %d>" % \
(classname, name, n_nontips, n_tips)
def __str__(self):
r"""Returns string version of self, with names and distances
Returns
-------
str
Returns a Newick representation of the tree
See Also
--------
read
write
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> str(tree)
'((a,b)c);\n'
"""
fh = StringIO()
self.write(fh)
string = fh.getvalue()
fh.close()
return string
def __iter__(self):
r"""Node iter iterates over the `children`."""
return iter(self.children)
def __len__(self):
return len(self.children)
def __getitem__(self, i):
r"""Node delegates slicing to `children`."""
return self.children[i]
def _adopt(self, node):
r"""Update `parent` references but does NOT update `children`."""
self.invalidate_caches()
if node.parent is not None:
node.parent.remove(node)
node.parent = self
return node
def append(self, node):
r"""Appends a node to `children`, in-place, cleaning up refs
`append` will invalidate any node lookup caches, remove an existing
parent on `node` if one exists, set the parent of `node` to self
and add the `node` to `self` `children`.
Parameters
----------
node : TreeNode
An existing TreeNode object
See Also
--------
extend
Examples
--------
>>> from skbio import TreeNode
>>> root = TreeNode(name="root")
>>> child1 = TreeNode(name="child1")
>>> child2 = TreeNode(name="child2")
>>> root.append(child1)
>>> root.append(child2)
>>> print(root)
(child1,child2)root;
<BLANKLINE>
"""
self.children.append(self._adopt(node))
def extend(self, nodes):
r"""Append a `list` of `TreeNode` to `self`.
`extend` will invalidate any node lookup caches, remove existing
parents of the `nodes` if they have any, set their parents to self
and add the nodes to `self` `children`.
Parameters
----------
nodes : list of TreeNode
A list of TreeNode objects
See Also
--------
append
Examples
--------
>>> from skbio import TreeNode
>>> root = TreeNode(name="root")
>>> root.extend([TreeNode(name="child1"), TreeNode(name="child2")])
>>> print(root)
(child1,child2)root;
<BLANKLINE>
"""
self.children.extend([self._adopt(n) for n in nodes[:]])
def pop(self, index=-1):
r"""Remove a `TreeNode` from `self`.
Remove a child node by its index position. All node lookup caches
are invalidated, and the parent reference for the popped node will be
set to `None`.
Parameters
----------
index : int
The index position in `children` to pop
Returns
-------
TreeNode
The popped child
See Also
--------
remove
remove_deleted
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("(a,b)c;"))
>>> print(tree.pop(0))
a;
<BLANKLINE>
"""
return self._remove_node(index)
def _remove_node(self, idx):
r"""The actual (and only) method that performs node removal"""
self.invalidate_caches()
node = self.children.pop(idx)
node.parent = None
return node
def remove(self, node):
r"""Remove a node from self
Remove a `node` from `self` by identity of the node.
Parameters
----------
node : TreeNode
The node to remove from self's children
Returns
-------
bool
`True` if the node was removed, `False` otherwise
See Also
--------
pop
remove_deleted
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("(a,b)c;"))
>>> tree.remove(tree.children[0])
True
"""
for (i, curr_node) in enumerate(self.children):
if curr_node is node:
self._remove_node(i)
return True
return False
def remove_deleted(self, func):
r"""Delete nodes in which `func(node)` evaluates `True`.
Remove all descendants from `self` that evaluate `True` from `func`.
This has the potential to drop clades.
Parameters
----------
func : a function
A function that evaluates `True` when a node should be deleted
See Also
--------
pop
remove
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("(a,b)c;"))
>>> tree.remove_deleted(lambda x: x.name == 'b')
>>> print(tree)
(a)c;
<BLANKLINE>
"""
for node in self.traverse(include_self=False):
if func(node):
node.parent.remove(node)
def prune(self):
r"""Reconstructs correct topology after nodes have been removed.
Internal nodes with only one child will be removed and new connections
will be made to reflect change. This method is useful to call
following node removals as it will clean up nodes with singular
children.
Names and properties of singular children will override the names and
properties of their parents following the prune.
Node lookup caches are invalidated.
See Also
--------
shear
remove
pop
remove_deleted
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
>>> to_delete = tree.find('b')
>>> tree.remove_deleted(lambda x: x == to_delete)
>>> print(tree)
((a)c,(d,e)f)root;
<BLANKLINE>
>>> tree.prune()
>>> print(tree)
((d,e)f,a)root;
<BLANKLINE>
"""
# build up the list of nodes to remove so the topology is not altered
# while traversing
nodes_to_remove = []
for node in self.traverse(include_self=False):
if len(node.children) == 1:
nodes_to_remove.append(node)
# clean up the single children nodes
for node in nodes_to_remove:
child = node.children[0]
if child.length is None or node.length is None:
child.length = child.length or node.length
else:
child.length += node.length
node.parent.append(child)
node.parent.remove(node)
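# Length-merge sketch (illustrative): pruning "((a:3)x:2,b:1)r;" collapses the
# single-child node x and re-attaches a to r with length 3 + 2 = 5, giving
# "(b:1.0,a:5.0)r;"; if either length is None the other one is kept unchanged.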
def shear(self, names):
"""Lop off tips until the tree just has the desired tip names.
Parameters
----------
names : Iterable of str
The tip names on the tree to keep
Returns
-------
TreeNode
The resulting tree
Raises
------
ValueError
If the names do not exist in the tree
See Also
--------
prune
remove
pop
remove_deleted
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> t = TreeNode.read(StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
>>> sheared = t.shear(['G', 'M'])
>>> print(sheared)
(G:3.0,M:3.7);
<BLANKLINE>
"""
tcopy = self.deepcopy()
all_tips = {n.name for n in tcopy.tips()}
ids = set(names)
if not ids.issubset(all_tips):
raise ValueError("ids are not a subset of the tree!")
while len(list(tcopy.tips())) != len(ids):
for n in list(tcopy.tips()):
if n.name not in ids:
n.parent.remove(n)
tcopy.prune()
return tcopy
def copy(self):
r"""Returns a copy of self using an iterative approach
Perform an iterative deepcopy of self. It is not assured that the copy
of node attributes will be performed iteratively as that depends on
the copy method of the types being copied
Returns
-------
TreeNode
A new copy of self
See Also
--------
unrooted_deepcopy
unrooted_copy
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
>>> tree_copy = tree.copy()
>>> tree_nodes = set([id(n) for n in tree.traverse()])
>>> tree_copy_nodes = set([id(n) for n in tree_copy.traverse()])
>>> print(len(tree_nodes.intersection(tree_copy_nodes)))
0
"""
def __copy_node(node_to_copy):
r"""Helper method to copy a node"""
# this is _possibly_ dangerous, we're assuming the node to copy is
# of the same class as self, and has the same exclusion criteria.
# however, it is potentially dangerous to mix TreeNode subclasses
# within a tree, so...
result = self.__class__()
efc = self._exclude_from_copy
for key in node_to_copy.__dict__:
if key not in efc:
result.__dict__[key] = deepcopy(node_to_copy.__dict__[key])
return result
root = __copy_node(self)
nodes_stack = [[root, self, len(self.children)]]
while nodes_stack:
# check the top node, any children left unvisited?
top = nodes_stack[-1]
new_top_node, old_top_node, unvisited_children = top
if unvisited_children:
top[2] -= 1
old_child = old_top_node.children[-unvisited_children]
new_child = __copy_node(old_child)
new_top_node.append(new_child)
nodes_stack.append([new_child, old_child,
len(old_child.children)])
else: # no unvisited children
nodes_stack.pop()
return root
__copy__ = copy
__deepcopy__ = deepcopy = copy
def unrooted_deepcopy(self, parent=None):
r"""Walks the tree unrooted-style and returns a new copy
Perform a deepcopy of self and return a new copy of the tree as an
unrooted copy. This is useful for defining new roots of the tree as
the `TreeNode`.
This method calls `TreeNode.unrooted_copy` which is recursive.
Parameters
----------
parent : TreeNode or None
Used to avoid infinite loops when performing the unrooted traverse
Returns
-------
TreeNode
A new copy of the tree
See Also
--------
copy
unrooted_copy
root_at
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
>>> new_tree = tree.find('d').unrooted_deepcopy()
>>> print(new_tree)
(b,c,(a,((f,g)h)e)d)root;
<BLANKLINE>
"""
root = self.root()
root.assign_ids()
new_tree = root.copy()
new_tree.assign_ids()
new_tree_self = new_tree.find_by_id(self.id)
return new_tree_self.unrooted_copy(parent)
def unrooted_copy(self, parent=None):
r"""Walks the tree unrooted-style and returns a copy
Perform a copy of self and return a new copy of the tree as an
unrooted copy. This is useful for defining new roots of the tree as
the `TreeNode`.
This method is recursive.
Warning, this is _NOT_ a deepcopy
Parameters
----------
parent : TreeNode or None
Used to avoid infinite loops when performing the unrooted traverse
Returns
-------
TreeNode
A new copy of the tree
See Also
--------
copy
unrooted_deepcopy
root_at
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
>>> new_tree = tree.find('d').unrooted_copy()
>>> print(new_tree)
(b,c,(a,((f,g)h)e)d)root;
<BLANKLINE>
"""
neighbors = self.neighbors(ignore=parent)
children = [c.unrooted_copy(parent=self) for c in neighbors]
# we might be walking UP the tree, so:
if parent is None:
# base edge
edgename = None
length = None
elif parent.parent is self:
# self's parent is becoming self's child
edgename = parent.name
length = parent.length
else:
assert parent is self.parent
edgename = self.name
length = self.length
result = self.__class__(name=edgename, children=children,
length=length)
if parent is None:
result.name = "root"
return result
def count(self, tips=False):
"""Get the count of nodes in the tree
Parameters
----------
tips : bool
If `True`, only return the count of the number of tips
Returns
-------
int
The number of nodes or tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
>>> print(tree.count())
9
>>> print(tree.count(tips=True))
5
"""
if tips:
return len(list(self.tips()))
else:
return len(list(self.traverse(include_self=True)))
def subtree(self, tip_list=None):
r"""Make a copy of the subtree"""
raise NotImplementedError()
def subset(self):
r"""Returns set of names that descend from specified node
Get the set of `name` on tips that descend from this node.
Returns
-------
frozenset
The set of names at the tips of the clade that descends from self
See Also
--------
subsets
compare_subsets
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,(b,c)d)e,(f,g)h)i;"))
>>> sorted(tree.subset())
['a', 'b', 'c', 'f', 'g']
"""
return frozenset({i.name for i in self.tips()})
def subsets(self):
r"""Return all sets of names that come from self and its descendants
Compute all subsets of tip names over `self`; in other words, represent
the tree as a set of nested sets.
Returns
-------
frozenset
A frozenset of frozensets of str
See Also
--------
subset
compare_subsets
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("(((a,b)c,(d,e)f)h)root;"))
>>> for s in sorted(tree.subsets()):
... print(sorted(s))
['a', 'b']
['d', 'e']
['a', 'b', 'd', 'e']
"""
sets = []
for i in self.postorder(include_self=False):
if not i.children:
i.__leaf_set = frozenset([i.name])
else:
leaf_set = reduce(or_, [c.__leaf_set for c in i.children])
if len(leaf_set) > 1:
sets.append(leaf_set)
i.__leaf_set = leaf_set
return frozenset(sets)
def root_at(self, node):
r"""Return a new tree rooted at the provided node.
This can be useful for drawing unrooted trees with an orientation that
reflects knowledge of the true root location.
Parameters
----------
node : TreeNode or str
The node to root at
Returns
-------
TreeNode
A new copy of the tree
Raises
------
TreeError
Raises a `TreeError` if a tip is specified as the new root
See Also
--------
root_at_midpoint
unrooted_deepcopy
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("(((a,b)c,(d,e)f)g,h)i;"))
>>> print(tree.root_at('c'))
(a,b,((d,e)f,(h)g)c)root;
<BLANKLINE>
"""
if isinstance(node, str):
node = self.find(node)
if not node.children:
raise TreeError("Can't use a tip (%s) as the root" %
repr(node.name))
return node.unrooted_deepcopy()
def root_at_midpoint(self):
r"""Return a new tree rooted at midpoint of the two tips farthest apart
This method doesn't preserve the internal node naming or structure,
but does keep tip to tip distances correct. Uses `unrooted_copy` but
operates on a full copy of the tree.
Returns
-------
TreeNode
A tree rooted at its midpoint
Raises
------
TreeError
If a tip ends up being the mid point
LengthError
Midpoint rooting requires `length` and will raise (indirectly) if
evaluated nodes don't have length.
See Also
--------
root_at
unrooted_deepcopy
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)"
... "a:1;"))
>>> print(tree.root_at_midpoint())
((d:1.0,e:1.0,(g:1.0)f:1.0)c:0.5,((h:1.0)b:1.0):0.5)root;
<BLANKLINE>
"""
tree = self.copy()
max_dist, tips = tree.get_max_distance()
half_max_dist = max_dist / 2.0
if max_dist == 0.0: # only pathological cases with no lengths
return tree
tip1 = tree.find(tips[0])
tip2 = tree.find(tips[1])
lca = tree.lowest_common_ancestor([tip1, tip2])
if tip1.accumulate_to_ancestor(lca) > half_max_dist:
climb_node = tip1
else:
climb_node = tip2
dist_climbed = 0.0
while dist_climbed + climb_node.length < half_max_dist:
dist_climbed += climb_node.length
climb_node = climb_node.parent
# the midpoint now lies either on the branch from climb_node to its
# parent, or exactly at climb_node's parent
if dist_climbed + climb_node.length == half_max_dist:
# climb to midpoint spot
climb_node = climb_node.parent
if climb_node.is_tip():
raise TreeError('error trying to root tree at tip')
else:
return climb_node.unrooted_copy()
else:
# make a new node on climb_node's branch to its parent
old_br_len = climb_node.length
new_root = tree.__class__()
climb_node.parent.append(new_root)
new_root.append(climb_node)
climb_node.length = half_max_dist - dist_climbed
new_root.length = old_br_len - climb_node.length
return new_root.unrooted_copy()
def is_tip(self):
r"""Returns `True` if the current node has no `children`.
Returns
-------
bool
`True` if the node is a tip
See Also
--------
is_root
has_children
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> print(tree.is_tip())
False
>>> print(tree.find('a').is_tip())
True
"""
return not self.children
def is_root(self):
r"""Returns `True` if the current is a root, i.e. has no `parent`.
Returns
-------
bool
`True` if the node is the root
See Also
--------
is_tip
has_children
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> print(tree.is_root())
True
>>> print(tree.find('a').is_root())
False
"""
return self.parent is None
def has_children(self):
r"""Returns `True` if the node has `children`.
Returns
-------
bool
`True` if the node has children.
See Also
--------
is_tip
is_root
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> print(tree.has_children())
True
>>> print(tree.find('a').has_children())
False
"""
return not self.is_tip()
def traverse(self, self_before=True, self_after=False, include_self=True):
r"""Returns iterator over descendants
This is a depth-first traversal. Since the trees are not binary,
preorder and postorder traversals are possible, but inorder traversals
would depend on the data in the tree and are not handled here.
Parameters
----------
self_before : bool
includes each node before its descendants if True
self_after : bool
includes each node after its descendants if True
include_self : bool
include the initial node if True
`self_before` and `self_after` are independent. If neither is `True`,
only terminal nodes will be returned.
Note that if self is terminal, it will only be included once even if
`self_before` and `self_after` are both `True`.
Yields
------
TreeNode
Traversed node.
See Also
--------
preorder
postorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> for node in tree.traverse():
... print(node.name)
None
c
a
b
"""
if self_before:
if self_after:
return self.pre_and_postorder(include_self=include_self)
else:
return self.preorder(include_self=include_self)
else:
if self_after:
return self.postorder(include_self=include_self)
else:
return self.tips(include_self=include_self)
def preorder(self, include_self=True):
r"""Performs preorder iteration over tree
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> for node in tree.preorder():
... print(node.name)
None
c
a
b
"""
stack = [self]
while stack:
curr = stack.pop()
if include_self or (curr is not self):
yield curr
if curr.children:
stack.extend(curr.children[::-1])
def postorder(self, include_self=True):
r"""Performs postorder iteration over tree.
This is somewhat inelegant compared to saving the node and its index
on the stack, but is 30% faster in the average case and 3x faster in
the worst case (for a comb tree).
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
preorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> for node in tree.postorder():
... print(node.name)
a
b
c
None
"""
child_index_stack = [0]
curr = self
curr_children = self.children
curr_children_len = len(curr_children)
while 1:
curr_index = child_index_stack[-1]
# if there are children left, process them
if curr_index < curr_children_len:
curr_child = curr_children[curr_index]
# if the current child has children, go there
if curr_child.children:
child_index_stack.append(0)
curr = curr_child
curr_children = curr.children
curr_children_len = len(curr_children)
curr_index = 0
# otherwise, yield that child
else:
yield curr_child
child_index_stack[-1] += 1
# if there are no children left, return self, and move to
# self's parent
else:
if include_self or (curr is not self):
yield curr
if curr is self:
break
curr = curr.parent
curr_children = curr.children
curr_children_len = len(curr_children)
child_index_stack.pop()
child_index_stack[-1] += 1
def pre_and_postorder(self, include_self=True):
r"""Performs iteration over tree, visiting node before and after
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
levelorder
tips
non_tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c);"))
>>> for node in tree.pre_and_postorder():
... print(node.name)
None
c
a
b
c
None
"""
# handle simple case first
if not self.children:
if include_self:
yield self
return  # end the generator; raising StopIteration here breaks under PEP 479
child_index_stack = [0]
curr = self
curr_children = self.children
while 1:
curr_index = child_index_stack[-1]
if not curr_index:
if include_self or (curr is not self):
yield curr
# if there are children left, process them
if curr_index < len(curr_children):
curr_child = curr_children[curr_index]
# if the current child has children, go there
if curr_child.children:
child_index_stack.append(0)
curr = curr_child
curr_children = curr.children
curr_index = 0
# otherwise, yield that child
else:
yield curr_child
child_index_stack[-1] += 1
# if there are no children left, return self, and move to
# self's parent
else:
if include_self or (curr is not self):
yield curr
if curr is self:
break
curr = curr.parent
curr_children = curr.children
child_index_stack.pop()
child_index_stack[-1] += 1
def levelorder(self, include_self=True):
r"""Performs levelorder iteration over tree
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
tips
non_tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
>>> for node in tree.levelorder():
... print(node.name)
None
c
f
a
b
d
e
"""
queue = [self]
while queue:
curr = queue.pop(0)
if include_self or (curr is not self):
yield curr
if curr.children:
queue.extend(curr.children)
def tips(self, include_self=False):
r"""Iterates over tips descended from `self`.
Node order is consistent between calls and is ordered by a
postorder traversal of the tree.
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
levelorder
non_tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
>>> for node in tree.tips():
... print(node.name)
a
b
d
e
"""
for n in self.postorder(include_self=False):
if n.is_tip():
yield n
def non_tips(self, include_self=False):
r"""Iterates over nontips descended from self
`include_self`, if `True` (default is False), will return the current
node as part of non_tips if it is a non_tip. Node order is consistent
between calls and is ordered by a postorder traversal of the tree.
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
levelorder
tips
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
>>> for node in tree.non_tips():
... print(node.name)
c
f
"""
for n in self.postorder(include_self):
if not n.is_tip():
yield n
def invalidate_caches(self, attr=True):
r"""Delete lookup and attribute caches
Parameters
----------
attr : bool, optional
If ``True``, invalidate attribute caches created by
`TreeNode.cache_attr`.
See Also
--------
create_caches
cache_attr
find
"""
if not self.is_root():
self.root().invalidate_caches()
else:
self._tip_cache = {}
self._non_tip_cache = {}
if self._registered_caches and attr:
for n in self.traverse():
for cache in self._registered_caches:
if hasattr(n, cache):
delattr(n, cache)
def create_caches(self):
r"""Construct an internal lookups to facilitate searching by name
This method will not cache nodes in which the .name is None. This
method will raise `DuplicateNodeError` if a name conflict in the tips
is discovered, but will not raise if on internal nodes. This is
because, in practice, the tips of a tree are required to be unique
while no such requirement holds for internal nodes.
Raises
------
DuplicateNodeError
The tip cache requires that names are unique (with the exception of
names that are None)
See Also
--------
invalidate_caches
cache_attr
find
"""
if not self.is_root():
self.root().create_caches()
else:
if self._tip_cache and self._non_tip_cache:
return
self.invalidate_caches(attr=False)
tip_cache = {}
non_tip_cache = defaultdict(list)
for node in self.postorder():
name = node.name
if name is None:
continue
if node.is_tip():
if name in tip_cache:
raise DuplicateNodeError("Tip with name '%s' already "
"exists!" % name)
tip_cache[name] = node
else:
non_tip_cache[name].append(node)
self._tip_cache = tip_cache
self._non_tip_cache = non_tip_cache
def find_all(self, name):
r"""Find all nodes that match `name`
The first call to `find_all` will cache all nodes in the tree on the
assumption that additional calls to `find_all` will be made.
Parameters
----------
name : TreeNode or str
The name or node to find. If `name` is `TreeNode` then all other
nodes with the same name will be returned.
Raises
------
MissingNodeError
Raises if the node to be searched for is not found
Returns
-------
list of TreeNode
The nodes found
See Also
--------
find
find_by_id
find_by_func
Examples
--------
>>> from six import StringIO
>>> from skbio.tree import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)d,(f,g)c);"))
>>> for node in tree.find_all('c'):
... print(node.name, node.children[0].name, node.children[1].name)
c a b
c f g
>>> for node in tree.find_all('d'):
... print(node.name, str(node))
d (d,e)d;
<BLANKLINE>
d d;
<BLANKLINE>
"""
root = self.root()
# if what is being passed in looks like a node, just return it
if isinstance(name, root.__class__):
return [name]
root.create_caches()
tip = root._tip_cache.get(name, None)
nodes = root._non_tip_cache.get(name, [])
if tip is not None: nodes.append(tip)
if not nodes:
raise MissingNodeError("Node %s is not in self" % name)
else:
return nodes
def find(self, name):
r"""Find a node by `name`.
The first call to `find` will cache all nodes in the tree on the
assumption that additional calls to `find` will be made.
`find` will first attempt to find the node in the tips. If it cannot
find a corresponding tip, then it will search through the internal
nodes of the tree. In practice, phylogenetic trees and other common
trees in biology do not have unique internal node names. As a result,
this find method will only return the first occurrence of an internal
node encountered on a postorder traversal of the tree.
Parameters
----------
name : TreeNode or str
The name or node to find. If `name` is `TreeNode` then it is
simply returned
Raises
------
MissingNodeError
Raises if the node to be searched for is not found
Returns
-------
TreeNode
The found node
See Also
--------
find_all
find_by_id
find_by_func
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
>>> print(tree.find('c').name)
c
"""
root = self.root()
# if what is being passed in looks like a node, just return it
if isinstance(name, root.__class__):
return name
root.create_caches()
node = root._tip_cache.get(name, None)
if node is None:
node = root._non_tip_cache.get(name, [None])[0]
if node is None:
raise MissingNodeError("Node %s is not in self" % name)
else:
return node
def find_by_id(self, node_id):
r"""Find a node by `id`.
This search is performed from the root of the tree.
Parameters
----------
node_id : int
The `id` of a node in the tree
Returns
-------
TreeNode
The tree node with the matching id
Notes
-----
This method does not cache id associations. A full traversal of the
tree is performed to find a node by an id on every call.
Raises
------
MissingNodeError
This method will raise if the `id` cannot be found
See Also
--------
find
find_all
find_by_func
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
>>> print(tree.find_by_id(2).name)
d
"""
# if this method gets used frequently, then we should cache by ID
# as well
root = self.root()
root.assign_ids()
node = None
for n in self.traverse(include_self=True):
if n.id == node_id:
node = n
break
if node is None:
raise MissingNodeError("ID %d is not in self" % node_id)
else:
return node
def find_by_func(self, func):
r"""Find all nodes given a function
This search method is based on the current subtree, not the root.
Parameters
----------
func : a function
A function that accepts a TreeNode and returns `True` or `False`,
where `True` indicates the node is to be yielded
Yields
------
TreeNode
Node found by `func`.
See Also
--------
find
find_all
find_by_id
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f);"))
>>> func = lambda x: x.parent == tree.find('c')
>>> [n.name for n in tree.find_by_func(func)]
['a', 'b']
"""
for node in self.traverse(include_self=True):
if func(node):
yield node
def ancestors(self):
r"""Returns all ancestors back to the root
This call will return all nodes in the path back to root, but does not
include the node instance that the call was made from.
Returns
-------
list of TreeNode
The path, toward the root, from self
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
>>> [node.name for node in tree.find('a').ancestors()]
['c', 'root']
"""
result = []
curr = self
while not curr.is_root():
result.append(curr.parent)
curr = curr.parent
return result
def root(self):
r"""Returns root of the tree `self` is in
Returns
-------
TreeNode
The root of the tree
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
>>> tip_a = tree.find('a')
>>> root = tip_a.root()
>>> root == tree
True
"""
curr = self
while not curr.is_root():
curr = curr.parent
return curr
def siblings(self):
r"""Returns all nodes that are `children` of `self` `parent`.
This call excludes `self` from the list.
Returns
-------
list of TreeNode
The list of sibling nodes relative to self
See Also
--------
neighbors
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e,f)g)root;"))
>>> tip_e = tree.find('e')
>>> [n.name for n in tip_e.siblings()]
['d', 'f']
"""
if self.is_root():
return []
result = self.parent.children[:]
result.remove(self)
return result
def neighbors(self, ignore=None):
r"""Returns all nodes that are connected to self
This call does not include `self` in the result
Parameters
----------
ignore : TreeNode
A node to ignore
Returns
-------
list of TreeNode
The list of all nodes that are connected to self
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
>>> node_c = tree.find('c')
>>> [n.name for n in node_c.neighbors()]
['a', 'b', 'root']
"""
nodes = [n for n in self.children + [self.parent] if n is not None]
if ignore is None:
return nodes
else:
return [n for n in nodes if n is not ignore]
def lowest_common_ancestor(self, tipnames):
r"""Lowest common ancestor for a list of tips
Parameters
----------
tipnames : list of TreeNode or str
The nodes of interest
Returns
-------
TreeNode
The lowest common ancestor of the passed in nodes
Raises
------
ValueError
If no tips could be found in the tree
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
>>> nodes = [tree.find('a'), tree.find('b')]
>>> lca = tree.lowest_common_ancestor(nodes)
>>> print(lca.name)
c
>>> nodes = [tree.find('a'), tree.find('e')]
>>> lca = tree.lca(nodes) # lca is an alias for convenience
>>> print(lca.name)
root
"""
if len(tipnames) == 1:
return self.find(tipnames[0])
tips = [self.find(name) for name in tipnames]
if len(tips) == 0:
raise ValueError("No tips found!")
nodes_to_scrub = []
for t in tips:
if t.is_root():
# has to be the LCA...
return t
prev = t
curr = t.parent
while curr and not hasattr(curr, 'black'):
setattr(curr, 'black', [prev])
nodes_to_scrub.append(curr)
prev = curr
curr = curr.parent
# increase black count, multiple children lead to here
if curr:
curr.black.append(prev)
curr = self
while len(curr.black) == 1:
curr = curr.black[0]
# clean up tree
for n in nodes_to_scrub:
delattr(n, 'black')
return curr
lca = lowest_common_ancestor # for convenience
@classmethod
def from_taxonomy(cls, lineage_map):
"""Construct a tree from a taxonomy
Parameters
----------
lineage_map : iterable of tuple
A id to lineage mapping where the first index is an ID and the
second index is an iterable of the lineage.
Returns
-------
TreeNode
The constructed taxonomy
Examples
--------
>>> from skbio.tree import TreeNode
>>> lineages = {'1': ['Bacteria', 'Firmicutes', 'Clostridia'],
... '2': ['Bacteria', 'Firmicutes', 'Bacilli'],
... '3': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
... '4': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
... '5': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
... '6': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
... '7': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
... '8': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
... '9': ['Bacteria', 'Bacteroidetes', 'Cytophagia']}
>>> tree = TreeNode.from_taxonomy(lineages.items())
>>> print(tree.ascii_art())
/Clostridia-1
/Firmicutes
| \Bacilli- /-2
/Bacteria|
| | /-3
| | /Sphingobacteria
| \Bacteroidetes \-8
| |
---------| \Cytophagia-9
|
| /-5
| /Thermoplasmata
| | \-4
\Archaea- /Euryarchaeota
| /-7
\Halobacteria
\-6
"""
root = cls(name=None)
root._lookup = {}
for id_, lineage in lineage_map:
cur_node = root
# for each name, see if we've seen it, if not, add that puppy on
for name in lineage:
if name in cur_node._lookup:
cur_node = cur_node._lookup[name]
else:
new_node = TreeNode(name=name)
new_node._lookup = {}
cur_node._lookup[name] = new_node
cur_node.append(new_node)
cur_node = new_node
cur_node.append(TreeNode(name=id_))
# scrub the lookups
for node in root.non_tips(include_self=True):
del node._lookup
return root
def _balanced_distance_to_tip(self):
"""Return the distance to tip from this node.
The distance to every tip from this node must be equal for this to
return a correct result.
Returns
-------
float
The distance to tip of a length-balanced tree
"""
node = self
distance = 0
while node.has_children():
distance += node.children[0].length
node = node.children[0]
return distance
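# Worked sketch: for a node whose leftmost descending path has branch lengths
# 2.0 and then 3.0 down to a tip, the value returned is 5.0; the method only
# follows children[0], so it is correct only for length-balanced subtrees.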
@classmethod
def from_linkage_matrix(cls, linkage_matrix, id_list):
"""Return tree from SciPy linkage matrix.
Parameters
----------
linkage_matrix : ndarray
A SciPy linkage matrix as returned by
`scipy.cluster.hierarchy.linkage`
id_list : list
The indices of the `id_list` will be used in the linkage_matrix
Returns
-------
TreeNode
An unrooted bifurcated tree
See Also
--------
scipy.cluster.hierarchy.linkage
"""
tip_width = len(id_list)
cluster_count = len(linkage_matrix)
lookup_len = cluster_count + tip_width
node_lookup = np.empty(lookup_len, dtype=TreeNode)
for i, name in enumerate(id_list):
node_lookup[i] = TreeNode(name=name)
for i in range(tip_width, lookup_len):
node_lookup[i] = TreeNode()
# SciPy labels new clusters n, n+1, ... for n observations; a linkage of n
# observations has n - 1 rows, so cluster_count + 1 equals tip_width here
newest_cluster_index = cluster_count + 1
for link in linkage_matrix:
child_a = node_lookup[int(link[0])]
child_b = node_lookup[int(link[1])]
path_length = link[2] / 2
child_a.length = path_length - child_a._balanced_distance_to_tip()
child_b.length = path_length - child_b._balanced_distance_to_tip()
new_cluster = node_lookup[newest_cluster_index]
new_cluster.append(child_a)
new_cluster.append(child_b)
newest_cluster_index += 1
return node_lookup[-1]
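# Usage sketch (assumes SciPy is available; the observations are illustrative):
#     from scipy.cluster.hierarchy import linkage
#     import numpy as np
#     observations = np.array([[0.0], [1.0], [5.0], [6.0]])
#     lm = linkage(observations, method='average')
#     tree = TreeNode.from_linkage_matrix(lm, ['a', 'b', 'c', 'd'])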
def to_taxonomy(self, allow_empty=False, filter_f=None):
"""Returns a taxonomy representation of self
Parameters
----------
allow_empty : bool, optional
Allow gaps in the taxonomy (e.g., internal nodes without names).
filter_f : function, optional
Specify a filtering function that returns True if the lineage is
to be returned. This function must accept a ``TreeNode`` as its
first parameter, and a ``list`` that represents the lineage as the
second parameter.
Yields
------
tuple
``(tip, [lineage])`` where ``tip`` corresponds to a tip in the tree
and ``[lineage]`` is the expanded names from root to tip. ``None``
and empty strings are omitted from the lineage.
Notes
-----
If ``allow_empty`` is ``True`` and the root node does not have a name,
then that name will not be included. This is because it is common to
have multiple domains represented in the taxonomy, which would result
in a root node that does not have a name and does not make sense to
represent in the output.
Examples
--------
>>> from skbio.tree import TreeNode
>>> lineages = {'1': ['Bacteria', 'Firmicutes', 'Clostridia'],
... '2': ['Bacteria', 'Firmicutes', 'Bacilli'],
... '3': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
... '4': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
... '5': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
... '6': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
... '7': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
... '8': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
... '9': ['Bacteria', 'Bacteroidetes', 'Cytophagia']}
>>> tree = TreeNode.from_taxonomy(lineages.items())
>>> lineages = sorted([(n.name, l) for n, l in tree.to_taxonomy()])
>>> for name, lineage in lineages:
... print(name, '; '.join(lineage))
1 Bacteria; Firmicutes; Clostridia
2 Bacteria; Firmicutes; Bacilli
3 Bacteria; Bacteroidetes; Sphingobacteria
4 Archaea; Euryarchaeota; Thermoplasmata
5 Archaea; Euryarchaeota; Thermoplasmata
6 Archaea; Euryarchaeota; Halobacteria
7 Archaea; Euryarchaeota; Halobacteria
8 Bacteria; Bacteroidetes; Sphingobacteria
9 Bacteria; Bacteroidetes; Cytophagia
"""
if filter_f is None:
def filter_f(a, b):
return True
self.assign_ids()
seen = set()
lineage = []
# visit internal nodes while traversing out to the tips, and on the
# way back up
for node in self.traverse(self_before=True, self_after=True):
if node.is_tip():
if filter_f(node, lineage):
yield (node, lineage[:])
else:
if allow_empty:
if node.is_root() and not node.name:
continue
else:
if not node.name:
continue
if node.id in seen:
lineage.pop(-1)
else:
lineage.append(node.name)
seen.add(node.id)
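# filter_f sketch (hypothetical predicate) using the (node, lineage) signature
# described above, keeping only lineages that pass through Archaea:
#     archaea_only = lambda node, lineage: 'Archaea' in lineage
#     for tip, lineage in tree.to_taxonomy(filter_f=archaea_only):
#         ...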
def to_array(self, attrs=None):
"""Return an array representation of self
Parameters
----------
attrs : list of tuple or None
The attributes and types to return. The expected form is
[(attribute_name, type)]. If `None`, then `name`, `length`, and
`id` are returned.
Returns
-------
dict of array
{id_index: {id: TreeNode},
child_index: [(node_id, left_child_id, right_child_id)],
attr_1: array(...),
...
attr_N: array(...)}
Notes
-----
Attribute arrays are in index order such that TreeNode.id can be used
as a lookup into the array.
If `length` is an attribute, this will also record the length of the
root, which is `nan`. Take care when summing.
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> t = TreeNode.read(StringIO('(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'))
>>> res = t.to_array()
>>> res.keys()
['child_index', 'length', 'name', 'id_index', 'id']
>>> res['child_index']
[(4, 0, 2), (5, 3, 3), (6, 4, 5), (7, 6, 6)]
>>> for k, v in res['id_index'].items():
... print(k, v)
...
0 a:1.0;
<BLANKLINE>
1 b:2.0;
<BLANKLINE>
2 c:3.0;
<BLANKLINE>
3 d:5.0;
<BLANKLINE>
4 (a:1.0,b:2.0,c:3.0)x:4.0;
<BLANKLINE>
5 (d:5.0)y:6.0;
<BLANKLINE>
6 ((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0;
<BLANKLINE>
7 (((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0);
<BLANKLINE>
>>> res['id']
array([0, 1, 2, 3, 4, 5, 6, 7])
>>> res['name']
array(['a', 'b', 'c', 'd', 'x', 'y', 'z', None], dtype=object)
"""
if attrs is None:
attrs = [('name', object), ('length', float), ('id', int)]
else:
for attr, dtype in attrs:
if not hasattr(self, attr):
raise AttributeError("Invalid attribute '%s'." % attr)
id_index, child_index = self.index_tree()
n = self.id + 1 # assign_ids starts at 0
tmp = [np.zeros(n, dtype=dtype) for attr, dtype in attrs]
for node in self.traverse(include_self=True):
n_id = node.id
for idx, (attr, dtype) in enumerate(attrs):
tmp[idx][n_id] = getattr(node, attr)
results = {'id_index': id_index, 'child_index': child_index}
results.update({attr: arr for (attr, dtype), arr in zip(attrs, tmp)})
return results
def _ascii_art(self, char1='-', show_internal=True, compact=False):
LEN = 10
PAD = ' ' * LEN
PA = ' ' * (LEN - 1)
namestr = self.name or '' # prevents name of NoneType
if self.children:
mids = []
result = []
for c in self.children:
if c is self.children[0]:
char2 = '/'
elif c is self.children[-1]:
char2 = '\\'
else:
char2 = '-'
(clines, mid) = c._ascii_art(char2, show_internal, compact)
mids.append(mid + len(result))
result.extend(clines)
if not compact:
result.append('')
if not compact:
result.pop()
(lo, hi, end) = (mids[0], mids[-1], len(result))
prefixes = [PAD] * (lo + 1) + [PA + '|'] * \
(hi - lo - 1) + [PAD] * (end - hi)
mid = int(np.trunc((lo + hi) / 2))  # np.int is removed in recent NumPy
prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1]
result = [p + l for (p, l) in zip(prefixes, result)]
if show_internal:
stem = result[mid]
result[mid] = stem[0] + namestr + stem[len(namestr) + 1:]
return (result, mid)
else:
return ([char1 + '-' + namestr], 0)
def ascii_art(self, show_internal=True, compact=False):
r"""Returns a string containing an ascii drawing of the tree
Note, this method calls a private recursive function and is not safe
for large trees.
Parameters
----------
show_internal : bool
includes internal edge names
compact : bool
use exactly one line per tip
Returns
-------
str
an ASCII formatted version of the tree
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b)c,(d,e)f)root;"))
>>> print(tree.ascii_art())
/-a
/c-------|
| \-b
-root----|
| /-d
\f-------|
\-e
"""
(lines, mid) = self._ascii_art(show_internal=show_internal,
compact=compact)
return '\n'.join(lines)
def accumulate_to_ancestor(self, ancestor):
r"""Return the sum of the distance between self and ancestor
Parameters
----------
ancestor : TreeNode
The ancestor node to accumulate distance to
Returns
-------
float
The sum of lengths between self and ancestor
Raises
------
NoParentError
A NoParentError is raised if the ancestor is not an ancestor of
self
NoLengthError
A NoLengthError is raised if one of the nodes between self and
ancestor (including self) lacks a `length` attribute
See Also
--------
distance
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
>>> root = tree
>>> tree.find('a').accumulate_to_ancestor(root)
4.0
"""
accum = 0.0
curr = self
while curr is not ancestor:
if curr.is_root():
raise NoParentError("Provided ancestor is not in the path")
if curr.length is None:
raise NoLengthError("No length on node %s found!" %
curr.name or "unnamed")
accum += curr.length
curr = curr.parent
return accum
def distance(self, other):
"""Return the distance between self and other
This method can be used to compute the distances between two tips,
however, it is not optimized for computing pairwise tip distances.
Parameters
----------
other : TreeNode
The node to compute a distance to
Returns
-------
float
The distance between two nodes
Raises
------
NoLengthError
A NoLengthError will be raised if a node without `length` is
encountered
See Also
--------
tip_tip_distances
accumulate_to_ancestor
compare_tip_distances
get_max_distance
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
>>> tip_a = tree.find('a')
>>> tip_d = tree.find('d')
>>> tip_a.distance(tip_d)
14.0
"""
if self is other:
return 0.0
root = self.root()
lca = root.lowest_common_ancestor([self, other])
accum = self.accumulate_to_ancestor(lca)
accum += other.accumulate_to_ancestor(lca)
return accum
def _set_max_distance(self):
"""Propagate tip distance information up the tree
This method was originally implemented by Julia Goodrich with the
intent of being able to determine max tip to tip distances between
nodes on large trees efficiently. The code has been modified to track
the specific tips the distance is between
"""
for n in self.postorder():
if n.is_tip():
n.MaxDistTips = [[0.0, n], [0.0, n]]
else:
if len(n.children) == 1:
raise TreeError("No support for single descedent nodes")
else:
tip_info = [(max(c.MaxDistTips), c) for c in n.children]
dists = [i[0][0] for i in tip_info]
best_idx = np.argsort(dists)[-2:]
tip_a, child_a = tip_info[best_idx[0]]
tip_b, child_b = tip_info[best_idx[1]]
tip_a[0] += child_a.length or 0.0
tip_b[0] += child_b.length or 0.0
n.MaxDistTips = [tip_a, tip_b]
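# Bookkeeping sketch: after this call, a node c with tip children a:1 and b:2
# holds c.MaxDistTips == [[1.0, <a>], [2.0, <b>]], i.e. the two largest
# tip-to-node distances seen so far together with the tips they lead to.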
def _get_max_distance_singledesc(self):
"""returns the max distance between any pair of tips
Also returns the tip names that it is between as a tuple"""
distmtx = self.tip_tip_distances()
idx_max = divmod(distmtx.data.argmax(), distmtx.shape[1])
max_pair = (distmtx.ids[idx_max[0]], distmtx.ids[idx_max[1]])
return distmtx[idx_max], max_pair
def get_max_distance(self):
"""Returns the max tip tip distance between any pair of tips
Returns
-------
float
The distance between the two most distant tips in the tree
tuple of TreeNode
The two most distant tips in the tree
Raises
------
NoLengthError
A NoLengthError will be thrown if a node without length is
encountered
See Also
--------
distance
tip_tip_distances
compare_tip_distances
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
>>> dist, tips = tree.get_max_distance()
>>> dist
16.0
>>> [n.name for n in tips]
['b', 'e']
"""
if not hasattr(self, 'MaxDistTips'):
# _set_max_distance will throw a TreeError if a node with a single
# child is encountered
try:
self._set_max_distance()
except TreeError:  # fall back to the pairwise distance-matrix approach
return self._get_max_distance_singledesc()
longest = 0.0
tips = [None, None]
for n in self.non_tips(include_self=True):
tip_a, tip_b = n.MaxDistTips
dist = (tip_a[0] + tip_b[0])
if dist > longest:
longest = dist
tips = [tip_a[1], tip_b[1]]
return longest, tips
def tip_tip_distances(self, endpoints=None):
"""Returns distance matrix between pairs of tips, and a tip order.
By default, all pairwise distances are calculated in the tree. If
`endpoints` are specified, then only the distances between those tips
are computed.
Parameters
----------
endpoints : list of TreeNode or str, or None
A list of TreeNode objects or names of TreeNode objects
Returns
-------
DistanceMatrix
The distance matrix
Raises
------
ValueError
If any of the specified `endpoints` are not tips
NoLengthError
If a node without length is encountered
See Also
--------
distance
compare_tip_distances
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a:1,b:2)c:3,(d:4,e:5)f:6)root;"))
>>> mat = tree.tip_tip_distances()
>>> print(mat)
4x4 distance matrix
IDs:
'a', 'b', 'd', 'e'
Data:
[[ 0. 3. 14. 15.]
[ 3. 0. 15. 16.]
[ 14. 15. 0. 9.]
[ 15. 16. 9. 0.]]
"""
all_tips = list(self.tips())
if endpoints is None:
tip_order = all_tips
else:
tip_order = [self.find(n) for n in endpoints]
for n in tip_order:
if not n.is_tip():
raise ValueError("Node with name '%s' is not a tip." %
n.name)
# linearize all tips in postorder
# .__start, .__stop compose the slice in tip_order.
for i, node in enumerate(all_tips):
node.__start, node.__stop = i, i + 1
# the result map provides index in the result matrix
result_map = {n.__start: i for i, n in enumerate(tip_order)}
num_all_tips = len(all_tips) # total number of tips
num_tips = len(tip_order) # total number of tips in result
result = np.zeros((num_tips, num_tips), float) # tip by tip matrix
distances = np.zeros((num_all_tips), float) # dist from each tip up to the current node
def update_result():
# set tip_tip distance between tips of different child
for child1, child2 in combinations(node.children, 2):
for tip1 in range(child1.__start, child1.__stop):
if tip1 not in result_map:
continue
t1idx = result_map[tip1]
for tip2 in range(child2.__start, child2.__stop):
if tip2 not in result_map:
continue
t2idx = result_map[tip2]
result[t1idx, t2idx] = distances[
tip1] + distances[tip2]
for node in self.postorder():
if not node.children:
continue
# subtree with solved child wedges
# can possibly use np.zeros
starts, stops = [], [] # to calc ._start and ._stop for curr node
for child in node.children:
if child.length is None:
raise NoLengthError("Node with name '%s' doesn't have a "
"length." % child.name)
distances[child.__start:child.__stop] += child.length
starts.append(child.__start)
stops.append(child.__stop)
node.__start, node.__stop = min(starts), max(stops)
if len(node.children) > 1:
update_result()
return DistanceMatrix(result + result.T, [n.name for n in tip_order])
def compare_rfd(self, other, proportion=False):
"""Calculates the Robinson and Foulds symmetric difference
Parameters
----------
other : TreeNode
A tree to compare against
proportion : bool
Return a proportional difference
Returns
-------
float
The distance between the trees
Notes
-----
Implementation based off of code by Julia Goodrich. The original
description of the algorithm can be found in [1]_.
Raises
------
ValueError
If the tip names of `self` and `other` are incompatible, i.e. neither
set is a subset of the other (raised by the underlying `shear`).
See Also
--------
compare_subsets
compare_tip_distances
References
----------
.. [1] Comparison of phylogenetic trees. Robinson and Foulds.
Mathematical Biosciences. 1981. 53:131-141
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree1 = TreeNode.read(StringIO("((a,b),(c,d));"))
>>> tree2 = TreeNode.read(StringIO("(((a,b),c),d);"))
>>> tree1.compare_rfd(tree2)
2.0
"""
t1names = {n.name for n in self.tips()}
t2names = {n.name for n in other.tips()}
if t1names != t2names:
if t1names < t2names:
tree1 = self
tree2 = other.shear(t1names)
else:
tree1 = self.shear(t2names)
tree2 = other
else:
tree1 = self
tree2 = other
tree1_sets = tree1.subsets()
tree2_sets = tree2.subsets()
not_in_both = tree1_sets.symmetric_difference(tree2_sets)
dist = float(len(not_in_both))
if proportion:
total_subsets = len(tree1_sets) + len(tree2_sets)
dist = dist / total_subsets
return dist
def compare_subsets(self, other, exclude_absent_taxa=False):
"""Returns fraction of overlapping subsets where self and other differ.
Names present in only one of the two trees will count as mismatches,
if you don't want this behavior, strip out the non-matching tips first.
Parameters
----------
other : TreeNode
The tree to compare
exclude_absent_taxa : bool
Strip out names that don't occur in both trees
Returns
-------
float
The fraction of overlapping subsets that differ between the trees
See Also
--------
compare_rfd
compare_tip_distances
subsets
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree1 = TreeNode.read(StringIO("((a,b),(c,d));"))
>>> tree2 = TreeNode.read(StringIO("(((a,b),c),d);"))
>>> tree1.compare_subsets(tree2)
0.5
"""
self_sets, other_sets = self.subsets(), other.subsets()
if exclude_absent_taxa:
in_both = self.subset() & other.subset()
self_sets = (i & in_both for i in self_sets)
self_sets = frozenset({i for i in self_sets if len(i) > 1})
other_sets = (i & in_both for i in other_sets)
other_sets = frozenset({i for i in other_sets if len(i) > 1})
total_subsets = len(self_sets) + len(other_sets)
intersection_length = len(self_sets & other_sets)
if not total_subsets: # no common subsets after filtering, so max dist
return 1
return 1 - (2 * intersection_length / float(total_subsets))
def compare_tip_distances(self, other, sample=None, dist_f=distance_from_r,
shuffle_f=np.random.shuffle):
"""Compares self to other using tip-to-tip distance matrices.
Value returned is `dist_f(m1, m2)` for the two matrices. Default is
to use the Pearson correlation coefficient, with +1 giving a distance
of 0 and -1 giving a distance of +1 (the maximum possible value).
Depending on the application, you might instead want to use
distance_from_r_squared, which counts correlations of both +1 and -1
as identical (0 distance).
Note: automatically strips out the names that don't match (this is
necessary for this method because the distance between non-matching
names and matching names is undefined in the tree where they don't
match, and because we need to reorder the names in the two trees to
match up the distance matrices).
Parameters
----------
other : TreeNode
The tree to compare
sample : int or None
Randomly subsample the tips in common between the trees to
compare. This is useful when comparing very large trees.
dist_f : function
The distance function used to compare two the tip-tip distance
matrices
shuffle_f : function
The shuffling function used if `sample` is not None
Returns
-------
float
The distance between the trees
Raises
------
ValueError
            A ValueError is raised if no tips are in common between
            the trees.
See Also
--------
compare_subsets
compare_rfd
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> # note, only three common taxa between the trees
>>> tree1 = TreeNode.read(StringIO("((a:1,b:1):2,(c:0.5,X:0.7):3);"))
>>> tree2 = TreeNode.read(StringIO("(((a:1,b:1,Y:1):2,c:3):1,Z:4);"))
>>> dist = tree1.compare_tip_distances(tree2)
>>> print("%.9f" % dist)
0.000133446
"""
self_names = {i.name: i for i in self.tips()}
other_names = {i.name: i for i in other.tips()}
common_names = frozenset(self_names) & frozenset(other_names)
common_names = list(common_names)
if not common_names:
raise ValueError("No tip names in common between the two trees.")
if len(common_names) <= 2:
return 1 # the two trees must match by definition in this case
if sample is not None:
shuffle_f(common_names)
common_names = common_names[:sample]
self_nodes = [self_names[k] for k in common_names]
other_nodes = [other_names[k] for k in common_names]
self_matrix = self.tip_tip_distances(endpoints=self_nodes)
other_matrix = other.tip_tip_distances(endpoints=other_nodes)
return dist_f(self_matrix, other_matrix)
def index_tree(self):
"""Index a tree for rapid lookups within a tree array
Indexes nodes in-place as `n._leaf_index`.
Returns
-------
dict
A mapping {node_id: TreeNode}
list of tuple of (int, int, int)
The first index in each tuple is the corresponding node_id. The
second index is the left most leaf index. The third index is the
right most leaf index
"""
self.assign_ids()
id_index = {}
child_index = []
for n in self.postorder():
for c in n.children:
id_index[c.id] = c
if c:
# c has children itself, so need to add to result
child_index.append((c.id,
c.children[0].id,
c.children[-1].id))
# handle root, which should be t itself
id_index[self.id] = self
# only want to add to the child_index if self has children...
if self.children:
child_index.append((self.id,
self.children[0].id,
self.children[-1].id))
return id_index, child_index
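    # Usage sketch, not part of the original module: for a hypothetical tree
    # read from "((a,b)c,(d,e)f)root;",
    #
    #     id_index, child_index = tree.index_tree()
    #
    # gives an `id_index` mapping every assigned id back to its TreeNode, and
    # a `child_index` with one (node_id, first_child_id, last_child_id) tuple
    # per internal node, which is what the tree-array lookups consume.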
def assign_ids(self):
"""Assign topologically stable unique ids to self
Following the call, all nodes in the tree will have their id
attribute set
"""
curr_index = 0
for n in self.postorder():
for c in n.children:
c.id = curr_index
curr_index += 1
self.id = curr_index
def descending_branch_length(self, tip_subset=None):
"""Find total descending branch length from self or subset of self tips
Parameters
----------
tip_subset : Iterable, or None
If None, the total descending branch length for all tips in the
tree will be returned. If a list of tips is provided then only the
total descending branch length associated with those tips will be
returned.
Returns
-------
float
The total descending branch length for the specified set of tips.
Raises
------
ValueError
            A ValueError is raised if the list of tips supplied to tip_subset
            contains names that are not tip names in the tree (e.g., internal
            node names).
Notes
-----
        This function replicates cogent's totalDescendingBranchLength method
and extends that method to allow the calculation of total descending
branch length of a subset of the tips if requested. The postorder
guarantees that the function will always be able to add the descending
branch length if the node is not a tip.
Nodes with no length will have their length set to 0. The root length
(if it exists) is ignored.
Examples
--------
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tr = TreeNode.read(StringIO("(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G"
... ":2.4,(H:.4,I:.5)J:1.3)K;"))
>>> tdbl = tr.descending_branch_length()
>>> sdbl = tr.descending_branch_length(['A','E'])
>>> print(tdbl, sdbl)
8.9 2.2
"""
self.assign_ids()
if tip_subset is not None:
all_tips = self.subset()
if not set(tip_subset).issubset(all_tips):
                raise ValueError('tip_subset contains ids that are not tip '
'names.')
lca = self.lowest_common_ancestor(tip_subset)
ancestors = {}
for tip in tip_subset:
curr = self.find(tip)
while curr is not lca:
ancestors[curr.id] = curr.length if curr.length is not \
None else 0.0
curr = curr.parent
return sum(ancestors.values())
else:
return sum(n.length for n in self.postorder(include_self=True) if
n.length is not None)
def cache_attr(self, func, cache_attrname, cache_type=list):
"""Cache attributes on internal nodes of the tree
Parameters
----------
func : function
func will be provided the node currently being evaluated and must
            return a list of the item (or items) to cache from that node, or an
empty list.
cache_attrname : str
            Name of the attribute to set on each node to hold the cached values
cache_type : {set, frozenset, list}
The type of the cache
Notes
-----
This method is particularly useful if you need to frequently look up
attributes that would normally require a traversal of the tree.
WARNING: any cache created by this method will be invalidated if the
topology of the tree changes (e.g., if `TreeNode.invalidate_caches` is
called).
Raises
------
TypeError
            If a cache_type other than `set`, `frozenset`, or `list` is specified.
Examples
--------
Cache the tip names of the tree on its internal nodes
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
>>> f = lambda n: [n.name] if n.is_tip() else []
>>> tree.cache_attr(f, 'tip_names')
>>> for n in tree.traverse(include_self=True):
... print("Node name: %s, cache: %r" % (n.name, n.tip_names))
Node name: root, cache: ['a', 'b', 'c', 'd', 'g', 'h']
Node name: f, cache: ['a', 'b', 'c', 'd']
Node name: a, cache: ['a']
Node name: b, cache: ['b']
Node name: e, cache: ['c', 'd']
Node name: c, cache: ['c']
Node name: d, cache: ['d']
Node name: i, cache: ['g', 'h']
Node name: g, cache: ['g']
Node name: h, cache: ['h']
"""
if cache_type in [set, frozenset]:
def reduce_f(a, b):
return a | b
elif cache_type == list:
def reduce_f(a, b):
return a + b
else:
raise TypeError("Only list, set and frozenset are supported!")
for node in self.postorder(include_self=True):
node._registered_caches.add(cache_attrname)
cached = [getattr(c, cache_attrname) for c in node.children]
cached.append(cache_type(func(node)))
setattr(node, cache_attrname, reduce(reduce_f, cached))
def shuffle(self, k=None, names=None, shuffle_f=np.random.shuffle, n=1):
"""Yield trees with shuffled tip names
Parameters
----------
k : int, optional
The number of tips to shuffle. If k is not `None`, k tips are
randomly selected, and only those names will be shuffled.
names : list, optional
The specific tip names to shuffle. k and names cannot be specified
at the same time.
shuffle_f : func
            Shuffle method; this function must accept a list and modify it
            in place.
n : int, optional
The number of iterations to perform. Value must be > 0 and `np.inf`
can be specified for an infinite number of iterations.
Notes
-----
        Tip names are shuffled in place. If neither `k` nor `names` is
provided, all tips are shuffled.
Yields
------
TreeNode
Tree with shuffled tip names.
Raises
------
ValueError
If `k` is < 2
If `n` is < 1
ValueError
If both `k` and `names` are specified
MissingNodeError
If `names` is specified but one of the names cannot be found
Examples
--------
Alternate the names on two of the tips, 'a', and 'b', and do this 5
times.
>>> from six import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO("((a,b),(c,d));"))
>>> rev = lambda items: items.reverse()
>>> shuffler = tree.shuffle(names=['a', 'b'], shuffle_f=rev, n=5)
>>> for shuffled_tree in shuffler:
... print(shuffled_tree)
((b,a),(c,d));
<BLANKLINE>
((a,b),(c,d));
<BLANKLINE>
((b,a),(c,d));
<BLANKLINE>
((a,b),(c,d));
<BLANKLINE>
((b,a),(c,d));
<BLANKLINE>
"""
if k is not None and k < 2:
raise ValueError("k must be None or >= 2")
if k is not None and names is not None:
            raise ValueError("k and names cannot be specified at the same time")
if n < 1:
raise ValueError("n must be > 0")
self.assign_ids()
if names is None:
all_tips = list(self.tips())
if n is None:
n = len(all_tips)
shuffle_f(all_tips)
names = [tip.name for tip in all_tips[:k]]
nodes = [self.find(name) for name in names]
# Since the names are being shuffled, the association between ID and
# name is no longer reliable
self.invalidate_caches()
counter = 0
while counter < n:
shuffle_f(names)
for node, name in zip(nodes, names):
node.name = name
yield self
counter += 1
| jensreeder/scikit-bio | skbio/tree/_tree.py | Python | bsd-3-clause | 88,386 |
from django.conf.urls import url
from corehq.messaging.smsbackends.twilio.views import (TwilioIncomingSMSView,
TwilioIncomingIVRView)
urlpatterns = [
url(r'^sms/(?P<api_key>[\w-]+)/?$', TwilioIncomingSMSView.as_view(),
name=TwilioIncomingSMSView.urlname),
url(r'^ivr/(?P<api_key>[\w-]+)/?$', TwilioIncomingIVRView.as_view(),
name=TwilioIncomingIVRView.urlname),
]
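# Illustrative note, not part of the original file: with these patterns an
# incoming request such as POST .../sms/<api_key>/ is routed to
# TwilioIncomingSMSView and .../ivr/<api_key>/ to TwilioIncomingIVRView; the
# exact prefix depends on where the project urlconf includes this module, and
# the trailing slash is optional because of the '/?$'.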
| dimagi/commcare-hq | corehq/messaging/smsbackends/twilio/urls.py | Python | bsd-3-clause | 394 |
"""
Plot the columns of the output files
"""
import sys
from matplotlib import pyplot as plt
import numpy as np
data = np.loadtxt(sys.argv[1], unpack=True)
shape = (int(sys.argv[2]), int(sys.argv[3]))
lon = np.reshape(data[0], shape)
lat = np.reshape(data[1], shape)
for i, value in enumerate(data[3:]):
value = np.reshape(value, shape)
plt.figure(figsize=(4, 3))
plt.title("Column %d" % (i + 4))
plt.contourf(lon, lat, value, 50)
plt.colorbar()
plt.savefig('column%d.png' % (i + 4))
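# Usage sketch, not part of the original script: the data file and grid shape
# come from the command line, e.g. something like
#
#   python plot.py output.txt 100 50
#
# where "output.txt" is a hypothetical output file whose first two columns
# are lon and lat reshaped to a 100 x 50 grid; every column from the fourth
# onward is contoured and saved as columnN.png.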
| leouieda/tesseroids-original | cookbook/custom_ratio/plot.py | Python | bsd-3-clause | 509 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.template.defaultfilters import slugify
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for park in orm.Park.objects.all():
park.slug = '%s-%d' % (slugify(park.name), park.id)
park.save()
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
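        # Illustrative note, not part of the original migration: with this
        # scheme a park named "Boston Common" with primary key 12 (both values
        # hypothetical) gets the slug "boston-common-12", so the id suffix
        # keeps slugs unique even when two parks share a name.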
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'parks.activity': {
'Meta': {'ordering': "['name']", 'object_name': 'Activity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'parks.event': {
'Meta': {'object_name': 'Event'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'parks.facility': {
'Meta': {'object_name': 'Facility'},
'access': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'activity': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'activity'", 'symmetrical': 'False', 'to': u"orm['parks.Activity']"}),
'facilitytype': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parks.Facilitytype']"}),
'geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '26986'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'park': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parks.Park']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'parks.facilitytype': {
'Meta': {'object_name': 'Facilitytype'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'parks.friendsgroup': {
'Meta': {'object_name': 'Friendsgroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'parks.neighborhood': {
'Meta': {'ordering': "['name']", 'object_name': 'Neighborhood'},
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '26986'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'n_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'parks.park': {
'Meta': {'object_name': 'Park'},
'access': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'alt_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'events': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['parks.Event']"}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'friendsgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parks.Friendsgroup']", 'null': 'True', 'blank': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '26986'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'parks'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['parks.Parkimage']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'neighborhoods': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'neighborhoods'", 'blank': 'True', 'to': u"orm['parks.Neighborhood']"}),
'os_id': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'parkowner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parks.Parkowner']", 'null': 'True', 'blank': 'True'}),
'parktype': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parks.Parktype']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'parks.parkimage': {
'Meta': {'ordering': "['pk']", 'object_name': 'Parkimage'},
'caption': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
u'parks.parkowner': {
'Meta': {'object_name': 'Parkowner'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'parks.parktype': {
'Meta': {'object_name': 'Parktype'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'parks.story': {
'Meta': {'ordering': "('-date',)", 'object_name': 'Story'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'objectionable_content': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'park': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parks.Park']", 'blank': 'True'}),
'rating': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '1'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['parks']
symmetrical = True
| codeforboston/bostongreenmap | parks/migrations/0003_fix_slugify.py | Python | bsd-3-clause | 8,645 |
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: Simplified BSD
import os
from os.path import join, exists
import re
from scipy import io
from shutil import copyfileobj
import urllib2
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
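# Illustrative sketch, not part of the original module: the normalization
# lower-cases the name, replaces spaces with dashes and strips parentheses
# and dots, so for example
#
#   mldata_filename('datasets-UCI iris')        # -> 'datasets-uci-iris'
#   mldata_filename('Whistler Daily Snowfall')  # -> 'whistler-daily-snowfall'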
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> iris = fetch_mldata('iris')
>>> iris.target[0]
1
>>> print iris.data[0]
[-0.555556 0.25 -0.864407 -0.916667]
Load the 'leukemia' dataset from mldata.org, which respects the
sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=False)
>>> print leuk.data.shape[0]
7129
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0')
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % urllib2.quote(dataname)
try:
mldata_url = urllib2.urlopen(urlname)
except urllib2.HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, int):
target_name = col_names[target_name]
if isinstance(data_name, int):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
| cdegroc/scikit-learn | sklearn/datasets/mldata.py | Python | bsd-3-clause | 6,501 |
"""
System commands
These are the default commands called by the system commandhandler
when various exceptions occur. If one of these commands are not
implemented and part of the current cmdset, the engine falls back
to a default solution instead.
Some system commands are shown in this module
as a REFERENCE only (they are not all added to Evennia's
default cmdset since they don't currently do anything differently from the
default backup systems hard-wired in the engine).
Overloading these commands in a cmdset can be used to create
interesting effects. An example is using the NoMatch system command
to implement a line-editor where you don't have to start each
line with a command (if there is no match to a known command,
the line is just added to the editor buffer).
"""
from evennia.comms.models import ChannelDB
from evennia.utils import create
from evennia.utils.utils import at_search_result
# The command keys the engine is calling
# (the actual names all start with __)
from evennia.commands.cmdhandler import CMD_NOINPUT
from evennia.commands.cmdhandler import CMD_NOMATCH
from evennia.commands.cmdhandler import CMD_MULTIMATCH
from evennia.commands.cmdhandler import CMD_CHANNEL
from evennia.utils import utils
from django.conf import settings
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
# Command called when there is no input at line
# (i.e. a lone return key)
class SystemNoInput(COMMAND_DEFAULT_CLASS):
"""
This is called when there is no input given
"""
key = CMD_NOINPUT
locks = "cmd:all()"
def func(self):
"Do nothing."
pass
#
# Command called when there was no match to the
# command name
#
class SystemNoMatch(COMMAND_DEFAULT_CLASS):
"""
No command was found matching the given input.
"""
key = CMD_NOMATCH
locks = "cmd:all()"
def func(self):
"""
This is given the failed raw string as input.
"""
self.msg("Huh?")
#
# Command called when there were multiple matches to the command.
#
class SystemMultimatch(COMMAND_DEFAULT_CLASS):
"""
Multiple command matches.
The cmdhandler adds a special attribute 'matches' to this
system command.
matches = [(cmdname, args, cmdobj, cmdlen, mratio, raw_cmdname) , (cmdname, ...), ...]
Here, `cmdname` is the command's name and `args` the rest of the incoming string,
without said command name. `cmdobj` is the Command instance, the cmdlen is
the same as len(cmdname) and mratio is a measure of how big a part of the
full input string the cmdname takes up - an exact match would be 1.0. Finally,
the `raw_cmdname` is the cmdname unmodified by eventual prefix-stripping.
"""
key = CMD_MULTIMATCH
locks = "cmd:all()"
def func(self):
"""
Handle multiple-matches by using the at_search_result default handler.
"""
# this was set by the cmdparser and is a tuple
# (cmdname, args, cmdobj, cmdlen, mratio, raw_cmdname). See
# evennia.commands.cmdparse.create_match for more details.
matches = self.matches
# at_search_result will itself msg the multimatch options to the caller.
at_search_result([match[2] for match in matches], self.caller, query=matches[0][0])
# Command called when the command given at the command line
# was identified as a channel name, e.g. when there exists a
# channel named 'ooc' and the user wrote
# > ooc Hello!
class SystemSendToChannel(COMMAND_DEFAULT_CLASS):
"""
This is a special command that the cmdhandler calls
when it detects that the command given matches
an existing Channel object key (or alias).
"""
key = CMD_CHANNEL
locks = "cmd:all()"
def parse(self):
channelname, msg = self.args.split(":", 1)
self.args = channelname.strip(), msg.strip()
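        # Illustrative note, not part of the original file: assuming the
        # cmdhandler hands this command its arguments in the form
        # "channelname:message" (e.g. "ooc:Hello!"), parse() leaves self.args
        # as the stripped tuple ("ooc", "Hello!") for func() to unpack.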
def func(self):
"""
Create a new message and send it to channel, using
the already formatted input.
"""
caller = self.caller
channelkey, msg = self.args
if not msg:
caller.msg("Say what?")
return
channel = ChannelDB.objects.get_channel(channelkey)
if not channel:
caller.msg("Channel '%s' not found." % channelkey)
return
if not channel.has_connection(caller):
string = "You are not connected to channel '%s'."
caller.msg(string % channelkey)
return
if not channel.access(caller, "send"):
string = "You are not permitted to send to channel '%s'."
caller.msg(string % channelkey)
return
msg = "[%s] %s: %s" % (channel.key, caller.name, msg)
msgobj = create.create_message(caller, msg, channels=[channel])
channel.msg(msgobj)
| jamesbeebop/evennia | evennia/commands/default/syscommands.py | Python | bsd-3-clause | 4,841 |
""" An action that sets the active perspective. """
# Enthought library imports.
from pyface.workbench.api import IPerspective
from traits.api import Delegate, Instance, on_trait_change
# Local imports.
from workbench_action import WorkbenchAction
class SetActivePerspectiveAction(WorkbenchAction):
""" An action that sets the active perspective. """
#### 'Action' interface ###################################################
# Is the action enabled?
enabled = Delegate('perspective')
# The action's unique identifier (may be None).
id = Delegate('perspective')
# The action's name (displayed on menus/tool bar tools etc).
name = Delegate('perspective')
# The action's style.
style = 'radio'
#### 'SetActivePerspectiveAction' interface ###############################
# The perspective that we set the active perspective to.
perspective = Instance(IPerspective)
###########################################################################
# 'Action' interface.
###########################################################################
def destroy(self):
""" Destroy the action. """
self.window = None
return
def perform(self, event):
""" Perform the action. """
self.window.active_perspective = self.perspective
return
###########################################################################
# Private interface.
###########################################################################
@on_trait_change('perspective,window.active_perspective')
def _refresh_checked(self):
""" Refresh the checked state of the action. """
self.checked = self.perspective is not None \
and self.window is not None \
and self.window.active_perspective is not None \
and self.perspective.id is self.window.active_perspective.id
return
#### EOF ######################################################################
| brett-patterson/pyface | pyface/workbench/action/set_active_perspective_action.py | Python | bsd-3-clause | 2,020 |
from pyexcel.internal.common import get_book_headers_in_array
from pyexcel.internal.generators import BookStream
from nose.tools import eq_
def test_book_stream():
bs = BookStream()
assert bs.number_of_sheets() == 0
def test_load_from_empty_sheets():
bs = BookStream()
bs.load_from_sheets(None)
assert bs.number_of_sheets() == 0
def test_key_sorting():
adict = {"cd": [[1, 3]], "ab": [[2, 3]]}
bs = BookStream(adict)
# bs[0] should be 'ab' : SheetStream([[2,3]])
assert bs[0].payload == [[2, 3]]
def test_get_book_headers_in_array():
data = iter([["a", "b", "c"], [1, 2, 3]])
book_stream = BookStream({"test": data})
colnames_array = get_book_headers_in_array(book_stream)
eq_(colnames_array, [["a", "b", "c"]])
| chfw/pyexcel | tests/test_book_stream.py | Python | bsd-3-clause | 773 |
#
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
__author__ = 'Nick Shortway'
__author_email__ = '[email protected]'
__versioninfo__ = (1, 1, 7)
__version__ = '.'.join(map(str, __versioninfo__))
| Instagram/neti | neti/__init__.py | Python | bsd-3-clause | 444 |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import copy
from vistrails.db.versions.v0_9_3.domain import DBVistrail, DBAction, DBTag, DBModule, \
DBConnection, DBPortSpec, DBFunction, DBParameter, DBLocation, DBAdd, \
DBChange, DBDelete, DBAnnotation, DBPort, DBAbstractionRef, DBGroup, \
DBWorkflow, DBLog
def translateVistrail(_vistrail):
def update_key(old_obj, translate_dict):
return '__notes__'
def update_annotation(old_obj, translate_dict):
new_dict = {'DBAnnotation': {'key': update_key}}
new_list = []
for annotation in old_obj.db_annotations:
if annotation.db_key == 'notes':
new_list.append(DBAnnotation.update_version(annotation,
new_dict))
else:
new_list.append(DBAnnotation.update_version(annotation,
{}))
return new_list
def update_session(old_obj, translate_dict):
if not old_obj.db_session:
session = None
else:
session = long(old_obj.db_session)
return session
def update_workflow(old_obj, translate_dict):
return DBWorkflow.update_version(old_obj.db_workflow, translate_dict)
translate_dict = {'DBAction': {'annotations': update_annotation,
'session': update_session},
'DBGroup': {'workflow': update_workflow}}
# pass DBVistrail because domain contains enriched version of the auto_gen
vistrail = DBVistrail.update_version(_vistrail, translate_dict)
vistrail.db_version = '0.9.3'
return vistrail
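# Illustrative note, not part of the original module: the annotation hook
# above means that, when a vistrail is upgraded to schema version 0.9.3, any
# action annotation stored under the key 'notes' is rewritten under the key
# '__notes__', while annotations with any other key are copied through
# unchanged.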
def translateWorkflow(_workflow):
def update_workflow(old_obj, translate_dict):
return DBWorkflow.update_version(old_obj.db_workflow, translate_dict)
translate_dict = {'DBGroup': {'workflow': update_workflow}}
workflow = update_workflow(_workflow, translate_dict)
workflow.db_version = '0.9.3'
return workflow
def translateLog(_log):
    # no field-level translations are needed for the log itself
    translate_dict = {}
    log = DBLog.update_version(_log, translate_dict)
log.db_version = '0.9.3'
return log
| Nikea/VisTrails | vistrails/db/versions/v0_9_3/translate/v0_9_1.py | Python | bsd-3-clause | 4,011 |
# -*- test-case-name: twisted.web.test.test_web -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""A twisted web component framework.
This module is DEPRECATED.
"""
import warnings
warnings.warn("This module is deprecated, please use Woven instead.", DeprecationWarning)
# System Imports
import string, time, types, traceback, pprint, sys, os
import linecache
import re
from cStringIO import StringIO
# Twisted Imports
from twisted.python import failure, log, rebuild, reflect, util
from twisted.internet import defer
from twisted.web import http
# Sibling Imports
import html, resource, error
import util as webutil
#backwards compatibility
from util import formatFailure, htmlrepr, htmlUnknown, htmlDict, htmlList,\
htmlInst, htmlString, htmlReprTypes
from server import NOT_DONE_YET
True = (1==1)
False = not True
# magic value that sez a widget needs to take over the whole page.
FORGET_IT = 99
def listify(x):
return [x]
def _ellipsize(x):
y = repr(x)
if len(y) > 1024:
return y[:1024]+"..."
return y
class Widget:
"""A component of a web page.
"""
title = None
def getTitle(self, request):
return self.title or reflect.qual(self.__class__)
def display(self, request):
"""Implement me to represent your widget.
I must return a list of strings and twisted.internet.defer.Deferred
instances.
"""
raise NotImplementedError("%s.display" % reflect.qual(self.__class__))
class StreamWidget(Widget):
"""A 'streamable' component of a webpage.
"""
def stream(self, write, request):
"""Call 'write' multiple times with a string argument to represent this widget.
"""
raise NotImplementedError("%s.stream" % reflect.qual(self.__class__))
def display(self, request):
"""Produce a list containing a single string.
"""
l = []
try:
result = self.stream(l.append, request)
if result is not None:
return result
return l
except:
return [webutil.formatFailure(failure.Failure())]
class WidgetMixin(Widget):
"""A mix-in wrapper for a Widget.
This mixin can be used to wrap functionality in any other widget with a
method of your choosing. It is designed to be used for mix-in classes that
can be mixed in to Form, StreamWidget, Presentation, etc, to augment the
data available to the 'display' methods of those classes, usually by adding
it to a Session.
"""
def display(self):
raise NotImplementedError("%s.display" % self.__class__)
def displayMixedWidget(self, request):
for base in reflect.allYourBase(self.__class__):
if issubclass(base, Widget) and not issubclass(base, WidgetMixin):
return base.display(self, request)
class Presentation(Widget):
"""I am a widget which formats a template with interspersed python expressions.
"""
template = '''
Hello, %%%%world%%%%.
'''
world = "you didn't assign to the 'template' attribute"
def __init__(self, template=None, filename=None):
if filename:
self.template = open(filename).read()
elif template:
self.template = template
self.variables = {}
self.tmpl = string.split(self.template, "%%%%")
def addClassVars(self, namespace, Class):
for base in Class.__bases__:
# Traverse only superclasses that know about Presentation.
if issubclass(base, Presentation) and base is not Presentation:
self.addClassVars(namespace, base)
        # 'lower' classes in the class hierarchy take precedence.
for k in Class.__dict__.keys():
namespace[k] = getattr(self, k)
def addVariables(self, namespace, request):
self.addClassVars(namespace, self.__class__)
def prePresent(self, request):
"""Perform any tasks which must be done before presenting the page.
"""
def formatTraceback(self, tb):
return [html.PRE(tb)]
def streamCall(self, call, *args, **kw):
"""Utility: Call a method like StreamWidget's 'stream'.
"""
io = StringIO()
apply(call, (io.write,) + args, kw)
return io.getvalue()
def display(self, request):
tm = []
flip = 0
namespace = {}
self.prePresent(request)
self.addVariables(namespace, request)
# This variable may not be obscured...
namespace['request'] = request
namespace['self'] = self
for elem in self.tmpl:
flip = not flip
if flip:
if elem:
tm.append(elem)
else:
try:
x = eval(elem, namespace, namespace)
except:
log.deferr()
tm.append(webutil.formatFailure(failure.Failure()))
else:
if isinstance(x, types.ListType):
tm.extend(x)
elif isinstance(x, Widget):
val = x.display(request)
if not isinstance(val, types.ListType):
raise Exception("%s.display did not return a list, it returned %s!" % (x.__class__, repr(val)))
tm.extend(val)
else:
# Only two allowed types here should be deferred and
# string.
tm.append(x)
return tm
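    # Illustrative sketch, not part of the original file: a template such as
    # "Hello, %%%%world%%%%." is split on "%%%%" into
    # ['Hello, ', 'world', '.']; display() writes the 1st, 3rd, ... pieces
    # verbatim and eval()s the 2nd, 4th, ... pieces (here the expression
    # `world`) against the namespace built by addVariables(), so a subclass
    # defining world = "there" renders as "Hello, there.".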
def htmlFor_hidden(write, name, value):
write('<INPUT TYPE="hidden" NAME="%s" VALUE="%s" />' % (name, value))
def htmlFor_file(write, name, value):
write('<INPUT SIZE="60" TYPE="file" NAME="%s" />' % name)
def htmlFor_string(write, name, value):
write('<INPUT SIZE="60" TYPE="text" NAME="%s" VALUE="%s" />' % (name, value))
def htmlFor_password(write, name, value):
write('<INPUT SIZE="60" TYPE="password" NAME="%s" />' % name)
def htmlFor_text(write, name, value):
write('<textarea COLS="60" ROWS="10" NAME="%s" WRAP="virtual">%s</textarea>' % (name, value))
def htmlFor_menu(write, name, value, allowMultiple=False):
"Value of the format [(optionName, displayName[, selected]), ...]"
write(' <select NAME="%s"%s>\n' %
(name, (allowMultiple and " multiple") or ''))
for v in value:
optionName, displayName, selected = util.padTo(3, v)
selected = (selected and " selected") or ''
write(' <option VALUE="%s"%s>%s</option>\n' %
(optionName, selected, displayName))
if not value:
write(' <option VALUE=""></option>\n')
write(" </select>\n")
def htmlFor_multimenu(write, name, value):
"Value of the format [(optionName, displayName[, selected]), ...]"
return htmlFor_menu(write, name, value, True)
def htmlFor_checkbox(write, name, value):
"A checkbox."
if value:
value = 'checked = "1"'
else:
value = ''
write('<INPUT TYPE="checkbox" NAME="__checkboxes__" VALUE="%s" %s />\n' % (name, value))
def htmlFor_checkgroup(write, name, value):
"A check-group."
for optionName, displayName, checked in value:
checked = (checked and 'checked = "1"') or ''
write('<INPUT TYPE="checkbox" NAME="%s" VALUE="%s" %s />%s<br />\n' % (name, optionName, checked, displayName))
def htmlFor_radio(write, name, value):
"A radio button group."
for optionName, displayName, checked in value:
checked = (checked and 'checked = "1"') or ''
write('<INPUT TYPE="radio" NAME="%s" VALUE="%s" %s />%s<br />\n' % (name, optionName, checked, displayName))
class FormInputError(Exception):
pass
class Form(Widget):
"""I am a web form.
In order to use me, you probably want to set self.formFields (or override
'getFormFields') and override 'process'. In order to demonstrate how this
is done, here is a small sample Form subclass::
| from twisted.web import widgets
| class HelloForm(widgets.Form):
| formFields = [
| ['string', 'Who to greet?', 'whoToGreet', 'World',
| 'This is for choosing who to greet.'],
| ['menu', 'How to greet?', 'how', [('cheerfully', 'with a smile'),
| ('sullenly', 'without enthusiasm'),
| ('spontaneously', 'on the spur of the moment')]]
        |                                           ('spontaneously', 'on the spur of the moment')],
        |            'This is for choosing how to greet them.']]
| write('The web wakes up and %s says, \"Hello, %s!\"' % (how, whoToGreet))
If you load this widget, you will see that it displays a form with 2 inputs
derived from data in formFields. Note the argument names to 'process':
after 'write' and 'request', they are the same as the 3rd elements ('Input
Name' parameters) of the formFields list.
"""
formGen = {
'hidden': htmlFor_hidden,
'file': htmlFor_file,
'string': htmlFor_string,
'int': htmlFor_string,
'float': htmlFor_string,
'text': htmlFor_text,
'menu': htmlFor_menu,
'multimenu': htmlFor_multimenu,
'password': htmlFor_password,
'checkbox': htmlFor_checkbox,
'checkgroup': htmlFor_checkgroup,
'radio': htmlFor_radio,
}
formParse = {
'int': int,
'float': float,
}
formFields = [
]
# do we raise an error when we get extra args or not?
formAcceptExtraArgs = 0
def getFormFields(self, request, fieldSet = None):
"""I return a list of lists describing this form, or a Deferred.
This information is used both to display the form and to process it.
The list is in the following format::
| [['Input Type', 'Display Name', 'Input Name', 'Input Value', 'Description'],
| ['Input Type 2', 'Display Name 2', 'Input Name 2', 'Input Value 2', 'Description 2']
| ...]
Valid values for 'Input Type' are:
- 'hidden': a hidden field that contains a string that the user won't change
- 'string': a short string
- 'int': an integer, e.g. 1, 0, 25 or -23
- 'float': a float, e.g. 1.0, 2, -3.45, or 28.4324231
- 'text': a longer text field, suitable for entering paragraphs
- 'menu': an HTML SELECT input, a list of choices
- 'multimenu': an HTML SELECT input allowing multiple choices
- 'checkgroup': a group of checkboxes
- 'radio': a group of radio buttons
- 'password': a 'string' field where the contents are not visible as the user types
- 'file': a file-upload form (EXPERIMENTAL)
'Display Name' is a descriptive string that will be used to
identify the field to the user.
The 'Input Name' must be a legal Python identifier that describes both
the value's name on the HTML form and the name of an argument to
'self.process()'.
        The 'Input Value' is usually a string, but its type can depend on the
        'Input Type': for 'int' it is an integer, and for 'menu' it is a list
        of (value, name) string pairs for the menu options. The input value
        for 'checkgroup' and 'radio' should be a list of ('inputName',
        'Display Name', 'checked') triplets.
The 'Description' field is an (optional) string which describes the form
item to the user.
If this result is statically determined for your Form subclass, you can
assign it to FormSubclass.formFields; if you need to determine it
dynamically, you can override this method.
Note: In many cases it is desirable to use user input for defaults in
the form rather than those supplied by your calculations, which is what
this method will do to self.formFields. If this is the case for you,
but you still need to dynamically calculate some fields, pass your
results back through this method by doing::
| def getFormFields(self, request):
| myFormFields = [self.myFieldCalculator()]
| return widgets.Form.getFormFields(self, request, myFormFields)
"""
fields = []
if fieldSet is None:
fieldSet = self.formFields
if not self.shouldProcess(request):
return fieldSet
for field in fieldSet:
if len(field)==5:
inputType, displayName, inputName, inputValue, description = field
else:
inputType, displayName, inputName, inputValue = field
description = ""
if inputType == 'checkbox':
if request.args.has_key('__checkboxes__'):
if inputName in request.args['__checkboxes__']:
inputValue = 1
else:
inputValue = 0
else:
inputValue = 0
elif inputType in ('checkgroup', 'radio'):
if request.args.has_key(inputName):
keys = request.args[inputName]
else:
keys = []
iv = inputValue
inputValue = []
for optionName, optionDisplayName, checked in iv:
checked = optionName in keys
inputValue.append([optionName, optionDisplayName, checked])
elif request.args.has_key(inputName):
iv = request.args[inputName][0]
if inputType in ['menu', 'multimenu']:
if iv in inputValue:
inputValue.remove(iv)
inputValue.insert(0, iv)
else:
inputValue = iv
fields.append([inputType, displayName, inputName, inputValue, description])
return fields
submitNames = ['Submit']
actionURI = ''
def format(self, form, write, request):
"""I display an HTML FORM according to the result of self.getFormFields.
"""
write('<form ENCTYPE="multipart/form-data" METHOD="post" ACTION="%s">\n'
'<table BORDER="0">\n' % (self.actionURI or request.uri))
for field in form:
if len(field) == 5:
inputType, displayName, inputName, inputValue, description = field
else:
inputType, displayName, inputName, inputValue = field
description = ""
write('<tr>\n<td ALIGN="right" VALIGN="top"><B>%s</B></td>\n'
'<td VALIGN="%s">\n' %
(displayName, ((inputType == 'text') and 'top') or 'middle'))
self.formGen[inputType](write, inputName, inputValue)
write('\n<br />\n<font size="-1">%s</font></td>\n</tr>\n' % description)
write('<tr><td></td><td ALIGN="left"><hr />\n')
for submitName in self.submitNames:
write('<INPUT TYPE="submit" NAME="submit" VALUE="%s" />\n' % submitName)
write('</td></tr>\n</table>\n'
'<INPUT TYPE="hidden" NAME="__formtype__" VALUE="%s" />\n'
% (reflect.qual(self.__class__)))
fid = self.getFormID()
if fid:
write('<INPUT TYPE="hidden" NAME="__formid__" VALUE="%s" />\n' % fid)
write("</form>\n")
def getFormID(self):
"""Override me: I disambiguate between multiple forms of the same type.
In order to determine which form an HTTP POST request is for, you must
have some unique identifier which distinguishes your form from other
forms of the same class. An example of such a unique identifier would
be: on a page with multiple FrobConf forms, each FrobConf form refers
to a particular Frobnitz instance, which has a unique id(). The
FrobConf form's getFormID would probably look like this::
| def getFormID(self):
| return str(id(self.frobnitz))
By default, this method will return None, since distinct Form instances
may be identical as far as the application is concerned.
"""
def process(self, write, request, submit, **kw):
"""Override me: I process a form.
I will only be called when the correct form input data to process this
form has been received.
I take a variable number of arguments, beginning with 'write',
'request', and 'submit'. 'write' is a callable object that will append
a string to the response, 'request' is a twisted.web.request.Request
instance, and 'submit' is the name of the submit action taken.
        The remainder of my arguments must be correctly named. They will each
        be named after one of the 'Input Name' fields in self.formFields (see
        getFormFields).
"""
write("<pre>Submit: %s <br /> %s</pre>" % (submit, html.PRE(pprint.PrettyPrinter().pformat(kw))))
def _doProcess(self, form, write, request):
"""(internal) Prepare arguments for self.process.
"""
args = request.args.copy()
kw = {}
for field in form:
inputType, displayName, inputName, inputValue = field[:4]
if inputType == 'checkbox':
if request.args.has_key('__checkboxes__'):
if inputName in request.args['__checkboxes__']:
formData = 1
else:
formData = 0
else:
formData = 0
elif inputType in ['checkgroup', 'radio', 'multimenu']:
if args.has_key(inputName):
formData = args[inputName]
del args[inputName]
else:
formData = []
else:
if not args.has_key(inputName):
raise FormInputError("missing field %s." % repr(inputName))
formData = args[inputName]
del args[inputName]
if not len(formData) == 1:
raise FormInputError("multiple values for field %s." %repr(inputName))
formData = formData[0]
method = self.formParse.get(inputType)
if method:
try:
formData = method(formData)
except:
raise FormInputError("%s: %s" % (displayName, "error"))
kw[inputName] = formData
submitAction = args.get('submit')
if submitAction:
submitAction = submitAction[0]
for field in ['submit', '__formtype__', '__checkboxes__']:
if args.has_key(field):
del args[field]
if args and not self.formAcceptExtraArgs:
raise FormInputError("unknown fields: %s" % repr(args))
return apply(self.process, (write, request, submitAction), kw)
def formatError(self,error):
"""Format an error message.
By default, this will make the message appear in red, bold italics.
"""
return '<font color="#f00"><b><i>%s</i></b></font><br />\n' % error
def shouldProcess(self, request):
args = request.args
fid = self.getFormID()
return (args and # there are arguments to the request
args.has_key('__formtype__') and # this is a widgets.Form request
args['__formtype__'][0] == reflect.qual(self.__class__) and # it is for a form of my type
((not fid) or # I am only allowed one form per page
(args.has_key('__formid__') and # if I distinguish myself from others, the request must too
args['__formid__'][0] == fid))) # I am in fact the same
def tryAgain(self, err, req):
"""Utility method for re-drawing the form with an error message.
This is handy in forms that process Deferred results. Normally you can
just raise a FormInputError() and this will happen by default.
"""
l = []
w = l.append
w(self.formatError(err))
self.format(self.getFormFields(req), w, req)
return l
def display(self, request):
"""Display the form."""
form = self.getFormFields(request)
if isinstance(form, defer.Deferred):
if self.shouldProcess(request):
form.addCallback(lambda form, f=self._displayProcess, r=request: f(r, form))
else:
form.addCallback(lambda form, f=self._displayFormat, r=request: f(r, form))
return [form]
else:
if self.shouldProcess(request):
return self._displayProcess(request, form)
else:
return self._displayFormat(request, form)
def _displayProcess(self, request, form):
l = []
write = l.append
try:
val = self._doProcess(form, write, request)
if val:
l.extend(val)
except FormInputError, fie:
write(self.formatError(str(fie)))
return l
def _displayFormat(self, request, form):
l = []
self.format(form, l.append, request)
return l
class DataWidget(Widget):
def __init__(self, data):
self.data = data
def display(self, request):
return [self.data]
class Time(Widget):
def display(self, request):
return [time.ctime(time.time())]
class Container(Widget):
def __init__(self, *widgets):
self.widgets = widgets
def display(self, request):
value = []
for widget in self.widgets:
d = widget.display(request)
value.extend(d)
return value
class _RequestDeferral:
def __init__(self):
self.deferred = defer.Deferred()
self.io = StringIO()
self.write = self.io.write
def finish(self):
self.deferred.callback([self.io.getvalue()])
def possiblyDeferWidget(widget, request):
# web in my head get it out get it out
try:
disp = widget.display(request)
# if this widget wants to defer anything -- well, I guess we've got to
# defer it.
for elem in disp:
if isinstance(elem, defer.Deferred):
req = _RequestDeferral()
RenderSession(disp, req)
return req.deferred
return string.join(disp, '')
except:
io = StringIO()
traceback.print_exc(file=io)
return html.PRE(io.getvalue())
class RenderSession:
"""I handle rendering of a list of deferreds, outputting their
results in correct order."""
class Sentinel:
pass
def __init__(self, lst, request):
self.lst = lst
self.request = request
self.needsHeaders = 0
self.beforeBody = 1
self.forgotten = 0
self.pauseList = []
for i in range(len(self.lst)):
item = self.lst[i]
if isinstance(item, defer.Deferred):
self._addDeferred(item, self.lst, i)
self.keepRendering()
def _addDeferred(self, deferred, lst, idx):
sentinel = self.Sentinel()
if hasattr(deferred, 'needsHeader'):
# You might want to set a header from a deferred, in which
# case you have to set an attribute -- needsHeader.
self.needsHeaders = self.needsHeaders + 1
args = (sentinel, 1)
else:
args = (sentinel, 0)
lst[idx] = sentinel, deferred
deferred.pause()
self.pauseList.append(deferred)
deferred.addCallbacks(self.callback, self.callback,
callbackArgs=args, errbackArgs=args)
def callback(self, result, sentinel, decNeedsHeaders):
if self.forgotten:
return
if result != FORGET_IT:
self.needsHeaders = self.needsHeaders - decNeedsHeaders
else:
result = [FORGET_IT]
# Make sure result is a sequence,
if not type(result) in (types.ListType, types.TupleType):
result = [result]
# If the deferred does not wish to produce its result all at
# once, it can give us a partial result as
# (NOT_DONE_YET, partial_result)
## XXX: How would a deferred go about producing the result in multiple
## stages?? --glyph
if result[0] is NOT_DONE_YET:
done = 0
result = result[1]
if not type(result) in (types.ListType, types.TupleType):
result = [result]
else:
done = 1
for i in xrange(len(result)):
item = result[i]
if isinstance(item, defer.Deferred):
self._addDeferred(item, result, i)
for position in range(len(self.lst)):
item = self.lst[position]
if type(item) is types.TupleType and len(item) > 0:
if item[0] is sentinel:
break
else:
raise AssertionError('Sentinel for Deferred not found!')
if done:
self.lst[position:position+1] = result
else:
self.lst[position:position] = result
self.keepRendering()
def keepRendering(self):
while self.pauseList:
pl = self.pauseList
self.pauseList = []
for deferred in pl:
deferred.unpause()
return
if self.needsHeaders:
# short circuit actual rendering process until we're sure no
# more deferreds need to set headers...
return
assert self.lst is not None, "This shouldn't happen."
while 1:
item = self.lst[0]
if self.beforeBody and FORGET_IT in self.lst:
# If I haven't moved yet, and the widget wants to take
# over the page, let it do so!
self.forgotten = 1
return
if isinstance(item, types.StringType):
self.beforeBody = 0
self.request.write(item)
elif type(item) is types.TupleType and len(item) > 0:
if isinstance(item[0], self.Sentinel):
return
elif isinstance(item, failure.Failure):
self.request.write(webutil.formatFailure(item))
else:
self.beforeBody = 0
unknown = html.PRE(repr(item))
self.request.write("RENDERING UNKNOWN: %s" % unknown)
del self.lst[0]
if len(self.lst) == 0:
self.lst = None
self.request.finish()
return
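# Usage sketch, not part of the original module: RenderSession is normally
# driven by Page.render below; given something like
#
#   RenderSession(['<html>', someDeferredBody, '</html>'], request)
#
# (someDeferredBody being a hypothetical Deferred), the string before the
# Deferred is written immediately, the session waits for the Deferred to
# fire, then writes its result and the trailing string in order and finally
# calls request.finish().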
## XXX: is this needed?
class WidgetResource(resource.Resource):
def __init__(self, widget):
self.widget = widget
resource.Resource.__init__(self)
def render(self, request):
RenderSession(self.widget.display(request), request)
return NOT_DONE_YET
class Page(resource.Resource, Presentation):
def __init__(self):
resource.Resource.__init__(self)
Presentation.__init__(self)
def render(self, request):
displayed = self.display(request)
RenderSession(displayed, request)
return NOT_DONE_YET
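# --- Illustrative sketch (editorial addition, not part of the original module). ---
# Page mixes Resource with Presentation, so a subclass usually just supplies a
# 'template' string using the %%%%python-expression%%%% escapes that WidgetPage
# and TitleBox below rely on.  The class name, greeting and template text are
# invented, and nothing in this module uses the example.
class _ExamplePage(Page):
    greeting = 'Hello from a Page subclass'
    template = '''<html><body><h1>%%%%self.greeting%%%%</h1></body></html>'''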
class WidgetPage(Page):
"""
I am a Page that takes a Widget in its constructor, and displays that
Widget wrapped up in a simple HTML template.
"""
stylesheet = '''
a
{
font-family: Lucida, Verdana, Helvetica, Arial, sans-serif;
color: #369;
text-decoration: none;
}
th
{
font-family: Lucida, Verdana, Helvetica, Arial, sans-serif;
font-weight: bold;
text-decoration: none;
text-align: left;
}
pre, code
{
font-family: "Courier New", Courier, monospace;
}
p, body, td, ol, ul, menu, blockquote, div
{
font-family: Lucida, Verdana, Helvetica, Arial, sans-serif;
color: #000;
}
'''
template = '''<html>
<head>
<title>%%%%self.title%%%%</title>
<style>
%%%%self.stylesheet%%%%
</style>
<base href="%%%%request.prePathURL()%%%%">
</head>
<body>
<h1>%%%%self.title%%%%</h1>
%%%%self.widget%%%%
</body>
</html>
'''
title = 'No Title'
widget = 'No Widget'
def __init__(self, widget):
Page.__init__(self)
self.widget = widget
if hasattr(widget, 'stylesheet'):
self.stylesheet = widget.stylesheet
def prePresent(self, request):
self.title = self.widget.getTitle(request)
def render(self, request):
displayed = self.display(request)
RenderSession(displayed, request)
return NOT_DONE_YET
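# --- Illustrative sketch (editorial addition, not part of the original module). ---
# WidgetPage is the default pageFactory used by Gadget below: it wraps a single
# widget in the HTML template above, takes its title from widget.getTitle() at
# render time and adopts the widget's stylesheet when one is defined.  The
# helper is never called; 'some_widget' stands for any Widget instance and the
# stylesheet override is only an assumption about how a caller might use it.
def _widget_page_sketch(some_widget):
    some_widget.stylesheet = 'body { background-color: #eee; }'  # optional hook
    return WidgetPage(some_widget)    # picks the stylesheet up in __init__ above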
class Gadget(resource.Resource):
"""I am a collection of Widgets, to be rendered through a Page Factory.
self.pageFactory should be a Resource that takes a Widget in its
constructor. The default is twisted.web.widgets.WidgetPage.
"""
isLeaf = 0
def __init__(self):
resource.Resource.__init__(self)
self.widgets = {}
self.files = []
self.modules = []
self.paths = {}
def render(self, request):
#Redirect to view this entity as a collection.
request.setResponseCode(http.FOUND)
# TODO who says it's not https?
request.setHeader("location","http%s://%s%s/" % (
request.isSecure() and 's' or '',
request.getHeader("host"),
(string.split(request.uri,'?')[0])))
return "NO DICE!"
def putWidget(self, path, widget):
"""
Gadget.putWidget(path, widget)
Add a Widget to this Gadget. It will be rendered through the
pageFactory associated with this Gadget, whenever 'path' is requested.
"""
self.widgets[path] = widget
#this is an obsolete function
def addFile(self, path):
"""
Gadget.addFile(path)
Add a static path to this Gadget. This method is obsolete, use
Gadget.putPath instead.
"""
log.msg("Gadget.addFile() is deprecated.")
self.paths[path] = path
def putPath(self, path, pathname):
"""
Gadget.putPath(path, pathname)
Add a static path to this Gadget. Whenever 'path' is requested,
twisted.web.static.File(pathname) is sent.
"""
self.paths[path] = pathname
def getWidget(self, path, request):
return self.widgets.get(path)
def pageFactory(self, *args, **kwargs):
"""
Gadget.pageFactory(*args, **kwargs) -> Resource
By default, this method returns self.page(*args, **kwargs). It
is only for backwards-compatibility -- you should set the 'pageFactory'
attribute on your Gadget inside of its __init__ method.
"""
#XXX: delete this after a while.
if hasattr(self, "page"):
log.msg("Gadget.page is deprecated, use Gadget.pageFactory instead")
return apply(self.page, args, kwargs)
else:
return apply(WidgetPage, args, kwargs)
def getChild(self, path, request):
if path == '':
# ZOOP!
if isinstance(self, Widget):
return self.pageFactory(self)
widget = self.getWidget(path, request)
if widget:
if isinstance(widget, resource.Resource):
return widget
else:
p = self.pageFactory(widget)
p.isLeaf = getattr(widget,'isLeaf',0)
return p
elif self.paths.has_key(path):
prefix = getattr(sys.modules[self.__module__], '__file__', '')
if prefix:
prefix = os.path.abspath(os.path.dirname(prefix))
return static.File(os.path.join(prefix, self.paths[path]))
elif path == '__reload__':
return self.pageFactory(Reloader(map(reflect.namedModule, [self.__module__] + self.modules)))
else:
return error.NoResource("No such child resource in gadget.")
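# --- Illustrative sketch (editorial addition, not part of the original module). ---
# Typical Gadget wiring, using only the methods defined above; the child names
# and the widget argument are invented.  putWidget() publishes a Widget under a
# child path (rendered through pageFactory, i.e. WidgetPage by default), while
# putPath() serves files through twisted.web.static.File, resolved in getChild()
# relative to the directory of the module that defines the Gadget subclass.
def _gadget_sketch(clock_widget):
    gadget = Gadget()
    gadget.putWidget('clock', clock_widget)     # /clock  -> WidgetPage(clock_widget)
    gadget.putPath('images', 'static/images')   # /images -> static.File(...)
    return gadget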
class TitleBox(Presentation):
template = '''\
<table %%%%self.widthOption%%%% cellpadding="1" cellspacing="0" border="0"><tr>\
<td bgcolor="%%%%self.borderColor%%%%"><center><font color="%%%%self.titleTextColor%%%%">%%%%self.title%%%%</font></center>\
<table width="100%" cellpadding="3" cellspacing="0" border="0"><tr>\
<td bgcolor="%%%%self.boxColor%%%%"><font color="%%%%self.boxTextColor%%%%">%%%%self.widget%%%%</font></td>\
</tr></table></td></tr></table>\
'''
borderColor = '#000000'
titleTextColor = '#ffffff'
boxTextColor = '#000000'
boxColor = '#ffffff'
widthOption = 'width="100%"'
title = 'No Title'
widget = 'No Widget'
def __init__(self, title, widget):
"""Wrap a widget with a given title.
"""
self.widget = widget
self.title = title
Presentation.__init__(self)
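# --- Illustrative sketch (editorial addition, not part of the original module). ---
# TitleBox simply nests another widget inside a titled, colored table; the color
# attributes above can be overridden per instance.  The title string and the
# override value below are invented for illustration.
def _title_box_sketch(inner_widget):
    box = TitleBox('Recent items', inner_widget)
    box.borderColor = '#336699'   # cosmetic override of the class default
    return box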
class Reloader(Presentation):
template = '''
Reloading...
<ul>
%%%%reload(request)%%%%
</ul> ... reloaded!
'''
def __init__(self, modules):
Presentation.__init__(self)
self.modules = modules
def reload(self, request):
request.redirect("..")
x = []
write = x.append
for module in self.modules:
rebuild.rebuild(module)
write('<li>reloaded %s<br />' % module.__name__)
return x
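# Editorial note (not part of the original source): Reloader backs the
# '__reload__' child served by Gadget.getChild() above -- its template calls
# reload(request), which redirects back to the parent URL and rebuilds every
# module listed in self.modules via twisted.python.rebuild.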
class Sidebar(StreamWidget):
bar = [
['Twisted',
['mirror', 'http://coopweb.org/ssd/twisted/'],
['mailing list', 'cgi-bin/mailman/listinfo/twisted-python']
]
]
headingColor = 'ffffff'
headingTextColor = '000000'
activeHeadingColor = '000000'
activeHeadingTextColor = 'ffffff'
sectionColor = '000088'
sectionTextColor = '008888'
activeSectionColor = '0000ff'
activeSectionTextColor = '00ffff'
def __init__(self, highlightHeading, highlightSection):
self.highlightHeading = highlightHeading
self.highlightSection = highlightSection
def getList(self):
return self.bar
def stream(self, write, request):
write("<table width=120 cellspacing=1 cellpadding=1 border=0>")
for each in self.getList():
if each[0] == self.highlightHeading:
headingColor = self.activeHeadingColor
headingTextColor = self.activeHeadingTextColor
canHighlight = 1
else:
headingColor = self.headingColor
headingTextColor = self.headingTextColor
canHighlight = 0
            write('<tr><td colspan=2 bgcolor="#%s"><font color="#%s">'
                  '<strong>%s</strong>'
                  '</font></td></tr>\n' % (headingColor, headingTextColor, each[0]))
for name, link in each[1:]:
if canHighlight and (name == self.highlightSection):
sectionColor = self.activeSectionColor
sectionTextColor = self.activeSectionTextColor
else:
sectionColor = self.sectionColor
sectionTextColor = self.sectionTextColor
write('<tr><td align=right bgcolor="#%s" width=6>-</td>'
'<td bgcolor="#%s"><a href="%s"><font color="#%s">%s'
'</font></a></td></tr>'
% (sectionColor, sectionColor, request.sibLink(link), sectionTextColor, name))
write("</table>")
# moved from template.py
from twisted.web.woven import template
from twisted.python import components
class WebWidgetNodeMutator(template.NodeMutator):
"""A WebWidgetNodeMutator replaces the node that is passed in to generate
with the result of generating the twisted.web.widget instance it adapts.
"""
def generate(self, request, node):
widget = self.data
displayed = widget.display(request)
try:
html = string.join(displayed)
except:
pr = Presentation()
pr.tmpl = displayed
            # Fall back to rendering the raw display list through the
            # Presentation (as the commented-out line evidently intended)
            # rather than repeating the join that just failed.
            strList = pr.display(request)
            html = string.join(strList)
stringMutator = template.StringNodeMutator(html)
return stringMutator.generate(request, node)
components.registerAdapter(WebWidgetNodeMutator, Widget, template.INodeMutator)
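# Editorial note (not part of the original source): the registerAdapter() call
# above tells twisted.python.components that a Widget can be adapted to
# template.INodeMutator, so woven templates render embedded widgets through
# WebWidgetNodeMutator.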
import static
| santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/web/widgets.py | Python | bsd-3-clause | 36,525 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__ = ["radar_sim"]
from .radar_sim import *
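# Editorial note (not part of the original source): this package __init__ only
# re-exports the radar_sim example; assuming the package is importable as
# filterpy.examples, both `from filterpy.examples import radar_sim` and a star
# import (limited by __all__ above) expose the same module.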
| barney-NG/pyCAMTracker | src/filterpy/examples/__init__.py | Python | mit | 183 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('transtech_directory', '0004_address_contactinfo'),
]
operations = [
migrations.AddField(
model_name='contactinfo',
name='value',
field=models.CharField(verbose_name='Value', default=' ', max_length=400),
preserve_default=False,
),
migrations.AlterField(
model_name='address',
name='city',
field=models.CharField(verbose_name='City', max_length=100),
),
migrations.AlterField(
model_name='address',
name='country',
field=django_countries.fields.CountryField(verbose_name='Country', max_length=2),
),
migrations.AlterField(
model_name='address',
name='directory',
field=models.ForeignKey(related_name='addresses', verbose_name='Service provider', to='transtech_directory.Directory'),
),
migrations.AlterField(
model_name='address',
name='latitude',
field=models.FloatField(verbose_name='Latitude', null=True, blank=True),
),
migrations.AlterField(
model_name='address',
name='longitude',
field=models.FloatField(verbose_name='Longitude', null=True, blank=True),
),
migrations.AlterField(
model_name='address',
name='postal_code',
field=models.CharField(verbose_name='postal_code', max_length=10, null=True, blank=True),
),
migrations.AlterField(
model_name='address',
name='street',
field=models.CharField(verbose_name='Street address', max_length=255),
),
migrations.AlterField(
model_name='address',
name='street2',
field=models.CharField(verbose_name='Second line', max_length=255, null=True, blank=True),
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(verbose_name='Category name', max_length=100),
),
migrations.AlterField(
model_name='category',
name='slug',
field=models.SlugField(verbose_name='Slug', max_length=100),
),
migrations.AlterField(
model_name='contactinfo',
name='directory',
field=models.ForeignKey(related_name='contacts', verbose_name='Service provider', to='transtech_directory.Directory'),
),
migrations.AlterField(
model_name='contactinfo',
name='type',
field=models.CharField(verbose_name='Type', choices=[('phone', 'Phone'), ('email', 'E-Mail'), ('other', 'Other')], max_length=5),
),
migrations.AlterField(
model_name='directory',
name='link',
field=models.URLField(verbose_name='Link', null=True, blank=True),
),
migrations.AlterField(
model_name='directory',
name='service_category',
field=models.ManyToManyField(verbose_name='Categories', to='transtech_directory.Category'),
),
migrations.AlterField(
model_name='directory',
name='service_provider',
field=models.CharField(verbose_name='Service provider name', max_length=255),
),
migrations.AlterField(
model_name='directory',
name='slug',
field=models.SlugField(verbose_name='slug', max_length=255),
),
]
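# Editorial note (not part of the original source): an auto-generated migration
# like this one is applied with Django's standard command, e.g.
#   python manage.py migrate transtech_directory 0005
# where the app label and migration prefix come from this file's location and
# name.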
| nanuxbe/transtech-directory | transtech_directory/migrations/0005_auto_20150921_1047.py | Python | mit | 3,744 |