#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import environment_format
from heat.common import grouputils
from heat.common.i18n import _
from heat.common import short_id
from heat.common import timeutils as iso8601utils
from heat.engine import attributes
from heat.engine import environment
from heat.engine import function
from heat.engine import properties
from heat.engine.resources import stack_resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.scaling import lbutils
from heat.scaling import rolling_update
from heat.scaling import template
(SCALED_RESOURCE_TYPE,) = ('OS::Heat::ScaledResource',)
class InstanceGroup(stack_resource.StackResource):
PROPERTIES = (
AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, SIZE,
LOAD_BALANCER_NAMES, TAGS,
) = (
'AvailabilityZones', 'LaunchConfigurationName', 'Size',
'LoadBalancerNames', 'Tags',
)
_TAG_KEYS = (
TAG_KEY, TAG_VALUE,
) = (
'Key', 'Value',
)
_ROLLING_UPDATE_SCHEMA_KEYS = (
MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
) = (
'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
)
_UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE,) = ('RollingUpdate',)
ATTRIBUTES = (
INSTANCE_LIST,
) = (
'InstanceList',
)
properties_schema = {
AVAILABILITY_ZONES: properties.Schema(
properties.Schema.LIST,
_('Not Implemented.'),
required=True
),
LAUNCH_CONFIGURATION_NAME: properties.Schema(
properties.Schema.STRING,
_('The reference to a LaunchConfiguration resource.'),
required=True,
update_allowed=True
),
SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Desired number of instances.'),
required=True,
update_allowed=True
),
LOAD_BALANCER_NAMES: properties.Schema(
properties.Schema.LIST,
_('List of LoadBalancer resources.')
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('Tags to attach to this group.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TAG_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
TAG_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
)
),
}
attributes_schema = {
INSTANCE_LIST: attributes.Schema(
_("A comma-delimited list of server ip addresses. "
"(Heat extension)."),
type=attributes.Schema.STRING
),
}
rolling_update_schema = {
MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
default=0),
MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
default=1),
PAUSE_TIME: properties.Schema(properties.Schema.STRING,
default='PT0S')
}
update_policy_schema = {
ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
schema=rolling_update_schema)
}
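# Illustrative note (not part of the original module): the update_policy_schema
# above accepts an AWS-style UpdatePolicy section such as the following sketch
# (values are examples only):
#
#   UpdatePolicy:
#     RollingUpdate:
#       MinInstancesInService: 1
#       MaxBatchSize: 2
#       PauseTime: PT1M
#
# PauseTime is an ISO 8601 duration; validate() below rejects durations longer
# than one hour.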
def validate(self):
"""Add validation for update_policy."""
super(InstanceGroup, self).validate()
if self.update_policy is not None:
policy_name = self.ROLLING_UPDATE
if (policy_name in self.update_policy and
self.update_policy[policy_name] is not None):
pause_time = self.update_policy[policy_name][self.PAUSE_TIME]
if iso8601utils.parse_isoduration(pause_time) > 3600:
msg = _('Maximum %s is 1 hour.') % self.PAUSE_TIME
raise ValueError(msg)
def validate_launchconfig(self):
# It seems to be a common error to not have a dependency on the
# launchconfiguration. This can happen if the actual resource
# name is used instead of {get_resource: launch_conf} and no
# depends_on is used.
conf_refid = self.properties.get(self.LAUNCH_CONFIGURATION_NAME)
if conf_refid:
conf = self.stack.resource_by_refid(conf_refid)
if conf is None:
raise ValueError(_('%(lc)s (%(ref)s)'
' reference can not be found.')
% dict(lc=self.LAUNCH_CONFIGURATION_NAME,
ref=conf_refid))
if self.name not in conf.required_by():
raise ValueError(_('%(lc)s (%(ref)s)'
' requires a reference to the'
' configuration not just the name of the'
' resource.') % dict(
lc=self.LAUNCH_CONFIGURATION_NAME,
ref=conf_refid))
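# Illustrative note (not part of the original module): the dependency check
# above expects the group to reference its LaunchConfiguration resource rather
# than hard-code the resource name, e.g. (hypothetical template snippet):
#
#   group:
#     type: OS::Heat::InstanceGroup
#     properties:
#       LaunchConfigurationName: {get_resource: launch_conf}
#
# Passing the literal name 'launch_conf' instead creates no dependency edge,
# which triggers the second ValueError above unless depends_on is used.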
def handle_create(self):
"""Create a nested stack and add the initial resources to it."""
self.validate_launchconfig()
num_instances = self.properties[self.SIZE]
initial_template = self._create_template(num_instances)
return self.create_with_template(initial_template)
def check_create_complete(self, task):
"""When stack creation is done, update the loadbalancer.
If any instances failed to be created, delete them.
"""
done = super(InstanceGroup, self).check_create_complete(task)
if done:
self._lb_reload()
return done
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Updates self.properties, if Properties has changed.
If Properties has changed, update self.properties, so we
get the new values during any subsequent adjustment.
"""
if tmpl_diff:
# parse update policy
if rsrc_defn.UPDATE_POLICY in tmpl_diff:
up = json_snippet.update_policy(self.update_policy_schema,
self.context)
self.update_policy = up
self.properties = json_snippet.properties(self.properties_schema,
self.context)
if prop_diff:
# Replace instances first if launch configuration has changed
self._try_rolling_update(prop_diff)
# Get the current capacity, we may need to adjust if
# Size has changed
if self.properties[self.SIZE] is not None:
self.resize(self.properties[self.SIZE])
else:
curr_size = grouputils.get_size(self)
self.resize(curr_size)
def _tags(self):
"""Make sure that we add a tag that Ceilometer can pick up.
These need to be prepended with 'metering.'.
"""
tags = self.properties.get(self.TAGS) or []
for t in tags:
if t[self.TAG_KEY].startswith('metering.'):
# the user has added one, don't add another.
return tags
return tags + [{self.TAG_KEY: 'metering.groupname',
self.TAG_VALUE: self.FnGetRefId()}]
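# Illustrative note (not part of the original module): for a group created with
# Tags: [{'Key': 'env', 'Value': 'dev'}], _tags() would return roughly
#
#   [{'Key': 'env', 'Value': 'dev'},
#    {'Key': 'metering.groupname', 'Value': '<group reference id>'}]
#
# so that Ceilometer can pick up samples via the 'metering.' prefixed tag.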
def _get_conf_properties(self):
conf_refid = self.properties[self.LAUNCH_CONFIGURATION_NAME]
conf = self.stack.resource_by_refid(conf_refid)
props = function.resolve(conf.properties.data)
if 'InstanceId' in props:
props = conf.rebuild_lc_properties(props['InstanceId'])
props['Tags'] = self._tags()
# If the launch configuration is created from an existing instance,
# delete the 'InstanceId' property.
props.pop('InstanceId', None)
return conf, props
def _get_resource_definition(self):
conf, props = self._get_conf_properties()
return rsrc_defn.ResourceDefinition(None,
SCALED_RESOURCE_TYPE,
props,
conf.t.metadata())
def _create_template(self, num_instances, num_replace=0,
template_version=('HeatTemplateFormatVersion',
'2012-12-12')):
"""Create a template to represent autoscaled instances.
Also see heat.scaling.template.member_definitions.
"""
instance_definition = self._get_resource_definition()
old_resources = grouputils.get_member_definitions(self)
definitions = template.member_definitions(
old_resources, instance_definition, num_instances, num_replace,
short_id.generate_id)
child_env = environment.get_child_environment(
self.stack.env,
self.child_params(), item_to_remove=self.resource_info)
return template.make_template(definitions, version=template_version,
child_env=child_env)
def _try_rolling_update(self, prop_diff):
if (self.update_policy[self.ROLLING_UPDATE] and
self.LAUNCH_CONFIGURATION_NAME in prop_diff):
policy = self.update_policy[self.ROLLING_UPDATE]
pause_sec = iso8601utils.parse_isoduration(policy[self.PAUSE_TIME])
self._replace(policy[self.MIN_INSTANCES_IN_SERVICE],
policy[self.MAX_BATCH_SIZE],
pause_sec)
def _update_timeout(self, batch_cnt, pause_sec):
total_pause_time = pause_sec * max(batch_cnt - 1, 0)
if total_pause_time >= self.stack.timeout_secs():
msg = _('The current %s will result in stack update '
'timeout.') % rsrc_defn.UPDATE_POLICY
raise ValueError(msg)
return self.stack.timeout_secs() - total_pause_time
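# Illustrative note (not part of the original module): with 3 batches and a
# 60 second PauseTime, total_pause_time is 60 * (3 - 1) = 120 seconds; given a
# stack timeout of 3600 seconds, _update_timeout() returns 3600 - 120 = 3480
# seconds as the per-batch update timeout.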
def _replace(self, min_in_service, batch_size, pause_sec):
"""Replace the instances in the group.
Replace the instances in the group using updated launch configuration.
"""
def changing_instances(tmpl):
instances = grouputils.get_members(self)
current = set((i.name, i.t) for i in instances)
updated = set(tmpl.resource_definitions(self.nested()).items())
# includes instances to be updated and deleted
affected = set(k for k, v in current ^ updated)
return set(i.FnGetRefId() for i in instances if i.name in affected)
def pause_between_batch():
while True:
try:
yield
except scheduler.Timeout:
return
capacity = len(self.nested()) if self.nested() else 0
batches = list(self._get_batches(capacity, batch_size, min_in_service))
update_timeout = self._update_timeout(len(batches), pause_sec)
try:
for index, (total_capacity, efft_bat_sz) in enumerate(batches):
template = self._create_template(total_capacity, efft_bat_sz)
self._lb_reload(exclude=changing_instances(template))
updater = self.update_with_template(template)
checker = scheduler.TaskRunner(self._check_for_completion,
updater)
checker(timeout=update_timeout)
if index < (len(batches) - 1) and pause_sec > 0:
self._lb_reload()
waiter = scheduler.TaskRunner(pause_between_batch)
waiter(timeout=pause_sec)
finally:
self._lb_reload()
@staticmethod
def _get_batches(capacity, batch_size, min_in_service):
"""Return an iterator over the batches in a batched update.
Each batch is a tuple comprising the total size of the group after
processing the batch, and the number of members that can receive the
new definition in that batch (either by creating a new member or
updating an existing one).
"""
efft_capacity = capacity
updated = 0
while rolling_update.needs_update(capacity, efft_capacity, updated):
batch = rolling_update.next_batch(capacity, efft_capacity,
updated, batch_size,
min_in_service)
yield batch
efft_capacity, num_updates = batch
updated += num_updates
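# Illustrative note (not part of the original module): for capacity=3,
# batch_size=1 and min_in_service=0, the generator above would yield something
# like (3, 1), (3, 1), (3, 1): three batches that each keep the group at its
# full size and update one member. The exact values come from
# heat.scaling.rolling_update.next_batch, which may temporarily grow the group
# when min_in_service could not otherwise be honoured; this is only a sketch.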
def _check_for_completion(self, updater):
while not self.check_update_complete(updater):
yield
def resize(self, new_capacity):
"""Resize the instance group to the new capacity.
When shrinking, the oldest instances will be removed.
"""
new_template = self._create_template(new_capacity)
try:
updater = self.update_with_template(new_template)
checker = scheduler.TaskRunner(self._check_for_completion, updater)
checker(timeout=self.stack.timeout_secs())
finally:
# Reload the LB in any case, so it's only pointing at healthy
# nodes.
self._lb_reload()
def _lb_reload(self, exclude=None):
lb_names = self.properties.get(self.LOAD_BALANCER_NAMES, None)
if lb_names:
lb_dict = dict((name, self.stack[name]) for name in lb_names)
lbutils.reload_loadbalancers(self, lb_dict, exclude)
def get_reference_id(self):
return self.physical_resource_name_or_FnGetRefId()
def _resolve_attribute(self, name):
"""Resolves the resource's attributes.
heat extension: "InstanceList" returns comma delimited list of server
ip addresses.
"""
if name == self.INSTANCE_LIST:
return u','.join(inst.FnGetAtt('PublicIp')
for inst in grouputils.get_members(self)) or None
def child_template(self):
num_instances = int(self.properties[self.SIZE])
return self._create_template(num_instances)
def child_params(self):
"""Return the environment for the nested stack."""
return {
environment_format.PARAMETERS: {},
environment_format.RESOURCE_REGISTRY: {
SCALED_RESOURCE_TYPE: 'AWS::EC2::Instance',
},
}
def resource_mapping():
return {
'OS::Heat::InstanceGroup': InstanceGroup,
}
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class ipset(base_resource) :
""" Configuration for network ipset resource. """
def __init__(self) :
self._name = ""
self._td = 0
self.___count = 0
@property
def name(self) :
ur"""Name for the IP set. Must begin with a letter, number, or the underscore character (_), and can consist of letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore characters. Cannot be changed after the IP set is created. Choose a name that helps identify the IP set.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the IP set. Must begin with a letter, number, or the underscore character (_), and can consist of letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore characters. Cannot be changed after the IP set is created. Choose a name that helps identify the IP set.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def td(self) :
ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
ur"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
"""
try :
self._td = td
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(ipset_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipset
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add ipset.
"""
try :
if type(resource) is not list :
addresource = ipset()
addresource.name = resource.name
addresource.td = resource.td
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ ipset() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].td = resource[i].td
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
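# Illustrative usage sketch (not part of the generated SDK), assuming an
# authenticated nitro_service client named 'client':
#
#   new_set = ipset()
#   new_set.name = "ipset_example"   # hypothetical name
#   new_set.td = 0                   # default traffic domain
#   ipset.add(client, new_set)
#
# Passing a list of ipset objects instead issues a single bulk add request.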
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete ipset.
"""
try :
if type(resource) is not list :
deleteresource = ipset()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ ipset() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ ipset() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the ipset resources that are configured on netscaler.
"""
try :
if not name :
obj = ipset()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = ipset()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [ipset() for _ in range(len(name))]
obj = [ipset() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = ipset()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of ipset resources.
filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = ipset()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
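# Illustrative usage sketch (not part of the generated SDK): fetching only the
# ipsets in traffic domain 10 might look like
#
#   matching = ipset.get_filtered(client, "td:10")
#
# where the filter string uses the "attribute:value" form shown in the
# docstring above (the "port:80,servicetype:HTTP" example there is generic SDK
# boilerplate; this resource exposes 'name' and 'td').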
@classmethod
def count(cls, client) :
ur""" Use this API to count the ipset resources configured on NetScaler.
"""
try :
obj = ipset()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of ipset resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = ipset()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class ipset_response(base_response) :
def __init__(self, length=1) :
self.ipset = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipset = [ipset() for _ in range(length)]
import copy
import json
import logging
import os
import subprocess
import uuid
import pytest
import retrying
import test_helpers
from dcos_test_utils import marathon, recordio
__maintainer__ = 'Gilbert88'
__contact__ = '[email protected]'
# Creates and yields the initial ATTACH_CONTAINER_INPUT message, then a data message,
# then an empty data chunk to indicate end-of-stream.
def input_streamer(nested_container_id):
encoder = recordio.Encoder(lambda s: bytes(json.dumps(s, ensure_ascii=False), "UTF-8"))
message = {
'type': 'ATTACH_CONTAINER_INPUT',
'attach_container_input': {
'type': 'CONTAINER_ID',
'container_id': nested_container_id}}
yield encoder.encode(message)
message['attach_container_input'] = {
'type': 'PROCESS_IO',
'process_io': {
'type': 'DATA',
'data': {'type': 'STDIN', 'data': 'meow'}}}
yield encoder.encode(message)
# Send an empty data chunk to indicate EOF to the server so that it
# knows we are done streaming input.
message['attach_container_input']['process_io']['data']['data'] = ''
yield encoder.encode(message)
def test_if_marathon_app_can_be_debugged(dcos_api_session):
# Launch a basic marathon app (no image), so we can debug into it!
# Cannot use deploy_and_cleanup because we must attach to a running app/task/container.
app, test_uuid = test_helpers.marathon_test_app()
app_id = 'integration-test-{}'.format(test_uuid)
with dcos_api_session.marathon.deploy_and_cleanup(app):
# Fetch the mesos master state once the task is running
master_ip = dcos_api_session.masters[0]
r = dcos_api_session.get('/state', host=master_ip, port=5050)
assert r.status_code == 200
state = r.json()
# Find the agent_id and container_id from master state
container_id = None
agent_id = None
for framework in state['frameworks']:
for task in framework['tasks']:
if app_id in task['id']:
container_id = task['statuses'][0]['container_status']['container_id']['value']
agent_id = task['slave_id']
assert container_id is not None, 'Container ID not found for instance of app_id {}'.format(app_id)
assert agent_id is not None, 'Agent ID not found for instance of app_id {}'.format(app_id)
# Find hostname and URL from agent_id
agent_hostname = None
for agent in state['slaves']:
if agent['id'] == agent_id:
agent_hostname = agent['hostname']
assert agent_hostname is not None, 'Agent hostname not found for agent_id {}'.format(agent_id)
logging.debug('Located %s with containerID %s on agent %s', app_id, container_id, agent_hostname)
def _post_agent(url, headers, json=None, data=None, stream=False):
r = dcos_api_session.post(
url,
host=agent_hostname,
port=5051,
headers=headers,
json=json,
data=data,
stream=stream)
assert r.status_code == 200
return r
# Prepare nested container id data
nested_container_id = {
'value': 'debug-%s' % str(uuid.uuid4()),
'parent': {'value': '%s' % container_id}}
# Launch debug session and attach to output stream of debug container
output_headers = {
'Content-Type': 'application/json',
'Accept': 'application/recordio',
'Message-Accept': 'application/json'
}
lncs_data = {
'type': 'LAUNCH_NESTED_CONTAINER_SESSION',
'launch_nested_container_session': {
'command': {'value': 'cat'},
'container_id': nested_container_id}}
launch_output = _post_agent('/api/v1', output_headers, json=lncs_data, stream=True)
# Attach to output stream of nested container
attach_out_data = {
'type': 'ATTACH_CONTAINER_OUTPUT',
'attach_container_output': {'container_id': nested_container_id}}
attached_output = _post_agent('/api/v1', output_headers, json=attach_out_data, stream=True)
# Attach to input stream of debug container and stream a message
input_headers = {
'Content-Type': 'application/recordio',
'Message-Content-Type': 'application/json',
'Accept': 'application/json',
'Transfer-Encoding': 'chunked'
}
_post_agent('/api/v1', input_headers, data=input_streamer(nested_container_id))
# Verify the streamed output from the launch session
meowed = False
decoder = recordio.Decoder(lambda s: json.loads(s.decode("UTF-8")))
for chunk in launch_output.iter_content():
for r in decoder.decode(chunk):
if r['type'] == 'DATA':
logging.debug('Extracted data chunk: %s', r['data'])
assert r['data']['data'] == 'meow', 'Output did not match expected'
meowed = True
assert meowed, 'Read launch output without seeing meow.'
meowed = False
# Verify the message from the attached output stream
for chunk in attached_output.iter_content():
for r in decoder.decode(chunk):
if r['type'] == 'DATA':
logging.debug('Extracted data chunk: %s', r['data'])
assert r['data']['data'] == 'meow', 'Output did not match expected'
meowed = True
assert meowed, 'Read output stream without seeing meow.'
def test_files_api(dcos_api_session):
'''
This test verifies that the standard output and error of a Mesos task can be
read. We check that neither standard output nor error are empty files. Since
the default `marathon_test_app()` does not write to its standard output, the
task definition is modified to output something there.
'''
app, test_uuid = test_helpers.marathon_test_app()
app['cmd'] = 'echo $DCOS_TEST_UUID && ' + app['cmd']
with dcos_api_session.marathon.deploy_and_cleanup(app):
marathon_framework_id = dcos_api_session.marathon.get('/v2/info').json()['frameworkId']
app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(app['id'])).json()['tasks'][0]
for required_sandbox_file in ('stdout', 'stderr'):
content = dcos_api_session.mesos_sandbox_file(
app_task['slaveId'], marathon_framework_id, app_task['id'], required_sandbox_file)
assert content, 'File {} should not be empty'.format(required_sandbox_file)
def test_if_ucr_app_runs_in_new_pid_namespace(dcos_api_session):
# We run a marathon app instead of a metronome job because metronome
# doesn't support running docker images with the UCR. We need this
# functionality in order to test that the pid namespace isolator
# is functioning correctly.
app, test_uuid = test_helpers.marathon_test_app(container_type=marathon.Container.MESOS)
ps_output_file = 'ps_output'
app['cmd'] = 'ps ax -o pid= > {}; sleep 1000'.format(ps_output_file)
with dcos_api_session.marathon.deploy_and_cleanup(app, check_health=False):
marathon_framework_id = dcos_api_session.marathon.get('/v2/info').json()['frameworkId']
app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(app['id'])).json()['tasks'][0]
# There is a short delay between the `app_task` starting and it writing
# its output to the `ps_output_file`. Because of this, we wait up to 10
# seconds for this file to appear before throwing an exception.
@retrying.retry(wait_fixed=1000, stop_max_delay=10000)
def get_ps_output():
return dcos_api_session.mesos_sandbox_file(
app_task['slaveId'], marathon_framework_id, app_task['id'], ps_output_file)
assert len(get_ps_output().split()) <= 4, 'UCR app has more than 4 processes running in its pid namespace'
def test_memory_profiling(dcos_api_session):
# Test that we can fetch raw memory profiles
master_ip = dcos_api_session.masters[0]
r0 = dcos_api_session.get(
'/memory-profiler/start', host=master_ip, port=5050)
assert r0.status_code == 200, r0.text
r1 = dcos_api_session.get(
'/memory-profiler/stop', host=master_ip, port=5050)
assert r1.status_code == 200, r1.text
r2 = dcos_api_session.get(
'/memory-profiler/download/raw', host=master_ip, port=5050)
assert r2.status_code == 200, r2.text
def test_blkio_stats(dcos_api_session):
# Launch a Marathon application to do some disk writes, and then verify that
# the cgroups blkio statistics of the application can be correctly retrieved.
app, test_uuid = test_helpers.marathon_test_app(container_type=marathon.Container.MESOS)
app_id = 'integration-test-{}'.format(test_uuid)
# The application will generate a 10k file with 10 disk writes.
#
# TODO(qianzhang): In some old platforms (CentOS 6 and Ubuntu 14),
# the first disk write of a blkio cgroup will always be missed in
# the blkio throttling statistics, so here we run two `dd` commands,
# the first one which does only one disk write will be missed on
# those platforms, and the second one will be recorded in the blkio
# throttling statistics. When we drop the CentOS 6 and Ubuntu 14
# support in future, we should remove the first `dd` command.
marker_file = 'marker'
app['cmd'] = ('dd if=/dev/zero of=file bs=1024 count=1 oflag=dsync && '
'dd if=/dev/zero of=file bs=1024 count=10 oflag=dsync && '
'echo -n done > {} && sleep 1000').format(marker_file)
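# Note (added for clarity, not part of the original test): the second dd above
# performs 10 writes of bs=1024 bytes, i.e. 10 I/O operations and
# 10 * 1024 = 10240 bytes, which is what the blkio assertions at the end of
# this test compare against.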
with dcos_api_session.marathon.deploy_and_cleanup(app, check_health=False):
marathon_framework_id = dcos_api_session.marathon.get('/v2/info').json()['frameworkId']
app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(app['id'])).json()['tasks'][0]
# Wait up to 10 seconds for the marker file to appear which
# indicates the disk writes via `dd` command are done.
@retrying.retry(wait_fixed=1000, stop_max_delay=10000)
def get_marker_file_content():
return dcos_api_session.mesos_sandbox_file(
app_task['slaveId'], marathon_framework_id, app_task['id'], marker_file)
assert get_marker_file_content() == 'done'
# Fetch the Mesos master state
master_ip = dcos_api_session.masters[0]
r = dcos_api_session.get('/state', host=master_ip, port=5050)
assert r.status_code == 200
state = r.json()
# Find the agent_id from master state
agent_id = None
for framework in state['frameworks']:
for task in framework['tasks']:
if app_id in task['id']:
agent_id = task['slave_id']
assert agent_id is not None, 'Agent ID not found for instance of app_id {}'.format(app_id)
# Find hostname from agent_id
agent_hostname = None
for agent in state['slaves']:
if agent['id'] == agent_id:
agent_hostname = agent['hostname']
assert agent_hostname is not None, 'Agent hostname not found for agent_id {}'.format(agent_id)
logging.debug('Located %s on agent %s', app_id, agent_hostname)
# Fetch the Mesos agent statistics
r = dcos_api_session.get('/monitor/statistics', host=agent_hostname, port=5051)
assert r.status_code == 200
stats = r.json()
total_io_serviced = None
total_io_service_bytes = None
for stat in stats:
# Find the statistic for the Marathon application that we deployed. Since what that
# Marathon application launched is a Mesos command task (i.e., using Mesos built-in
# command executor), the executor ID will be same as the task ID, so if we find the
# `app_id` in an executor ID of a statistic, that must be the statistic entry
# corresponding to the application that we deployed.
if app_id in stat['executor_id']:
# We only care about the blkio throttle statistics but not the blkio cfq statistics,
# because in the environment where the disk IO scheduler is not `cfq`, all the cfq
# statistics may be 0.
throttle_stats = stat['statistics']['blkio_statistics']['throttling']
for throttle_stat in throttle_stats:
if 'device' not in throttle_stat:
total_io_serviced = throttle_stat['io_serviced'][0]['value']
total_io_service_bytes = throttle_stat['io_service_bytes'][0]['value']
assert total_io_serviced is not None, ('Total blkio throttling IO serviced not found '
'for app_id {}'.format(app_id))
assert total_io_service_bytes is not None, ('Total blkio throttling IO service bytes '
'not found for app_id {}'.format(app_id))
# We expect the statistics retrieved from Mesos agent are equal or greater than what we
# did with the `dd` command (i.e., 10 and 10240), because:
# 1. Besides the disk writes done by the `dd` command, the statistics may also include
# some disk reads, e.g., to load the necessary executable binary and libraries.
# 2. In the environment where RAID is enabled, there may be multiple disk writes to
# different disks for a single `dd` write.
assert int(total_io_serviced) >= 10, ('Total blkio throttling IO serviced for app_id {} '
'are less than 10'.format(app_id))
assert int(total_io_service_bytes) >= 10240, ('Total blkio throttling IO service bytes for '
'app_id {} are less than 10240'.format(app_id))
def get_region_zone(domain):
assert isinstance(domain, dict), 'input must be dict'
assert 'fault_domain' in domain, 'fault_domain is missing. {}'.format(domain)
# check region set correctly
assert 'region' in domain['fault_domain'], 'missing region. {}'.format(domain)
assert 'name' in domain['fault_domain']['region'], 'missing region. {}'.format(domain)
region = domain['fault_domain']['region']['name']
# check zone set correctly
assert 'zone' in domain['fault_domain'], 'missing zone. {}'.format(domain)
assert 'name' in domain['fault_domain']['zone'], 'missing zone. {}'.format(domain)
zone = domain['fault_domain']['zone']['name']
return region, zone
@pytest.mark.supportedwindows
@pytest.mark.skipif(
test_helpers.expanded_config['fault_domain_enabled'] == 'false',
reason='fault domain is not set')
def test_fault_domain(dcos_api_session):
master_ip = dcos_api_session.masters[0]
r = dcos_api_session.get('/state', host=master_ip, port=5050)
assert r.status_code == 200
state = r.json()
# check flags and get the domain parameters mesos master was started with.
assert 'flags' in state, 'missing flags in state json'
assert 'domain' in state['flags'], 'missing domain in state json flags'
cli_flag = json.loads(state['flags']['domain'])
expected_region, expected_zone = get_region_zone(cli_flag)
# check master top level keys
assert 'leader_info' in state, 'leader_info is missing in state json'
assert 'domain' in state['leader_info'], 'domain is missing in state json'
leader_region, leader_zone = get_region_zone(state['leader_info']['domain'])
assert leader_region == expected_region, 'expect region {}. Got {}'.format(expected_region, leader_region)
assert leader_zone == expected_zone, 'expect zone {}. Got {}'.format(expected_zone, leader_zone)
for agent in state['slaves']:
assert 'domain' in agent, 'missing domain field for agent. {}'.format(agent)
agent_region, agent_zone = get_region_zone(agent['domain'])
assert agent_region == expected_region, 'expect region {}. Got {}'.format(expected_region, agent_region)
# agent_zone might be different on agents, so we just make sure it's a sane value
assert agent_zone, 'agent_zone cannot be empty'
@pytest.fixture
def reserved_disk(dcos_api_session):
"""
Set up an agent with one disk in a role.
Reserve a chunk of `disk` resources on an agent for a role, and the
remaining resources to another role. With that a framework in the first
role will only be offered `disk` resources.
"""
# Setup.
def principal():
is_enterprise = os.getenv('DCOS_ENTERPRISE', 'false').lower() == 'true'
if is_enterprise:
return dcos_api_session.auth_user.uid
else:
return 'reserved_disk_fixture_principal'
dcos_api_session.principal = principal()
# Keep track of all reservations we created so we can clean them up on
# teardown or on error paths.
reserved_resources = []
try:
# Get the ID of a private agent. We assume that resources on that
# agent are unreserved.
r = dcos_api_session.get('/mesos/slaves')
assert r.status_code == 200, r.text
response = json.loads(r.text)
slaves = [
slave['id'] for slave in response['slaves']
if 'public_ip' not in slave['attributes']]
assert slaves, 'Could not find any private agents'
slave_id = slaves[0]
# Create a unique role to reserve the disk to. The test framework should
# register in this role.
dcos_api_session.role = 'disk-' + uuid.uuid4().hex
resources1 = {
'agent_id': {'value': slave_id},
'resources': [
{
'type': 'SCALAR',
'name': 'disk',
'reservations': [
{
'type': 'DYNAMIC',
'role': dcos_api_session.role,
'principal': dcos_api_session.principal,
}
],
'scalar': {'value': 32}
}
]
}
request = {'type': 'RESERVE_RESOURCES', 'reserve_resources': resources1}
r = dcos_api_session.post('/mesos/api/v1', json=request)
assert r.status_code == 202, r.text
reserved_resources.append(resources1)
# Reserve the remaining agent resources for another role. We let the Mesos
# master perform the calculation of the unreserved resources on the agent
# which requires another query.
r = dcos_api_session.get('/mesos/slaves')
assert r.status_code == 200, r.text
response = json.loads(r.text)
unreserved = [
slave['unreserved_resources_full'] for slave in response['slaves']
if slave['id'] == slave_id]
assert len(unreserved) == 1
unreserved = unreserved[0]
another_role = uuid.uuid4().hex
for resource in unreserved:
resource['reservations'] = [
{
'type': 'DYNAMIC',
'role': another_role,
'principal': dcos_api_session.principal,
}
]
resource.pop('role')
resources2 = copy.deepcopy(resources1)
resources2['resources'] = unreserved
request = {'type': 'RESERVE_RESOURCES', 'reserve_resources': resources2}
r = dcos_api_session.post('/mesos/api/v1', json=request)
assert r.status_code == 202, r.text
reserved_resources.append(resources2)
yield dcos_api_session
finally:
# Teardown.
#
# Remove all reservations this fixture has created in reverse order.
for resources in reversed(reserved_resources):
request = {
'type': 'UNRESERVE_RESOURCES',
'unreserve_resources': resources}
r = dcos_api_session.post('/mesos/api/v1', json=request)
assert r.status_code == 202, r.text
@pytest.mark.skipif(
test_helpers.expanded_config.get('security') == 'strict',
reason='Missing framework authentication for mesos-execute')
def test_min_allocatable_resources(reserved_disk):
"""Test that the Mesos master creates offers for just `disk` resources."""
# We use `mesos-execute` since e.g., Marathon cannot make use of disk-only
# offers.
name = \
'test-min-test_min-allocatable-resources-{}'.format(uuid.uuid4().hex)
argv = [
'/opt/mesosphere/bin/mesos-execute',
'--resources=disk:32',
'--role=' + reserved_disk.role,
'--command=:',
'--master=leader.mesos:5050',
'--name={}'.format(name),
'--env={"LC_ALL":"C"}']
output = subprocess.check_output(
argv,
stderr=subprocess.STDOUT,
universal_newlines=True)
# If the framework received any status update it launched a task which
# means it was offered resources.
assert 'Received status update' in output, output
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Templates ISO
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
import urllib
from random import random
#Import System modules
import time
class Services:
"""Test ISO Services
"""
def __init__(self):
self.services = {
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended in create account to
# ensure unique username generated each time
"password": "fr3sca",
},
"iso_1":
{
"displaytext": "Test ISO 1",
"name": "ISO 1",
"url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso",
# Source URL where ISO is located
"isextractable": True,
"isfeatured": True,
"ispublic": True,
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
},
"iso_2":
{
"displaytext": "Test ISO 2",
"name": "ISO 2",
"url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso",
# Source URL where ISO is located
"isextractable": True,
"isfeatured": True,
"ispublic": True,
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
"mode": 'HTTP_DOWNLOAD',
# Used in Extract template, value must be HTTP_DOWNLOAD
},
"destzoneid": 5,
# Copy ISO from one zone to another (Destination Zone)
"isfeatured": True,
"ispublic": True,
"isextractable": True,
"bootable": True, # For edit template
"passwordenabled": True,
"sleep": 60,
"timeout": 10,
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
# CentOS 5.3 (64 bit)
"mode": 'advanced'
# Networking mode: Basic or Advanced
}
class TestCreateIso(cloudstackTestCase):
def setUp(self):
self.services = Services().services
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
# Get Zone, Domain and templates
self.domain = get_domain(self.apiclient, self.services)
self.zone = get_zone(self.apiclient, self.services)
self.services["domainid"] = self.domain.id
self.services["iso_2"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.dbclient.close()
#Clean up, terminate the created ISOs
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_01_create_iso(self):
"""Test create public & private ISO
"""
# Validate the following:
# 1. database (vm_template table) should be
# updated with newly created ISO
# 2. UI should show the newly added ISO
# 3. listIsos API should show the newly added ISO
iso = Iso.create(
self.apiclient,
self.services["iso_2"],
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.debug("ISO created with ID: %s" % iso.id)
try:
iso.download(self.apiclient)
except Exception as e:
self.fail("Exception while downloading ISO %s: %s"\
% (iso.id, e))
list_iso_response = list_isos(
self.apiclient,
id=iso.id
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_iso_response),
0,
"Check template available in List ISOs"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.displaytext,
self.services["iso_2"]["displaytext"],
"Check display text of newly created ISO"
)
self.assertEqual(
iso_response.name,
self.services["iso_2"]["name"],
"Check name of newly created ISO"
)
self.assertEqual(
iso_response.zoneid,
self.services["iso_2"]["zoneid"],
"Check zone ID of newly created ISO"
)
return
class TestISO(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.services = Services().services
cls.api_client = super(TestISO, cls).getClsTestClient().getApiClient()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.services["domainid"] = cls.domain.id
cls.services["iso_1"]["zoneid"] = cls.zone.id
cls.services["iso_2"]["zoneid"] = cls.zone.id
cls.services["sourcezoneid"] = cls.zone.id
#Create an account, ISOs etc.
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.iso_1 = Iso.create(
cls.api_client,
cls.services["iso_1"],
account=cls.account.account.name,
domainid=cls.account.account.domainid
)
try:
cls.iso_1.download(cls.api_client)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"\
% (cls.iso_1.id, e))
cls.iso_2 = Iso.create(
cls.api_client,
cls.services["iso_2"],
account=cls.account.account.name,
domainid=cls.account.account.domainid
)
try:
cls.iso_2.download(cls.api_client)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"\
% (cls.iso_2.id, e))
cls._cleanup = [cls.account]
return
@classmethod
def tearDownClass(cls):
try:
cls.api_client = super(TestISO, cls).getClsTestClient().getApiClient()
#Clean up, terminate the created templates
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
self.dbclient.close()
#Clean up, terminate the created ISOs, VMs
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_02_edit_iso(self):
"""Test Edit ISO
"""
# Validate the following:
# 1. UI should show the edited values for ISO
# 2. database (vm_template table) should have updated values
#Generate random values for updating ISO name and Display text
new_displayText = random_gen()
new_name = random_gen()
self.debug("Updating ISO permissions for ISO: %s" % self.iso_1.id)
cmd = updateIso.updateIsoCmd()
#Assign new values to attributes
cmd.id = self.iso_1.id
cmd.displaytext = new_displayText
cmd.name = new_name
cmd.bootable = self.services["bootable"]
cmd.passwordenabled = self.services["passwordenabled"]
self.apiclient.updateIso(cmd)
#Check whether attributes are updated in ISO using listIsos
list_iso_response = list_isos(
self.apiclient,
id=self.iso_1.id
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_iso_response),
0,
"Check template available in List ISOs"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.displaytext,
new_displayText,
"Check display text of updated ISO"
)
self.assertEqual(
iso_response.name,
new_name,
"Check name of updated ISO"
)
self.assertEqual(
iso_response.bootable,
self.services["bootable"],
"Check if image is bootable of updated ISO"
)
self.assertEqual(
iso_response.ostypeid,
self.services["ostypeid"],
"Check OSTypeID of updated ISO"
)
return
def test_03_delete_iso(self):
"""Test delete ISO
"""
# Validate the following:
# 1. UI should not show the deleted ISO
# 2. database (vm_template table) should not contain deleted ISO
self.debug("Deleting ISO with ID: %s" % self.iso_1.id)
self.iso_1.delete(self.apiclient)
# Sleep to ensure that ISO state is reflected in other calls
time.sleep(self.services["sleep"])
#ListIsos to verify deleted ISO is properly deleted
list_iso_response = list_isos(
self.apiclient,
id=self.iso_1.id
)
self.assertEqual(
list_iso_response,
None,
"Check if ISO exists in ListIsos"
)
return
def test_04_extract_Iso(self):
"Test for extract ISO"
# Validate the following
# 1. Admin should be able to extract and download the ISO
# 2. ListIsos should display all the public templates
# for all kind of users
# 3. ListIsos should not display the system templates
self.debug("Extracting ISO with ID: %s" % self.iso_2.id)
cmd = extractIso.extractIsoCmd()
cmd.id = self.iso_2.id
cmd.mode = self.services["iso_2"]["mode"]
cmd.zoneid = self.services["iso_2"]["zoneid"]
list_extract_response = self.apiclient.extractIso(cmd)
try:
#Format URL to ASCII to retrieve response code
formatted_url = urllib.unquote_plus(list_extract_response.url)
url_response = urllib.urlopen(formatted_url)
response_code = url_response.getcode()
except Exception:
self.fail(
"Extract ISO Failed with invalid URL %s (ISO id: %s)" \
% (formatted_url, self.iso_2.id)
)
self.assertEqual(
list_extract_response.id,
self.iso_2.id,
"Check ID of the downloaded ISO"
)
self.assertEqual(
list_extract_response.extractMode,
self.services["iso_2"]["mode"],
"Check mode of extraction"
)
self.assertEqual(
list_extract_response.zoneid,
self.services["iso_2"]["zoneid"],
"Check zone ID of extraction"
)
self.assertEqual(
response_code,
200,
"Check for a valid response of download URL"
)
return
def test_05_iso_permissions(self):
"""Update & Test for ISO permissions"""
# validate the following
# 1. listIsos returns valid permissions set for ISO
# 2. permission changes should be reflected in vm_template
# table in database
self.debug("Updating permissions for ISO: %s" % self.iso_2.id)
cmd = updateIsoPermissions.updateIsoPermissionsCmd()
cmd.id = self.iso_2.id
#Update ISO permissions
cmd.isfeatured = self.services["isfeatured"]
cmd.ispublic = self.services["ispublic"]
cmd.isextractable = self.services["isextractable"]
self.apiclient.updateIsoPermissions(cmd)
#Verify ListIsos have updated permissions for the ISO for normal user
list_iso_response = list_isos(
self.apiclient,
id=self.iso_2.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.id,
self.iso_2.id,
"Check ISO ID"
)
self.assertEqual(
iso_response.ispublic,
self.services["ispublic"],
"Check ispublic permission of ISO"
)
self.assertEqual(
iso_response.isfeatured,
self.services["isfeatured"],
"Check isfeatured permission of ISO"
)
return
def test_06_copy_iso(self):
"""Test for copy ISO from one zone to another"""
#Validate the following
#1. copy ISO should be successful and secondary storage
# should contain new copied ISO.
self.debug("Copy ISO from %s to %s" % (
self.zone.id,
self.services["destzoneid"]
))
cmd = copyIso.copyIsoCmd()
cmd.id = self.iso_2.id
cmd.destzoneid = self.services["destzoneid"]
cmd.sourcezoneid = self.zone.id
self.apiclient.copyIso(cmd)
#Verify ISO is copied to another zone using ListIsos
list_iso_response = list_isos(
self.apiclient,
id=self.iso_2.id,
zoneid=self.services["destzoneid"]
)
self.assertEqual(
isinstance(list_iso_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_iso_response),
0,
"Check template extracted in List ISO"
)
iso_response = list_iso_response[0]
self.assertEqual(
iso_response.id,
self.iso_2.id,
"Check ID of the downloaded ISO"
)
self.assertEqual(
iso_response.zoneid,
self.services["destzoneid"],
"Check zone ID of the copied ISO"
)
self.debug("Cleanup copied ISO: %s" % iso_response.id)
# Cleanup- Delete the copied ISO
cmd = deleteIso.deleteIsoCmd()
cmd.id = iso_response.id
cmd.zoneid = self.services["destzoneid"]
self.apiclient.deleteIso(cmd)
return
"""distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: fancy_getopt.py 60923 2008-02-21 18:18:37Z guido.van.rossum $"
import sys, string, re
from types import *
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = string.maketrans('-', '_')
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__ (self, option_table=None):
# The option table is (currently) a list of tuples. The
# tuples may have 3 or four values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
# __init__ ()
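# Illustrative note (not part of the original distutils source): a typical
# option table passed to the constructor looks like
#
#   [('verbose', 'v', "run verbosely (default)", 1),
#    ('quiet', 'q', "run quietly (turns verbosity off)"),
#    ('output=', 'o', "write output to FILE")]
#
# where a trailing '=' on the long name marks an option that takes a value,
# the optional fourth element marks a repeatable option, and the short name
# may be None.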
def _build_index (self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table (self, option_table):
self.option_table = option_table
self._build_index()
def add_option (self, long_option, short_option=None, help_string=None):
if long_option in self.option_index:
raise DistutilsGetoptError, \
"option conflict: already an option '%s'" % long_option
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option (self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return long_option in self.option_index
def get_attr_name (self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return string.translate(long_option, longopt_xlate)
def _check_alias_dict (self, aliases, what):
assert type(aliases) is DictionaryType
for (alias, opt) in aliases.items():
if alias not in self.option_index:
raise DistutilsGetoptError, \
("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias)
if opt not in self.option_index:
raise DistutilsGetoptError, \
("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt)
def set_aliases (self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases (self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
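# Illustrative note (not part of the original distutils source): negative
# aliases are registered like
#
#   parser.set_negative_aliases({'quiet': 'verbose'})
#
# so that --quiet on the command line sets the 'verbose' attribute to 0, as
# described in the class docstring above.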
def _grok_option_table (self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError, "invalid option tuple: %r" % (option,)
# Type- and value-check the option names
if type(long) is not StringType or len(long) < 2:
raise DistutilsGetoptError, \
("invalid long option '%s': "
"must be a string of length >= 2") % long
if (not ((short is None) or
(type(short) is StringType and len(short) == 1))):
raise DistutilsGetoptError, \
("invalid short option '%s': "
"must a single character or None") % short
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
# Is option is a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError, \
("invalid negative alias '%s': "
"aliased option '%s' takes a value") % \
(long, alias_to)
self.long_opts[-1] = long # XXX redundant?!
self.takes_arg[long] = 0
else:
self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError, \
("invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
"the other doesn't") % (long, alias_to)
# Now enforce some bondage on the long option name, so we can
# later translate it to an attribute name on some object. Have
# to do this a bit late to make sure we've removed any trailing
# '='.
if not longopt_re.match(long):
raise DistutilsGetoptError, \
("invalid long option name '%s' " +
"(must be letters, numbers, hyphens only") % long
self.attr_name[long] = self.get_attr_name(long)
if short:
self.short_opts.append(short)
self.short2long[short[0]] = long
# for option_table
# _grok_option_table()
def getopt (self, args=None, object=None):
"""Parse command-line options in args. Store as attributes on object.
If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
'object' is None or not supplied, creates a new OptionDummy
object, stores option values there, and returns a tuple (args,
object). If 'object' is supplied, it is modified in place and
'getopt()' just returns 'args'; in both cases, the returned
'args' is a modified copy of the passed-in 'args' list, which
is left untouched.
"""
if args is None:
args = sys.argv[1:]
if object is None:
object = OptionDummy()
created_object = 1
else:
created_object = 0
self._grok_option_table()
short_opts = string.join(self.short_opts)
try:
opts, args = getopt.getopt(args, short_opts, self.long_opts)
except getopt.error, msg:
raise DistutilsArgError, msg
for opt, val in opts:
if len(opt) == 2 and opt[0] == '-': # it's a short option
opt = self.short2long[opt[1]]
else:
assert len(opt) > 2 and opt[:2] == '--'
opt = opt[2:]
alias = self.alias.get(opt)
if alias:
opt = alias
if not self.takes_arg[opt]: # boolean option?
assert val == '', "boolean option can't have value"
alias = self.negative_alias.get(opt)
if alias:
opt = alias
val = 0
else:
val = 1
attr = self.attr_name[opt]
# The only repeating option at the moment is 'verbose'.
# It has a negative option -q quiet, which should set verbose = 0.
if val and self.repeat.get(attr) is not None:
val = getattr(object, attr, 0) + 1
setattr(object, attr, val)
self.option_order.append((opt, val))
# for opts
if created_object:
return args, object
else:
return args
# getopt()
def get_option_order (self):
"""Returns the list of (option, value) tuples processed by the
previous run of 'getopt()'. Raises RuntimeError if
'getopt()' hasn't been called yet.
"""
if self.option_order is None:
raise RuntimeError, "'getopt()' hasn't been called yet"
else:
return self.option_order
def generate_help (self, header=None):
"""Generate help text (a list of strings, one per suggested line of
output) from the option table for this FancyGetopt object.
"""
# Blithely assume the option table is good: probably wouldn't call
# 'generate_help()' unless you've already called 'getopt()'.
# First pass: determine maximum length of long option names
max_opt = 0
for option in self.option_table:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_opt:
max_opt = l
opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
# Typical help block looks like this:
# --foo controls foonabulation
# Help block for longest option looks like this:
# --flimflam set the flim-flam level
# and with wrapped text:
# --flimflam set the flim-flam level (must be between
# 0 and 100, except on Tuesdays)
# Options with short names will have the short name shown (but
# it doesn't contribute to max_opt):
# --foo (-f) controls foonabulation
# If adding the short option would make the left column too wide,
# we push the explanation off to the next line
# --flimflam (-l)
# set the flim-flam level
# Important parameters:
# - 2 spaces before option block start lines
# - 2 dashes for each long option name
# - min. 2 spaces between option and explanation (gutter)
# - 5 characters (incl. space) for short option name
# Now generate lines of help text. (If 80 columns were good enough
# for Jesus, then 78 columns are good enough for me!)
line_width = 78
text_width = line_width - opt_width
big_indent = ' ' * opt_width
if header:
lines = [header]
else:
lines = ['Option summary:']
for option in self.option_table:
long, short, help = option[:3]
text = wrap_text(help, text_width)
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all (makes life easy)
if short is None:
if text:
lines.append(" --%-*s %s" % (max_opt, long, text[0]))
else:
lines.append(" --%-*s " % (max_opt, long))
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
if text:
lines.append(" --%-*s %s" %
(max_opt, opt_names, text[0]))
else:
lines.append(" --%-*s" % (max_opt, opt_names))
for l in text[1:]:
lines.append(big_indent + l)
# for self.option_table
return lines
# generate_help ()
def print_help (self, header=None, file=None):
if file is None:
file = sys.stdout
for line in self.generate_help(header):
file.write(line + "\n")
# class FancyGetopt
def fancy_getopt (options, negative_opt, object, args):
parser = FancyGetopt(options)
parser.set_negative_aliases(negative_opt)
return parser.getopt(args, object)
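# Illustrative call of the convenience wrapper (names are placeholders for
# whatever option table and command object the caller already has):
#
#   args = fancy_getopt(option_table, {'quiet': 'verbose'}, cmd_obj, sys.argv[1:])
#
# Because an object is supplied, only the leftover positional arguments are
# returned; the parsed option values land on 'cmd_obj' as attributes.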
WS_TRANS = string.maketrans(string.whitespace, ' ' * len(string.whitespace))
def wrap_text (text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = string.expandtabs(text)
text = string.translate(text, WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = filter(None, chunks) # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
# chunk that's too big to fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(string.join(cur_line, ''))
# while chunks
return lines
# wrap_text ()
def translate_longopt (opt):
"""Convert a long option name to a valid Python identifier by
changing "-" to "_".
"""
return string.translate(opt, longopt_xlate)
class OptionDummy:
"""Dummy class just used as a place to hold command-line option
values as instance attributes."""
def __init__ (self, options=[]):
"""Create a new OptionDummy instance. The attributes listed in
'options' will be initialized to None."""
for opt in options:
setattr(self, opt, None)
# class OptionDummy
if __name__ == "__main__":
text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""
for w in (10, 20, 30, 40):
print "width: %d" % w
print string.join(wrap_text(text, w), "\n")
print
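    # A short, hypothetical FancyGetopt round-trip.  The option table below is
    # made up for illustration; 'verbose'/'quiet' mirror the repeat and
    # negative-alias conventions that distutils itself uses.
    options = [('verbose', 'v', "run verbosely", 1),
               ('quiet', 'q', "run quietly", 0),
               ('output=', 'o', "write results to FILE", 0)]
    parser = FancyGetopt(options)
    parser.set_negative_aliases({'quiet': 'verbose'})
    # '-v' bumps the repeatable 'verbose' counter, '--output' takes a value,
    # and 'leftover' is returned untouched.
    leftover, opts = parser.getopt(['-v', '--output=out.txt', 'leftover'])
    print "leftover args:", leftover
    print "verbose =", opts.verbose, "/ output =", opts.output
    parser.print_help("Demo options:")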
|
|
#
# Copyright 2015 Geoff MacGill
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import logging
import os
import collections
import datetime
import six
import tornado.ioloop
import tornado.gen
import tornado.iostream
import tornado.tcpclient
try:
# Tornado 4.2+
import tornado.locks as locks
from tornado.gen import TimeoutError
except ImportError:
import toro as locks
from toro import Timeout as TimeoutError
try:
import simplejson as json
except ImportError:
import json # slower
try:
from flufl.enum import IntEnum
except ImportError:
from enum import IntEnum
from .base import BaseConnector
from ..frames.base import Frame
from ..handlers import DestinationSpec
from ..transaction import Transaction
from ..errors import (
ConnectionError,
ShutdownError,
FrameError,
SubscriptionError,
StompError
)
class TornadoConnector(BaseConnector):
"""STOMP connector for use with a tornado ioloop."""
class ConnectionState(IntEnum):
init = 1
connecting = 2
connected = 3
pre_disconnecting = 4
disconnecting = 5
disconnected = 6
shutdown = 7
def __init__(self,
connection_params,
error_handler,
message_handlers=None,
version='1.1',
heartbeat=(0, 0),
reconnect_attempts=5,
reconnect_delay=0.5,
io_loop=None):
"""Initializes a new connector for tornado.
The message handlers specified for this connector are a list of
iterables (i.e. a list of tuples). Each handler specified in the list
must contain at least 2 members, and an additional mapping type
may be provided to initialize the handler. Examples:
1. Basic: (r'^/queue/foo$', FooHandler)
2. Keyed Args: (r'^/queue/foo$', FooHandler, {'param':'value'})
Each message handler must extend the base
:class:`stimpi.handlers.MessageHandler` class.
Arguments:
connection_params
(:class:`stimpi.connection.ConnectionParameters`):
required connection parameters specifying remote host, port,
vhost, credentials, etc.
error_handler (:class:`stimpi.handlers.ErrorHandler`, optional): An
optional handler for server specified errors that are not
mappable to a sent frame. This can usually occur if receipts
are disabled for one or more messages.
Keyed Arguments:
message_handlers (list, optional): A list of iterables defining
the unbound handlers for MESSAGE frames received from the
broker for one or more subscriptions. These handlers are
unbound in the sense that they could handle messages for any
subscription.
version (str): The STOMP protocol version. One of '1.0', '1.1',
'1.2'. Default '1.1'.
heartbeat (tuple): The transmit and receive requested heartbeats.
Default (0, 0). Currently not supported.
reconnect_attempts (int): Number of attempts to connect to broker
before giving up. Default 5.
reconnect_delay (float): Number of seconds to delay between
reconnect attempts. Default 0.5.
io_loop (:class:`tornado.ioloop.IOLoop`): Deprecated; defaults to the
current IOLoop.
"""
super(TornadoConnector, self).__init__(
connection_params,
version=version,
heartbeat=heartbeat,
reconnect_attempts=reconnect_attempts,
reconnect_delay=reconnect_delay
)
self._logger = logging.getLogger(__name__)
self._io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._stream = None
self._client = tornado.tcpclient.TCPClient()
self._write_lock = locks.BoundedSemaphore(value=1)
self._connected_event = locks.Event()
self._disconnected_event = locks.Event()
self._connection_state = TornadoConnector.ConnectionState.init
self._clean_shutdown = False
self._error_handler = error_handler
self._unbound_message_handlers = []
self._bound_message_handlers = {}
self._waiting_receipts = {}
self._error_receipts = {}
self._disconnect_receipt_timeout = datetime.timedelta(seconds=5)
self.add_handlers(None, message_handlers)
@property
def connected(self):
"""Checks the connected state of the underlying socket connection
to the broker.
Returns:
True: Connected to broker
False: Not connected to broker
"""
return self._connection_state ==\
TornadoConnector.ConnectionState.connected
def add_handlers(self, subscription, handlers):
if handlers:
for handler in handlers:
self.add_handler(subscription, *handler)
def add_handler(self, subscription, pattern, handler, kwargs=None):
"""Add the specified handler for an optional subscription. If
subscription is None or empty, the handler is registered as
an unbound message handler.
Arguments:
subscription (str, optional): The subscription id that this
handler should be registered against. This is useful
to have handlers for messages when there are multiple
subscriptions.
pattern (str): The destination pattern to match and route messages.
This must be a compilable regular expression and should
match to the end of the string using '$'.
handler (:class:`stimpi.handlers.MessageHandler`): Handler class
reference to construct and pass invocation.
Keyed Arguments:
kwargs (:class:`collections.Mapping`, optional): Initialization
parameters for the handler post construction. This should
be a mapping type for keyed arguments. Arguments are passed
to the MessageHandler.initialize(*args, **kwargs) function.
"""
registered_handler = DestinationSpec(pattern, handler, kwargs)
if subscription:
subscription_handlers =\
self._bound_message_handlers.get(subscription, None)
if not subscription_handlers:
subscription_handlers = []
self._bound_message_handlers[subscription] =\
subscription_handlers
subscription_handlers.append(registered_handler)
else:
self._unbound_message_handlers.append(registered_handler)
@tornado.gen.coroutine
def _send_frame(self,
frame,
wait_connected=True,
receipt_event=None):
"""Sends a single frame, and optional body to the server. At most one
concurrent write operation can be performed, so we will use a
bounded semaphore to ensure that only one write occurs at a given time.
Arguments:
frame (:class:`stimpi.frames.base.Frame`): An implementation of
a frame to send across the wire.
Keyed Arguments:
wait_connected (bool): Wait for the connection to come up.
Default True.
receipt_event (Event): Optionally wait for the receive loop to
notify a RECEIPT for the frame. Default None.
"""
if wait_connected:
yield self._ensure_connected()
yield self._write_lock.acquire()
try:
# check if shutdown for any waiting writes
#if self._is_shutdown:
# raise ShutdownError('Connection shutdown')
#if isinstance(frame, self.protocol.module.Disconnect):
# self._is_shutdown = True
# the frame encapsulates the verb and headers
# avoid building a string with the body, instead we will
# write it directly to the stream
yield self._stream.write(frame.dumps(with_body=False))
if frame.body:
yield self._stream.write(frame.body)
# all frames end with a null and line ending
yield self._stream.write(self.protocol.body_end_delim)
self._logger.debug('Sent frame: %s', frame.definition.verb)
self._logger.debug('Raw frame:\n%s', str(frame))
finally:
# always release the semaphore on completion (success or error)
# or suffer a deadlock
self._write_lock.release()
if receipt_event:
yield receipt_event.wait()
@tornado.gen.coroutine
def _ensure_connected(self, start=True):
"""Ensures the connection is up or connects to the broker.
Keyed Arguments:
start (bool): Start the receive loop once connected. Default True.
"""
if self._connection_state >\
TornadoConnector.ConnectionState.connected:
raise ShutdownError('Connection shutdown')
if self._connection_state ==\
TornadoConnector.ConnectionState.connected:
# already connected
return
if self._connection_state ==\
TornadoConnector.ConnectionState.connecting:
# wait for connection to complete
self._logger.debug('Waiting for pending connection')
yield self._connected_event.wait()
if self._connection_state !=\
TornadoConnector.ConnectionState.connected:
raise ConnectionError('Failed to connect')
return
# need to connect
# immediately set the state to connecting to avoid two attempts
# because we will yield to the ioloop here
self._connection_state = TornadoConnector.ConnectionState.connecting
# open the tcp connection
yield self._open_socket()
# send the connect frame
try:
yield self._send_frame(self._build_connect_frame(),
wait_connected=False)
except tornado.iostream.StreamClosedError as e:
six.raise_from(
ConnectionError('Failed to send connect frame'), e)
# receive the connected or error frame
self._logger.debug('Waiting for CONNECTED frame')
try:
frame = yield self._receive_frame(wait_connected=False)
except tornado.iostream.StreamClosedError as e:
six.raise_from(
ConnectionError('Failed to receive CONNECTED frame'), e)
self._handle_connected_frame(frame, start)
def _handle_connected_frame(self, frame, start):
"""Process the CONNECTED or ERROR frames in response to a CONNECT
frame.
CONNECTED frame means we successfully connected.
ERROR frame means we were rejected a connection.
Any other frame is an error condition.
Arguments:
frame (:class:`stimpi.frames.Frame`): A received frame in response
to the CONNECT frame. Should be either a
:class:`stimpi.frames.impl.connected.Connected` or
:class:`stimpi.frames.impl.error.Error` frame.
start (bool): Start the receive loop if successfully connected
to the broker.
"""
if isinstance(frame, self.protocol.module.Connected):
# successfully connected, notify waiting operations
self._connection_state = TornadoConnector.ConnectionState.connected
self._connect_attempts = 0
self._disconnected_event.clear()
self._connected_event.set()
# start the receive loop
if start:
self._logger.debug('Starting receive loop')
future_result = self._receive_frame()
self._io_loop.add_future(future_result, self._on_receive_frame)
elif isinstance(frame, self.protocol.module.Error):
# failed to connect
self._connection_state =\
TornadoConnector.ConnectionState.disconnected
self._connected_event.set() # let blocked requests through
raise ConnectionError(
'Failed to connect to server: {:s}'.format(
frame.headers.get('message', 'No error provided')
))
else:
# this is not a valid frame
self._connection_state =\
TornadoConnector.ConnectionState.disconnected
self._connected_event.set()
raise ConnectionError(
'Received unexpected frame in response to connect')
@tornado.gen.coroutine
def _open_socket(self):
"""Opens the TCP socket to the broker. This does not send any
messages.
"""
self._logger.debug('Opening socket connection',
extra={
'host': self.connection_params.host,
'port': self.connection_params.port
})
while True:
try:
self._stream = yield self._client.connect(
self.connection_params.host,
self.connection_params.port)
break
except IOError as e:
self._stream = None
self._connect_attempts += 1
if self._connect_attempts == self._reconnect_attempts:
self._logger.debug('Connection failed, giving up')
six.raise_from(
ConnectionError(
'Failed to open connection to '
'host "{:s}:{:d}"'.format(
self.connection_params.host,
self.connection_params.port)),
e)
else:
self._logger.debug('Connection failed, retrying in %f '
'seconds', self._reconnect_delay)
yield tornado.gen.sleep(self._reconnect_delay)
@tornado.gen.coroutine
def _receive_frame(self, wait_connected=True):
"""Read a single frame from the socket.
Keyed Arguments:
wait_connected (bool): Wait for the connection to come up fully.
Default True.
"""
frame = None
if wait_connected:
yield self._ensure_connected()
# ignore all EOL values
while True:
data = yield self._stream.read_until_regex(
self.protocol.line_end_pattern)
if self.protocol.line_end_pattern.match(data):
if self._receiving_heartbeats:
# this is a heartbeat -- connection is alive
pass
# we are not heartbeating, but the protocol allows for
# EOLs between frames -- ignore them
continue
else:
break
# at this point, we should have read a command
data = self._decode_frame_headers(data)
frame = Frame.loads_command(self.protocol, data)
# load the headers, if any
while True:
data = yield self._stream.read_until_regex(
self.protocol.line_end_pattern)
if self.protocol.line_end_pattern.match(data):
# end of headers / beginning of body
break
else:
data = self._decode_frame_headers(data)
frame.loads_header(data)
# load the body
content_length = frame.headers.get('content-length', 0)
if content_length:
# read content length + null byte
data = yield self._stream.read_bytes(content_length + 1)
else:
# read until the null byte
data = yield self._stream.read_until(
self.protocol.body_end_delim)
# trim the null byte (inefficient?)
frame.body = data[:-1]
self._logger.debug('Received frame: "{}"'.format(
frame.definition.verb))
self._logger.debug('Raw frame:\n%s', str(frame))
raise tornado.gen.Return(frame)
def _on_receive_frame(self, future):
# check if the error (if any specified) is recoverable
error = future.exception()
frame = None
if error:
if not self._is_receive_error_recoverable(error):
# this is a fatal error, stop receiving frames
return
else:
frame = future.result()
# if still connected, queue the next read. this essentially creates
# an infinite read loop until disconnected
if self.connected:
next_result = self._receive_frame()
self._io_loop.add_future(next_result, self._on_receive_frame)
# if there is no frame, nothing to process
if not frame:
return
# handle basic processing
is_error, has_receipt = self._process_received_frame(frame)
# if this is an error, shutdown
if is_error:
close_result = self._close_socket()
self._io_loop.add_future(close_result, self._on_receive_close)
if not has_receipt:
self._dispatch_error(StompError('Unexpected error', frame))
def _is_receive_error_recoverable(self, error):
if not error:
return True
if isinstance(error, ShutdownError):
# shutdown errors occur when the client disconnects cleanly
# at least (usually)
return False
elif isinstance(error, tornado.iostream.StreamClosedError):
if self._connection_state >\
TornadoConnector.ConnectionState.connected or\
self._clean_shutdown:
# this is in response to a disconnect/close operation
# swallow this exception
return False
# the socket went down unexpectedly, log and dispatch an error
self._logger.error('Connection lost')
close_result = self._close_socket()
self._io_loop.add_future(close_result, self._on_receive_close)
self._dispatch_error(ConnectionError('Connection lost', error))
return False
elif isinstance(error, FrameError):
# Failed to parse a server frame, try to continue
self._logger.error('Failure in parsing frame: %s', error)
self._dispatch_error(error)
return True
# we got an error that we didn't expect
self._logger.exception(error)
self._dispatch_error(error)
return False
def _on_receive_close(self, future):
error = future.exception()
if not error:
return
if isinstance(error, ShutdownError):
# shutdown errors occur when the client disconnects cleanly
# at least (usually)
return
elif isinstance(error, tornado.iostream.StreamClosedError):
# we were already shutting down, so ignore
return
# we got an error that we didn't expect
self._logger.exception(error)
self._dispatch_error(error)
def _process_received_frame(self, frame):
"""Process a received frame for common cases of receipts and messages.
If an error frame is received, save the frame to raise a meaningful
exception to the caller.
Arguments:
frame (:class:`stimpi.frames.Frame`): Received frame to handle.
"""
is_error = isinstance(frame, self.protocol.module.Error)
has_receipt = False
# notify anyone blocking for a receipt
receipt_id = frame.headers.get('receipt-id', None)
if receipt_id:
receipt_event = self._waiting_receipts.pop(receipt_id, None)
if receipt_event:
has_receipt = True
if is_error:
self._error_receipts[receipt_id] = frame
receipt_event.set()
# if this is a message, dispatch to the correct handler
if isinstance(frame, self.protocol.module.Message):
self._dispatch_message(frame)
return is_error, has_receipt
def _find_message_handler(self, frame):
"""Find a handler (if exists) for the message based on subscription
and destination.
Arguments:
frame (:class:`stimpi.frames.Frame`): Received message frame.
"""
destination = frame.headers.get('destination', None)
subscription = frame.headers.get('subscription', None)
handlers = None
if subscription:
handlers = self._bound_message_handlers.get(subscription, None)
handlers = handlers or self._unbound_message_handlers
if not handlers:
return None, None
for handler in handlers:
if handler.pattern.match(destination):
return subscription, handler
return subscription, None
@tornado.gen.coroutine
def _dispatch_message(self, frame):
"""Dispatch a received message to the correct handler. This prefers
handlers for the designated subscription, but will use the global
unbound handlers if none are registered for the designated
subscription.
Arguments:
frame (:class:`stimpi.frames.Frame`): Received message frame.
"""
subscription, handler = self._find_message_handler(frame)
if not handler:
self._logger.warn('No handler for message')
self._dispatch_error(SubscriptionError('No handler for message'))
return
handler_impl = handler.handler(self, frame)
handler_impl.initialize(**handler.kwargs)
# process the message through the handler
try:
yield tornado.gen.maybe_future(handler_impl.received())
except Exception as e:
self._logger.exception(e)
if subscription:
if self._subscriptions.get(subscription, 'auto') != 'auto':
# we are not auto acking, so nack on error
yield self.nack(frame, subscription)
else:
if subscription:
if self._subscriptions.get(subscription, 'auto') != 'auto':
# we are not auto acking, so ack on success
yield self.ack(frame, subscription)
def _dispatch_error(self, error):
"""Dispatch an error to the error handler or raise if no handler
registered."""
if self._error_handler:
impl = self._error_handler(self, error)
impl.handle(error)
else:
raise error
@tornado.gen.coroutine
def _close_socket(self):
"""Close the TCP socket."""
if self._connection_state >=\
TornadoConnector.ConnectionState.disconnected:
return
self._logger.debug('Starting disconnect')
if self._connection_state ==\
TornadoConnector.ConnectionState.disconnecting:
self._logger.debug('Waiting for existing disconnect request')
yield self._disconnected_event.wait()
return
self._connection_state =\
TornadoConnector.ConnectionState.disconnecting
self._connected_event.clear()
if self._stream:
self._logger.debug('Closing socket')
self._stream.close()
self._stream = None
self._connection_state =\
TornadoConnector.ConnectionState.disconnected
self._disconnected_event.set()
self._logger.debug('Disconnected')
def _create_receipt_event(self, frame, with_receipt):
"""Create and register a receipt event for the frame if requested.
If the frame has a receipt header, use that ID. Otherwise create
a random receipt ID.
Arguments:
frame (:class:`stimpi.frames.Frame`): Client frame.
with_receipt (bool): Requires/wants receipt.
"""
if not with_receipt:
return None
id = '{0:s}-{1:s}'.format(
frame.definition.verb, os.urandom(16).encode('hex'))
frame.headers['receipt'] = id
# use an event here because we don't want to get stuck waiting
# in the event that the receive loop notifies before we wait on
# the condition
receipt_event = locks.Event()
self._waiting_receipts[id] = receipt_event
return receipt_event
def _check_receipt_error(self, frame, message):
"""Check if there was an ERROR frame received instead of a RECEIPT
frame. If there was an ERROR frame received, raise the error
for the caller.
Arguments:
frame (:class:`stimpi.frames.Frame`): Client frame requiring
receipt.
message (str): Error message if error exists.
"""
id = frame.headers.get('receipt', None)
if not id:
return
error = self._error_receipts.pop(id, None)
if error:
raise StompError(
'{0:s}: {1:s}'.format(
message,
error.headers.get('message', 'unspecified error')),
error)
@tornado.gen.coroutine
def disconnect(self,
headers=None,
with_receipt=True):
"""Disconnect from the broker gracefully. Accepts a callback or
returns a future if no callback provided.
It is recommended to always ask for a receipt.
Keyed Arguments:
headers (:class:`collections.Mapping`, optional): Additional
headers to supply with the frame. Default None.
with_receipt (bool, optional): Request a receipt acknowledgement
from the broker. Default True.
callback (func, optional): Callback upon completion. Default None.
Returns:
A Future if no callback is specified.
"""
frame = self._build_disconnect_frame(headers)
# disconnect and wait for receipt, if requested
receipt_event = self._create_receipt_event(
frame,
with_receipt and self.protocol.disconnect_ack)
self._clean_shutdown = True
yield self._send_frame(frame)
# attempt to wait for receipt, but no guarantees
if receipt_event:
try:
yield receipt_event.wait(self._disconnect_receipt_timeout)
except TimeoutError:
self._logger.debug('Timeout waiting for disconnect receipt')
self._connection_state =\
TornadoConnector.ConnectionState.pre_disconnecting
# close the socket
yield self._close_socket()
@tornado.gen.coroutine
def connect(self,
start=False):
"""Connect to a broker. Accepts a callback or returns a Future if
no callback provided.
Keyed Arguments:
start (bool): Start the frame receive loop.
callback (func, optional): Callback upon completion. Default None.
Returns:
A Future if no callback is specified.
Raises:
:class:`stimpi.errors.ConnectionError` if the connection
could not be established.
"""
super(TornadoConnector, self).connect()
yield self._ensure_connected(start=start)
@tornado.gen.coroutine
def start(self):
"""Start the frame receive loop. Accepts a callback or returns a Future if
no callback provided.
Keyed Arguments:
callback (func, optional): Callback upon completion. Default None.
Returns:
A Future if no callback is specified.
"""
yield self._ensure_connected(start=False)
future_result = self._receive_frame()
self._io_loop.add_future(future_result, self._on_receive_frame)
@tornado.gen.coroutine
def subscribe(self,
destination,
message_handlers,
id=None,
ack='auto',
selector=None,
headers=None,
with_receipt=True):
"""Subscribe to a queue or topic. Accepts a callback or returns a
future if no callback provided.
It is recommended to always ask for a receipt.
Arguments:
destination (str): Queue or topic to subscribe.
message_handlers (list, optional): A list of iterables defining
the unbound handlers for MESSAGE frames received from the
broker for one or more subscriptions. These handlers are
unbound in the sense that they could handle messages for any
subscription.
Keyed Arguments:
id (str, optional): Unique identifier for this subscription across
this connection/session. If not provided, one is automatically
generated. Default None (i.e. generated).
ack (str): Message acknowledgement pattern. One of 'auto' or
'client'. Default 'auto'.
selector (str, optional): SQL92 selector pattern. Default None.
headers (:class:`collections.Mapping`, optional): Additional
headers to supply with the frame. Default None.
with_receipt (bool, optional): Request a receipt acknowledgement
from the broker. Default True.
callback (func, optional): Callback upon completion. Default None.
Returns:
A Future if no callback is specified. Result is the unique
identifier for the subscription.
Raises:
:class:`stimpi.errors.StompError` if the broker returned an error
frame with the matching receipt id. If with_receipt is False, no
StompError will ever be raised.
:class:`stimpi.errors.ShutdownError` if the connection was closed
by the client side.
:class:`stimpi.errors.ConnectionError` if the connection was
unexpectedly closed or lost.
"""
frame = self._build_subscribe_frame(destination,
id,
ack,
selector,
headers)
id = frame.headers['id']
if id in self._bound_message_handlers and message_handlers:
raise KeyError('Multiple subscriptions with the same '
'id: {0:s}'.format(
id))
# register the message handlers for this identified subscription
# if any are provided. do this before sending the message
# to avoid missing messages before the ioloop gives us back
# control.
self._subscriptions[id] = ack
self.add_handlers(id, message_handlers)
# subscribe and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
try:
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error subscribing')
except Exception as e:
self._subscriptions.pop(id, None)
self._bound_message_handlers.pop(id, None)
raise
raise tornado.gen.Return(id)
@tornado.gen.coroutine
def unsubscribe(self,
id,
headers=None,
with_receipt=True):
"""Unsubscribe from a queue or topic (i.e. cancel an existing
subscription). Accepts a callback or returns a future if no callback
provided.
It is recommended to always ask for a receipt.
Arguments:
id (str): Unique identifier for the subscription to cancel.
Keyed Arguments:
headers (:class:`collections.Mapping`, optional): Additional
headers to supply with the frame. Default None.
with_receipt (bool, optional): Request a receipt acknowledgement
from the broker. Default True.
callback (func, optional): Callback upon completion. Default None.
Returns:
A Future if no callback is specified.
Raises:
:class:`stimpi.errors.StompError` if the broker returned an error
frame with the matching receipt id. If with_receipt is False, no
StompError will ever be raised.
:class:`stimpi.errors.ShutdownError` if the connection was closed
by the client side.
:class:`stimpi.errors.ConnectionError` if the connection was
unexpectedly closed or lost.
"""
frame = self._build_unsubscribe_frame(id, headers)
# unsubscribe and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error unsubscribing')
# remove handlers -- ignore if not found
self._bound_message_handlers.pop(id, None)
self._subscriptions.pop(id, None)
@tornado.gen.coroutine
def send(self,
destination,
data=None,
json_data=None,
transaction=None,
headers=None,
with_receipt=True):
"""Send (i.e. publish) a message to a queue or topic. Accepts a
callback or returns a future if no callback provided.
If data is provided (i.e. pre-encoded message body), the following
headers should be specified:
* content-type: MIME type and optional charset
* content-length: byte count of data
If using json_data, content-type and content-length headers will be
automatically added when the data is encoded.
It is recommended to always ask for a receipt.
Arguments:
destination (str): Queue or topic to receive message.
Keyed Arguments:
data (str, optional): Body of the message. Use this if the content
is already encoded or binary.
json_data (:class:`collections.Mapping`, optional): Body of message
to be automatically encoded as a JSON object.
transaction (optional): Transaction within which to send this
message. Default None.
headers (:class:`collections.Mapping` or
:class:`collections.Sequence`, optional): Additional
headers to supply with the frame. Default None.
with_receipt (bool, optional): Request a receipt acknowledgement
from the broker. Default True.
callback (func, optional): Callback upon completion. Default None.
Returns:
A Future if no callback is specified.
Raises:
:class:`stimpi.errors.StompError` if the broker returned an error
frame with the matching receipt id. If with_receipt is False, no
StompError will ever be raised.
:class:`stimpi.errors.ShutdownError` if the connection was closed
by the client side.
:class:`stimpi.errors.ConnectionError` if the connection was
unexpectedly closed or lost.
"""
frame = self._build_send_frame(destination, transaction, headers)
if json_data and data:
raise ValueError('Cannot specify both data and json_data to send')
if json_data:
if not isinstance(json_data, collections.Mapping) and\
not isinstance(json_data, collections.Sequence):
raise ValueError('json_data is not a mapping or a sequence')
data = json.dumps(json_data)
frame.headers['content-type'] = 'application/json;charset=utf-8'
frame.headers['content-length'] = len(data)
frame.body = data
# send and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error sending')
@tornado.gen.coroutine
def ack(self,
message_id,
subscription=None,
transaction=None,
headers=None,
with_receipt=True):
frame = self._build_ack_frame(message_id,
subscription,
transaction,
headers)
# ack and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error acking message')
@tornado.gen.coroutine
def nack(self,
message_id,
subscription=None,
transaction=None,
headers=None,
with_receipt=True):
frame = self._build_nack_frame(message_id,
subscription,
transaction,
headers)
# nack and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error nacking message')
@tornado.gen.coroutine
def abort(self,
transaction,
headers=None,
with_receipt=True):
frame = self._build_abort_frame(transaction,
headers)
# abort and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error aborting transaction')
@tornado.gen.coroutine
def commit(self,
transaction,
headers=None,
with_receipt=True):
frame = self._build_commit_frame(transaction,
headers)
# commit and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error committing transaction')
@tornado.gen.coroutine
def begin(self,
transaction=None,
headers=None,
with_receipt=True):
if not transaction:
transaction =\
'transaction-{0:s}'.format(os.urandom(16).encode('hex'))
frame = self._build_begin_frame(transaction,
headers)
# begin and wait for receipt, if requested
receipt_event = self._create_receipt_event(frame, with_receipt)
yield self._send_frame(frame, receipt_event=receipt_event)
self._check_receipt_error(frame, 'Error beginning transaction')
raise tornado.gen.Return(Transaction(self, transaction))
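# A minimal, hypothetical usage sketch (not exercised anywhere in this module).
# It assumes the top-level package is named ``stimpi`` as in the docstrings,
# that ConnectionParameters accepts host/port keyword arguments, and that a
# MessageHandler subclass sees the received frame as ``self.frame``; check the
# real signatures before relying on any of those assumptions.
if __name__ == '__main__':
    from stimpi.connection import ConnectionParameters  # assumed module path
    from stimpi.handlers import MessageHandler

    class EchoHandler(MessageHandler):
        def received(self):
            print(self.frame.body)  # assumed attribute name

    @tornado.gen.coroutine
    def _demo():
        connector = TornadoConnector(
            ConnectionParameters(host='localhost', port=61613),
            error_handler=None)
        yield connector.connect(start=True)
        yield connector.subscribe(
            '/queue/demo', [(r'^/queue/demo$', EchoHandler, {})])
        yield connector.send('/queue/demo', json_data={'hello': 'world'})
        yield connector.disconnect()

    tornado.ioloop.IOLoop.current().run_sync(_demo)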
|
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python implementation of the counterfactual regret minimization algorithm.
One iteration of CFR consists of:
1) Compute current strategy from regrets (e.g. using Regret Matching).
2) Compute values using the current strategy
3) Compute regrets from these values
The average policy is what converges to a Nash Equilibrium.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import attr
import numpy as np
from open_spiel.python import policy
import pyspiel
@attr.s
class _InfoStateNode(object):
"""An object wrapping values associated to an information state."""
# The list of the legal actions.
legal_actions = attr.ib()
index_in_tabular_policy = attr.ib()
# Map from information states string representations and actions to the
# counterfactual regrets, accumulated over the policy iterations
cumulative_regret = attr.ib(factory=lambda: collections.defaultdict(float))
# Same as above for the cumulative of the policy probabilities computed
# during the policy iterations
cumulative_policy = attr.ib(factory=lambda: collections.defaultdict(float))
def _apply_regret_matching_plus_reset(info_state_nodes):
"""Resets negative cumulative regrets to 0.
Regret Matching+ corresponds to the following cumulative regrets update:
cumulative_regrets = max(cumulative_regrets + regrets, 0)
This must be done at the level of the information set, and thus cannot be
done during the tree traversal (which is done on histories). It is thus
performed as an additional step.
This function is a module level function to be reused by both CFRSolver and
CFRBRSolver.
Args:
info_state_nodes: A dictionary {`info_state_str` -> `_InfoStateNode`}.
"""
for info_state_node in info_state_nodes.values():
action_to_cum_regret = info_state_node.cumulative_regret
for action, cumulative_regret in action_to_cum_regret.items():
if cumulative_regret < 0:
action_to_cum_regret[action] = 0
def _update_current_policy(current_policy, info_state_nodes):
"""Updates in place `current_policy` from the cumulative regrets.
This function is a module level function to be reused by both CFRSolver and
CFRBRSolver.
Args:
current_policy: A `policy.TabularPolicy` to be updated in-place.
info_state_nodes: A dictionary {`info_state_str` -> `_InfoStateNode`}.
"""
for info_state, info_state_node in info_state_nodes.items():
state_policy = current_policy.policy_for_key(info_state)
for action, value in _regret_matching(
info_state_node.cumulative_regret,
info_state_node.legal_actions).items():
state_policy[action] = value
def _update_average_policy(average_policy, info_state_nodes):
"""Updates in place `average_policy` to the average of all policies iterated.
This function is a module level function to be reused by both CFRSolver and
CFRBRSolver.
Args:
average_policy: A `policy.TabularPolicy` to be updated in-place.
info_state_nodes: A dictionary {`info_state_str` -> `_InfoStateNode`}.
"""
for info_state, info_state_node in info_state_nodes.items():
info_state_policies_sum = info_state_node.cumulative_policy
state_policy = average_policy.policy_for_key(info_state)
probabilities_sum = sum(info_state_policies_sum.values())
if probabilities_sum == 0:
num_actions = len(info_state_node.legal_actions)
for action in info_state_node.legal_actions:
state_policy[action] = 1 / num_actions
else:
for action, action_prob_sum in info_state_policies_sum.items():
state_policy[action] = action_prob_sum / probabilities_sum
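# Worked example: an info state whose cumulative_policy is {0: 3.0, 1: 1.0}
# gets the average policy {0: 0.75, 1: 0.25}; an info state that was never
# reached (probabilities summing to zero) falls back to the uniform policy.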
class _CFRSolverBase(object):
r"""A base class for both CFR and CFR-BR.
The main iteration loop is implemented in `evaluate_and_update_policy`:
```python
game = pyspiel.load_game("game_name")
initial_state = game.new_initial_state()
solver = Solver(game)
for i in range(num_iterations):
solver.evaluate_and_update_policy()
solver.current_policy() # Access the current policy
solver.average_policy() # Access the average policy
```
"""
def __init__(self, game, alternating_updates, linear_averaging,
regret_matching_plus):
# pyformat: disable
"""Initializer.
Args:
game: The `pyspiel.Game` to run on.
alternating_updates: If `True`, alternating updates are performed: for
each player, we compute and update the cumulative regrets and policies.
In that case, and when the policy is frozen during tree traversal, the
cache is reset after each update for one player.
Otherwise, the update is simultaneous.
linear_averaging: Whether to use linear averaging, i.e.
cumulative_policy[info_state][action] += (
iteration_number * reach_prob * action_prob)
or not:
cumulative_policy[info_state][action] += reach_prob * action_prob
regret_matching_plus: Whether to use Regret Matching+:
cumulative_regrets = max(cumulative_regrets + regrets, 0)
or simply regret matching:
cumulative_regrets = cumulative_regrets + regrets
"""
# pyformat: enable
assert game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL, (
"CFR requires sequential games. If you're trying to run it " +
"on a simultaneous (or normal-form) game, please first transform it " +
"using turn_based_simultaneous_game.")
self._game = game
self._num_players = game.num_players()
self._root_node = self._game.new_initial_state()
# This is for returning the current policy and average policy to a caller
self._current_policy = policy.TabularPolicy(game)
self._average_policy = self._current_policy.__copy__()
self._info_state_nodes = {}
self._initialize_info_state_nodes(self._root_node)
self._iteration = 0 # For possible linear-averaging.
self._linear_averaging = linear_averaging
self._alternating_updates = alternating_updates
self._regret_matching_plus = regret_matching_plus
def _initialize_info_state_nodes(self, state):
"""Initializes info_state_nodes.
Create one _InfoStateNode per infoset. We could also initialize the node
when we try to access it and it does not exist.
Args:
state: The current state in the tree walk. This should be the root node
when we call this function from a CFR solver.
"""
if state.is_terminal():
return
if state.is_chance_node():
for action, unused_action_prob in state.chance_outcomes():
self._initialize_info_state_nodes(state.child(action))
return
current_player = state.current_player()
info_state = state.information_state_string(current_player)
info_state_node = self._info_state_nodes.get(info_state)
if info_state_node is None:
legal_actions = state.legal_actions(current_player)
info_state_node = _InfoStateNode(
legal_actions=legal_actions,
index_in_tabular_policy=self._current_policy.state_lookup[info_state])
self._info_state_nodes[info_state] = info_state_node
for action in info_state_node.legal_actions:
self._initialize_info_state_nodes(state.child(action))
def current_policy(self):
"""Returns the current policy as a TabularPolicy.
WARNING: The same object, updated in-place will be returned! You can copy
it (or its `action_probability_array` field).
For CFR/CFR+, this policy does not necessarily have to converge. It
converges with high probability for CFR-BR.
"""
return self._current_policy
def average_policy(self):
"""Returns the average of all policies iterated.
WARNING: The same object, updated in-place will be returned! You can copy
it (or its `action_probability_array` field).
This average policy converges to a Nash policy as the number of iterations
increases.
The policy is computed using the accumulated policy probabilities computed
using `evaluate_and_update_policy`.
Returns:
A `policy.TabularPolicy` object (shared between calls) giving the (linear)
time averaged policy (weighted by player reach probabilities) for both
players.
"""
_update_average_policy(self._average_policy, self._info_state_nodes)
return self._average_policy
def _compute_counterfactual_regret_for_player(self, state, policies,
reach_probabilities, player):
"""Increments the cumulative regrets and policy for `player`.
Args:
state: The initial game state to analyze from.
policies: A list of `num_players` callables taking as input an
`info_state_node` and returning a {action: prob} dictionary. For CFR,
this is simply returning the current policy, but this can be used in
the CFR-BR solver, to prevent code duplication. If None,
`_get_infostate_policy` is used.
reach_probabilities: The probability for each player of reaching `state`
as a numpy array [prob for player 0, for player 1,..., for chance].
`reach_probabilities[player]` will work in all cases.
player: The 0-indexed player to update the values for. If `None`, the
update for all players will be performed.
Returns:
The utility of `state` for all players, assuming all players follow the
current policy defined by `self.Policy`.
"""
if state.is_terminal():
return np.asarray(state.returns())
if state.is_chance_node():
state_value = 0.0
for action, action_prob in state.chance_outcomes():
assert action_prob > 0
new_state = state.child(action)
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[-1] *= action_prob
state_value += action_prob * self._compute_counterfactual_regret_for_player(
new_state, policies, new_reach_probabilities, player)
return state_value
current_player = state.current_player()
info_state = state.information_state_string(current_player)
# No need to continue on this history branch as no update will be performed
# for any player.
# The value we return here is not used in practice. If the conditional
# statement is True, then the last taken action has probability 0 of
# occurring, so the returned value is not impacting the parent node value.
if all(reach_probabilities[:-1] == 0):
return np.zeros(self._num_players)
state_value = np.zeros(self._num_players)
# The utilities of the children states are computed recursively. As the
# regrets are added to the information state regrets for each state in that
# information state, the recursive call can only be made once per child
# state. Therefore, the utilities are cached.
children_utilities = {}
info_state_node = self._info_state_nodes[info_state]
if policies is None:
info_state_policy = self._get_infostate_policy(info_state)
else:
info_state_policy = policies[current_player](info_state)
for action in state.legal_actions():
action_prob = info_state_policy.get(action, 0.)
new_state = state.child(action)
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[current_player] *= action_prob
child_utility = self._compute_counterfactual_regret_for_player(
new_state,
policies=policies,
reach_probabilities=new_reach_probabilities,
player=player)
state_value += action_prob * child_utility
children_utilities[action] = child_utility
# If we are performing alternating updates, and the current player is not
# the current_player, we skip the cumulative values update.
# If we are performing simultaneous updates, we do update the cumulative
# values.
simultaneous_updates = player is None
if not simultaneous_updates and current_player != player:
return state_value
reach_prob = reach_probabilities[current_player]
counterfactual_reach_prob = (
np.prod(reach_probabilities[:current_player]) *
np.prod(reach_probabilities[current_player + 1:]))
state_value_for_player = state_value[current_player]
for action, action_prob in info_state_policy.items():
cfr_regret = counterfactual_reach_prob * (
children_utilities[action][current_player] - state_value_for_player)
info_state_node.cumulative_regret[action] += cfr_regret
if self._linear_averaging:
info_state_node.cumulative_policy[
action] += self._iteration * reach_prob * action_prob
else:
info_state_node.cumulative_policy[action] += reach_prob * action_prob
return state_value
def _get_infostate_policy(self, info_state_str):
"""Returns an {action: prob} dictionary for the policy on `info_state`."""
info_state_node = self._info_state_nodes[info_state_str]
prob_vec = self._current_policy.action_probability_array[
info_state_node.index_in_tabular_policy]
return {
action: prob_vec[action] for action in info_state_node.legal_actions
}
def _regret_matching(cumulative_regrets, legal_actions):
"""Returns an info state policy by applying regret-matching.
Args:
cumulative_regrets: A {action: cumulative_regret} dictionary.
legal_actions: the list of legal actions at this state.
Returns:
A dict of action -> prob for all legal actions.
"""
regrets = cumulative_regrets.values()
sum_positive_regrets = sum((regret for regret in regrets if regret > 0))
info_state_policy = {}
if sum_positive_regrets > 0:
for action in legal_actions:
positive_action_regret = max(0.0, cumulative_regrets[action])
info_state_policy[action] = (
positive_action_regret / sum_positive_regrets)
else:
for action in legal_actions:
info_state_policy[action] = 1.0 / len(legal_actions)
return info_state_policy
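# Worked example: with cumulative regrets {0: 2.0, 1: -1.0, 2: 6.0} over legal
# actions [0, 1, 2], the positive regrets sum to 8.0 and regret matching
# returns {0: 0.25, 1: 0.0, 2: 0.75}; if no regret were positive, the policy
# would be uniform (1/3 each).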
class _CFRSolver(_CFRSolverBase):
r"""Implements the Counterfactual Regret Minimization (CFR) algorithm.
The algorithm computes an approximate Nash policy for 2 player zero-sum games.
CFR can be viewed as a policy iteration algorithm. Importantly, the policies
themselves do not converge to a Nash policy, but their average does.
The main iteration loop is implemented in `evaluate_and_update_policy`:
```python
game = pyspiel.load_game("game_name")
initial_state = game.new_initial_state()
cfr_solver = CFRSolver(game)
for i in range(num_iterations):
cfr_solver.evaluate_and_update_policy()
```
Once the policy has converged, the average policy (which converges to the Nash
policy) can be computed:
```python
average_policy = cfr_solver.average_policy()
```
# Policy and average policy
policy(0) and average_policy(0) are not technically defined, but these
methods will return arbitrarily the uniform_policy.
Then, we are expected to have:
```
for t in range(1, N):
cfr_solver.evaluate_and_update_policy()
policy(t) = RM or RM+ of cumulative regrets
avg_policy(t)(s, a) ~ \sum_{k=1}^t player_reach_prob(t)(s) * policy(k)(s, a)
With Linear Averaging, the avg_policy is proportional to:
\sum_{k=1}^t k * player_reach_prob(t)(s) * policy(k)(s, a)
```
"""
def evaluate_and_update_policy(self):
"""Performs a single step of policy evaluation and policy improvement."""
self._iteration += 1
if self._alternating_updates:
for player in range(self._game.num_players()):
self._compute_counterfactual_regret_for_player(
self._root_node,
policies=None,
reach_probabilities=np.ones(self._game.num_players() + 1),
player=player)
if self._regret_matching_plus:
_apply_regret_matching_plus_reset(self._info_state_nodes)
_update_current_policy(self._current_policy, self._info_state_nodes)
else:
self._compute_counterfactual_regret_for_player(
self._root_node,
policies=None,
reach_probabilities=np.ones(self._game.num_players() + 1),
player=None)
if self._regret_matching_plus:
_apply_regret_matching_plus_reset(self._info_state_nodes)
_update_current_policy(self._current_policy, self._info_state_nodes)
class CFRPlusSolver(_CFRSolver):
"""CFR+ implementation.
The algorithm computes an approximate Nash policy for 2 player zero-sum games.
More generally, it should approach a no-regret set, which corresponds to the
set of coarse-correlated equilibria. See https://arxiv.org/abs/1305.0034
CFR can be viewed as a policy iteration algorithm. Importantly, the policies
themselves do not converge to a Nash policy, but their average does.
See https://poker.cs.ualberta.ca/publications/2015-ijcai-cfrplus.pdf
CFR+ is CFR with the following modifications:
- use Regret Matching+ instead of Regret Matching.
- use alternating updates instead of simultaneous updates.
- use linear averaging.
Usage:
```python
game = pyspiel.load_game("game_name")
initial_state = game.new_initial_state()
cfr_solver = CFRPlusSolver(game)
for i in range(num_iterations):
cfr_solver.evaluate_and_update_policy()
```
Once the policy has converged, the average policy (which converges to the Nash
policy) can be computed:
```python
average_policy = cfr_solver.average_policy()
```
"""
def __init__(self, game):
super(CFRPlusSolver, self).__init__(
game,
regret_matching_plus=True,
alternating_updates=True,
linear_averaging=True)
class CFRSolver(_CFRSolver):
"""Implements the Counterfactual Regret Minimization (CFR) algorithm.
See https://poker.cs.ualberta.ca/publications/NIPS07-cfr.pdf
NOTE: We use alternating updates (which was not the case in the original
paper) because it has been proved to be far more efficient.
"""
def __init__(self, game):
super(CFRSolver, self).__init__(
game,
regret_matching_plus=False,
alternating_updates=True,
linear_averaging=False)
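# A minimal usage sketch (illustrative): run a few CFR+ iterations on Kuhn
# poker, a small two-player sequential game shipped with OpenSpiel, and read
# off the resulting average policy.
if __name__ == "__main__":
  demo_game = pyspiel.load_game("kuhn_poker")
  demo_solver = CFRPlusSolver(demo_game)
  for _ in range(100):
    demo_solver.evaluate_and_update_policy()
  demo_average_policy = demo_solver.average_policy()
  # Rows are indexed by the tabular policy's state index; columns by action.
  print(demo_average_policy.action_probability_array)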
|
|
from ..osid import query_inspectors as osid_query_inspectors
class ObjectiveQueryInspector(osid_query_inspectors.OsidObjectQueryInspector, osid_query_inspectors.OsidFederateableQueryInspector):
"""This is the query inspector for examining objective queries."""
def get_assessment_id_terms(self):
"""Gets the assessment ``Id`` query terms.
:return: the assessment ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_id_terms = property(fget=get_assessment_id_terms)
def get_assessment_terms(self):
"""Gets the assessment query terms.
:return: the assessment terms
:rtype: ``osid.assessment.AssessmentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentQueryInspector
assessment_terms = property(fget=get_assessment_terms)
def get_knowledge_category_id_terms(self):
"""Gets the knowledge category ``Id`` query terms.
:return: the knowledge category ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
knowledge_category_id_terms = property(fget=get_knowledge_category_id_terms)
def get_knowledge_category_terms(self):
"""Gets the knowledge category query terms.
:return: the knowledge category terms
:rtype: ``osid.grading.GradeQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeQueryInspector
knowledge_category_terms = property(fget=get_knowledge_category_terms)
def get_cognitive_process_id_terms(self):
"""Gets the cognitive process ``Id`` query terms.
:return: the cognitive process ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
cognitive_process_id_terms = property(fget=get_cognitive_process_id_terms)
def get_cognitive_process_terms(self):
"""Gets the cognitive process query terms.
:return: the cognitive process terms
:rtype: ``osid.grading.GradeQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeQueryInspector
cognitive_process_terms = property(fget=get_cognitive_process_terms)
def get_requisite_objective_id_terms(self):
"""Gets the requisite objective ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
requisite_objective_id_terms = property(fget=get_requisite_objective_id_terms)
def get_requisite_objective_terms(self):
"""Gets the requisite objective query terms.
:return: the query terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
requisite_objective_terms = property(fget=get_requisite_objective_terms)
def get_dependent_objective_id_terms(self):
"""Gets the dependent objective ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
dependent_objective_id_terms = property(fget=get_dependent_objective_id_terms)
def get_dependent_objective_terms(self):
"""Gets the dependent objective query terms.
:return: the query terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
dependent_objective_terms = property(fget=get_dependent_objective_terms)
def get_equivalent_objective_id_terms(self):
"""Gets the equivalent objective ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
equivalent_objective_id_terms = property(fget=get_equivalent_objective_id_terms)
def get_equivalent_objective_terms(self):
"""Gets the equivalent objective query terms.
:return: the query terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
equivalent_objective_terms = property(fget=get_equivalent_objective_terms)
def get_ancestor_objective_id_terms(self):
"""Gets the ancestor objective ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
ancestor_objective_id_terms = property(fget=get_ancestor_objective_id_terms)
def get_ancestor_objective_terms(self):
"""Gets the ancestor objective query terms.
:return: the query terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
ancestor_objective_terms = property(fget=get_ancestor_objective_terms)
def get_descendant_objective_id_terms(self):
"""Gets the descendant objective ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
descendant_objective_id_terms = property(fget=get_descendant_objective_id_terms)
def get_descendant_objective_terms(self):
"""Gets the descendant objective query terms.
:return: the query terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
descendant_objective_terms = property(fget=get_descendant_objective_terms)
def get_activity_id_terms(self):
"""Gets the activity ``Id`` query terms.
:return: the activity ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
activity_id_terms = property(fget=get_activity_id_terms)
def get_activity_terms(self):
"""Gets the activity query terms.
:return: the activity terms
:rtype: ``osid.learning.ActivityQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ActivityQueryInspector
activity_terms = property(fget=get_activity_terms)
def get_objective_bank_id_terms(self):
"""Gets the objective bank ``Id`` query terms.
:return: the objective bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
objective_bank_id_terms = property(fget=get_objective_bank_id_terms)
def get_objective_bank_terms(self):
"""Gets the objective bank query terms.
:return: the objective bank terms
:rtype: ``osid.learning.ObjectiveBankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveBankQueryInspector
objective_bank_terms = property(fget=get_objective_bank_terms)
def get_objective_query_inspector_record(self, objective_record_type):
"""Gets the objective query inspector record corresponding to the given ``Objective`` record ``Type``.
:param objective_record_type: an objective record type
:type objective_record_type: ``osid.type.Type``
:return: the objective query inspector record
:rtype: ``osid.learning.records.ObjectiveQueryInspectorRecord``
:raise: ``NullArgument`` -- ``objective_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(objective_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ObjectiveQueryInspectorRecord
class ActivityQueryInspector(osid_query_inspectors.OsidObjectQueryInspector, osid_query_inspectors.OsidSubjugateableQueryInspector):
"""This is the query inspector for examining activity queries."""
def get_objective_id_terms(self):
"""Gets the objective ``Id`` query terms.
:return: the objective ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
objective_id_terms = property(fget=get_objective_id_terms)
def get_objective_terms(self):
"""Gets the objective query terms.
:return: the objective terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
objective_terms = property(fget=get_objective_terms)
def get_asset_id_terms(self):
"""Gets the asset ``Id`` query terms.
:return: the asset ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
asset_id_terms = property(fget=get_asset_id_terms)
def get_asset_terms(self):
"""Gets the asset query terms.
:return: the asset terms
:rtype: ``osid.repository.AssetQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.repository.AssetQueryInspector
asset_terms = property(fget=get_asset_terms)
def get_course_id_terms(self):
"""Gets the course ``Id`` query terms.
:return: the course ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
course_id_terms = property(fget=get_course_id_terms)
def get_course_terms(self):
"""Gets the course query terms.
:return: the course terms
:rtype: ``osid.course.CourseQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.course.CourseQueryInspector
course_terms = property(fget=get_course_terms)
def get_assessment_id_terms(self):
"""Gets the assessment ``Id`` query terms.
:return: the assessment ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
assessment_id_terms = property(fget=get_assessment_id_terms)
def get_assessment_terms(self):
"""Gets the assessment query terms.
:return: the assessment terms
:rtype: ``osid.assessment.AssessmentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentQueryInspector
assessment_terms = property(fget=get_assessment_terms)
def get_objective_bank_id_terms(self):
"""Gets the objective bank ``Id`` query terms.
:return: the objective bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
objective_bank_id_terms = property(fget=get_objective_bank_id_terms)
def get_objective_bank_terms(self):
"""Gets the objective bank query terms.
:return: the objective bank terms
:rtype: ``osid.learning.ObjectiveBankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveBankQueryInspector
objective_bank_terms = property(fget=get_objective_bank_terms)
def get_activity_query_inspector_record(self, activity_record_type):
"""Gets the activity query inspector record corresponding to the given ``Activity`` record ``Type``.
:param activity_record_type: an activity record type
:type activity_record_type: ``osid.type.Type``
:return: the activity query inspector record
:rtype: ``osid.learning.records.ActivityQueryInspectorRecord``
:raise: ``NullArgument`` -- ``activity_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(activity_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ActivityQueryInspectorRecord
class ProficiencyQueryInspector(osid_query_inspectors.OsidRelationshipQueryInspector):
"""This is the query inspector for examining proficiency queries."""
def get_resource_id_terms(self):
"""Gets the resource ``Id`` terms.
:return: the resource ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
resource_id_terms = property(fget=get_resource_id_terms)
def get_resource_terms(self):
"""Gets the resource terms.
:return: the resource terms
:rtype: ``osid.resource.ResourceQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.resource.ResourceQueryInspector
resource_terms = property(fget=get_resource_terms)
def get_objective_id_terms(self):
"""Gets the objective ``Id`` terms.
:return: the objective ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
objective_id_terms = property(fget=get_objective_id_terms)
def get_objective_terms(self):
"""Gets the objective terms.
:return: the objective terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
objective_terms = property(fget=get_objective_terms)
def get_completion_terms(self):
"""Gets the completion terms.
:return: the completion terms
:rtype: ``osid.search.terms.DecimalRangeTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DecimalRangeTerm
completion_terms = property(fget=get_completion_terms)
def get_minimum_completion_terms(self):
"""Gets the minimum completion terms.
:return: the minimum completion terms
:rtype: ``osid.search.terms.DecimalTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.DecimalTerm
minimum_completion_terms = property(fget=get_minimum_completion_terms)
def get_level_id_terms(self):
"""Gets the level ``Id`` query terms.
:return: the level ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
level_id_terms = property(fget=get_level_id_terms)
def get_level_terms(self):
"""Gets the level query terms.
:return: the level terms
:rtype: ``osid.grading.GradeQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeQueryInspector
level_terms = property(fget=get_level_terms)
def get_objective_bank_id_terms(self):
"""Gets the objective bank ``Id`` query terms.
:return: the objective bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
objective_bank_id_terms = property(fget=get_objective_bank_id_terms)
def get_objective_bank_terms(self):
"""Gets the objective bank query terms.
:return: the objective bank terms
:rtype: ``osid.learning.ObjectiveBankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveBankQueryInspector
objective_bank_terms = property(fget=get_objective_bank_terms)
def get_proficiency_query_inspector_record(self, proficiency_record_type):
"""Gets the proficiency query inspector record corresponding to the given ``Proficiency`` record ``Type``.
:param proficiency_record_type: a proficiency record type
:type proficiency_record_type: ``osid.type.Type``
:return: the proficiency query inspector record
:rtype: ``osid.learning.records.ProficiencyQueryInspectorRecord``
:raise: ``NullArgument`` -- ``proficiency_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(proficiency_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ProficiencyQueryInspectorRecord
class ObjectiveBankQueryInspector(osid_query_inspectors.OsidCatalogQueryInspector):
"""This is the query inspector for examining objective bank queries."""
def get_objective_id_terms(self):
"""Gets the objective ``Id`` query terms.
:return: the objective ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
objective_id_terms = property(fget=get_objective_id_terms)
def get_objective_terms(self):
"""Gets the objective query terms.
:return: the objective terms
:rtype: ``osid.learning.ObjectiveQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveQueryInspector
objective_terms = property(fget=get_objective_terms)
def get_activity_id_terms(self):
"""Gets the activity ``Id`` query terms.
:return: the activity ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
activity_id_terms = property(fget=get_activity_id_terms)
def get_activity_terms(self):
"""Gets the activity query terms.
:return: the activity terms
:rtype: ``osid.learning.ActivityQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ActivityQueryInspector
activity_terms = property(fget=get_activity_terms)
def get_ancestor_objective_bank_id_terms(self):
"""Gets the ancestor objective bank ``Id`` query terms.
:return: the ancestor objective bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
ancestor_objective_bank_id_terms = property(fget=get_ancestor_objective_bank_id_terms)
def get_ancestor_objective_bank_terms(self):
"""Gets the ancestor objective bank query terms.
:return: the ancestor objective bank terms
:rtype: ``osid.learning.ObjectiveBankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveBankQueryInspector
ancestor_objective_bank_terms = property(fget=get_ancestor_objective_bank_terms)
def get_descendant_objective_bank_id_terms(self):
"""Gets the descendant objective bank ``Id`` query terms.
:return: the descendant objective bank ``Id`` terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
descendant_objective_bank_id_terms = property(fget=get_descendant_objective_bank_id_terms)
def get_descendant_objective_bank_terms(self):
"""Gets the descendant objective bank query terms.
:return: the descendant objective bank terms
:rtype: ``osid.learning.ObjectiveBankQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.ObjectiveBankQueryInspector
descendant_objective_bank_terms = property(fget=get_descendant_objective_bank_terms)
def get_objective_bank_query_inspector_record(self, objective_bank_record_type):
"""Gets the objective bank query inspector record corresponding to the given ``ObjectiveBank`` record ``Type``.
:param objective_bank_record_type: an objective bank record type
:type objective_bank_record_type: ``osid.type.Type``
:return: the objective bank query inspector record
:rtype: ``osid.learning.records.ObjectiveBankQueryInspectorRecord``
:raise: ``NullArgument`` -- ``objective_bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(objective_bank_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.learning.records.ObjectiveBankQueryInspectorRecord
|
|
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
from neutron.common import constants
from neutron.common import exceptions as exception
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.nsxlib import _build_uri_path
from neutron.plugins.vmware.nsxlib import do_request
from neutron.plugins.vmware.nsxlib import get_all_query_pages
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
LSWITCH_RESOURCE = "lswitch"
LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE
LOG = log.getLogger(__name__)
def _configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs):
lport_obj['allowed_address_pairs'] = []
if port_security_enabled:
for fixed_ip in fixed_ips:
ip_address = fixed_ip.get('ip_address')
if ip_address:
lport_obj['allowed_address_pairs'].append(
{'mac_address': mac_address, 'ip_address': ip_address})
# add address pair allowing src_ip 0.0.0.0 to leave
# this is required for outgoing dhcp request
lport_obj["allowed_address_pairs"].append(
{"mac_address": mac_address,
"ip_address": "0.0.0.0"})
lport_obj['security_profiles'] = list(security_profiles or [])
lport_obj['queue_uuid'] = queue_id
if mac_learning_enabled is not None:
lport_obj["mac_learning"] = mac_learning_enabled
lport_obj["type"] = "LogicalSwitchPortConfig"
for address_pair in list(allowed_address_pairs or []):
lport_obj['allowed_address_pairs'].append(
{'mac_address': address_pair['mac_address'],
'ip_address': address_pair['ip_address']})
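# Illustrative sketch (not part of the original module): shows the payload
# _configure_extensions() builds for a hypothetical port. The MAC address,
# IP and security profile UUID below are made-up placeholders.
def _example_configure_extensions():
    lport_obj = {}
    _configure_extensions(lport_obj,
                          mac_address='fa:16:3e:00:00:01',
                          fixed_ips=[{'ip_address': '10.0.0.5'}],
                          port_security_enabled=True,
                          security_profiles=['sec-profile-uuid'],
                          queue_id=None,
                          mac_learning_enabled=False,
                          allowed_address_pairs=[])
    # lport_obj now carries the fixed-IP address pair plus the 0.0.0.0 pair
    # needed for outgoing DHCP requests, the security profile list, the
    # queue UUID and the logical switch port type.
    return lport_obj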
def get_lswitch_by_id(cluster, lswitch_id):
try:
lswitch_uri_path = _build_uri_path(
LSWITCH_RESOURCE, lswitch_id,
relations="LogicalSwitchStatus")
return do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
except exception.NotFound:
# FIXME(salv-orlando): this should not raise a neutron exception
raise exception.NetworkNotFound(net_id=lswitch_id)
def get_lswitches(cluster, neutron_net_id):
def lookup_switches_by_tag():
# Fetch extra logical switches
lswitch_query_path = _build_uri_path(
LSWITCH_RESOURCE,
fields="uuid,display_name,tags,lport_count",
relations="LogicalSwitchStatus",
filters={'tag': neutron_net_id,
'tag_scope': 'quantum_net_id'})
return get_all_query_pages(lswitch_query_path, cluster)
lswitch_uri_path = _build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
relations="LogicalSwitchStatus")
results = []
try:
ls = do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
results.append(ls)
for tag in ls['tags']:
if (tag['scope'] == "multi_lswitch" and
tag['tag'] == "True"):
results.extend(lookup_switches_by_tag())
except exception.NotFound:
# This is legit if the neutron network was created using
# a post-Havana version of the plugin
results.extend(lookup_switches_by_tag())
if results:
return results
else:
raise exception.NetworkNotFound(net_id=neutron_net_id)
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
transport_zones_config,
shared=None,
**kwargs):
# The tag scope adopts a slightly different naming convention for
# historical reasons
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"transport_zones": transport_zones_config,
"tags": utils.get_tags(os_tid=tenant_id,
quantum_net_id=neutron_net_id)}
# TODO(salv-orlando): Now that we have async status synchronization
# this tag is perhaps not needed anymore
if shared:
lswitch_obj["tags"].append({"tag": "true",
"scope": "shared"})
if "tags" in kwargs:
lswitch_obj["tags"].extend(kwargs["tags"])
uri = _build_uri_path(LSWITCH_RESOURCE)
lswitch = do_request(HTTP_POST, uri, json.dumps(lswitch_obj),
cluster=cluster)
LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
return lswitch
def update_lswitch(cluster, lswitch_id, display_name,
tenant_id=None, **kwargs):
uri = _build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"tags": utils.get_tags(os_tid=tenant_id)}
if "tags" in kwargs:
lswitch_obj["tags"].extend(kwargs["tags"])
try:
return do_request(HTTP_PUT, uri, json.dumps(lswitch_obj),
cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=lswitch_id)
def delete_network(cluster, net_id, lswitch_id):
delete_networks(cluster, net_id, [lswitch_id])
#TODO(salvatore-orlando): Simplify and harmonize
def delete_networks(cluster, net_id, lswitch_ids):
for ls_id in lswitch_ids:
path = "/ws.v1/lswitch/%s" % ls_id
try:
do_request(HTTP_DELETE, path, cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=ls_id)
def query_lswitch_lports(cluster, ls_uuid, fields="*",
filters=None, relations=None):
# Fix filter for attachments
if filters and "attachment" in filters:
filters['attachment_vif_uuid'] = filters["attachment"]
del filters['attachment']
uri = _build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid,
fields=fields, filters=filters, relations=relations)
return do_request(HTTP_GET, uri, cluster=cluster)['results']
def delete_port(cluster, switch, port):
uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
try:
do_request(HTTP_DELETE, uri, cluster=cluster)
except exception.NotFound:
LOG.exception(_("Port or Network not found"))
raise exception.PortNotFoundOnNetwork(
net_id=switch, port_id=port)
except api_exc.NsxApiException:
raise exception.NeutronException()
def get_ports(cluster, networks=None, devices=None, tenants=None):
vm_filter_obsolete = ""
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Neutron checks to see if
# the network has any ports.
if networks:
# FIXME (Aaron) If we get more than one network_id this won't work
lswitch = networks[0]
else:
lswitch = "*"
if devices:
for device_id in devices:
vm_filter_obsolete = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id,
obfuscate=True),
vm_filter_obsolete])
vm_filter = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id),
vm_filter])
if tenants:
for tenant in tenants:
tenant_filter = '&'.join(
["tag_scope=os_tid",
"tag=%s" % tenant,
tenant_filter])
nsx_lports = {}
lport_fields_str = ("tags,admin_status_enabled,display_name,"
"fabric_status_up")
try:
lport_query_path_obsolete = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
lport_query_path = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter, tenant_filter))
try:
# NOTE(armando-migliaccio): by querying with obsolete tag first
# current deployments won't take the performance hit of a double
# call. In release L-** or M-**, we might want to swap the calls
# as it's likely that ports with the new tag would outnumber the
# ones with the old tag
ports = get_all_query_pages(lport_query_path_obsolete, cluster)
if not ports:
ports = get_all_query_pages(lport_query_path, cluster)
except exception.NotFound:
LOG.warn(_("Lswitch %s not found in NSX"), lswitch)
ports = None
if ports:
for port in ports:
for tag in port["tags"]:
if tag["scope"] == "q_port_id":
nsx_lports[tag["tag"]] = port
except Exception:
err_msg = _("Unable to get ports")
LOG.exception(err_msg)
raise nsx_exc.NsxPluginException(err_msg=err_msg)
return nsx_lports
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
"""Get port by neutron tag.
Returns the NSX UUID of the logical port with tag q_port_id equal to
neutron_port_id or None if the port is not Found.
"""
uri = _build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid,
fields='uuid',
filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'})
LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' "
"on: '%(lswitch_uuid)s'"),
{'neutron_port_id': neutron_port_id,
'lswitch_uuid': lswitch_uuid})
res = do_request(HTTP_GET, uri, cluster=cluster)
num_results = len(res["results"])
if num_results >= 1:
if num_results > 1:
LOG.warn(_("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."),
{'num_ports': num_results,
'neutron_port_id': neutron_port_id})
return res["results"][0]
def get_port(cluster, network, port, relations=None):
LOG.info(_("get_port() %(network)s %(port)s"),
{'network': network, 'port': port})
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
if relations:
uri += "relations=%s" % relations
try:
return do_request(HTTP_GET, uri, cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port, net_id=network)
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=utils.check_and_truncate(display_name),
tags=utils.get_tags(os_tid=tenant_id,
q_port_id=neutron_port_id,
vm_id=utils.device_id_to_vm_id(device_id)))
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
try:
result = do_request(HTTP_PUT, path, json.dumps(lport_obj),
cluster=cluster)
LOG.debug(_("Updated logical port %(result)s "
"on logical switch %(uuid)s"),
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=lport_uuid, net_id=lswitch_uuid)
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
"""Creates a logical port on the assigned logical switch."""
display_name = utils.check_and_truncate(display_name)
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=display_name,
tags=utils.get_tags(os_tid=tenant_id,
q_port_id=neutron_port_id,
vm_id=utils.device_id_to_vm_id(device_id))
)
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = _build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid)
result = do_request(HTTP_POST, path, json.dumps(lport_obj),
cluster=cluster)
LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"),
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port."""
try:
r = do_request(HTTP_GET,
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Port not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port_id, net_id=lswitch_id)
if r['link_status_up'] is True:
return constants.PORT_STATUS_ACTIVE
else:
return constants.PORT_STATUS_DOWN
def plug_interface(cluster, lswitch_id, lport_id, att_obj):
return do_request(HTTP_PUT,
_build_uri_path(LSWITCHPORT_RESOURCE,
lport_id, lswitch_id,
is_attachment=True),
json.dumps(att_obj),
cluster=cluster)
def plug_vif_interface(
cluster, lswitch_id, port_id, port_type, attachment=None):
"""Plug a VIF Attachment object in a logical port."""
lport_obj = {}
if attachment:
lport_obj["vif_uuid"] = attachment
lport_obj["type"] = port_type
return plug_interface(cluster, lswitch_id, port_id, lport_obj)
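# Minimal usage sketch (not part of the original module): ``cluster`` must be
# an already-configured NSX cluster object; every UUID and name below is a
# made-up placeholder, and 'VifAttachment' is assumed to be the desired
# attachment type.
def _example_create_and_plug_port(cluster, lswitch_uuid):
    lport = create_lport(cluster, lswitch_uuid,
                         tenant_id='tenant-1',
                         neutron_port_id='neutron-port-uuid',
                         display_name='example-port',
                         device_id='device-uuid',
                         admin_status_enabled=True,
                         mac_address='fa:16:3e:00:00:01',
                         fixed_ips=[{'ip_address': '10.0.0.5'}],
                         port_security_enabled=True)
    # Plug a VIF attachment into the freshly created logical port.
    return plug_vif_interface(cluster, lswitch_uuid, lport['uuid'],
                              'VifAttachment',
                              attachment='neutron-port-uuid')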
|
|
import re
EOF = -1
COMMA = ','
SEMICOLON = ';'
LINE_REF_SEPARATORS = (COMMA, SEMICOLON)
default_range_info = dict(left_ref=None,
left_offset=None,
left_search_offsets=[],
separator=None,
right_ref=None,
right_offset=None,
right_search_offsets=[],
text_range='')
class ParserBase(object):
def __init__(self, source):
self.c = ''
self.source = source
self.result = default_range_info.copy()
self.n = -1
self.consume()
def consume(self):
if self.c == EOF:
raise SyntaxError("End of file reached.")
if self.n == -1 and not self.source:
self.c = EOF
return
else:
self.n += 1
if self.n >= len(self.source):
self.c = EOF
return
self.c = self.source[self.n]
class VimParser(ParserBase):
STATE_NEUTRAL = 0
STATE_SEARCH_OFFSET = 1
def __init__(self, *args, **kwargs):
self.state = VimParser.STATE_NEUTRAL
self.current_side = 'left'
ParserBase.__init__(self, *args, **kwargs)
def parse_full_range(self):
# todo: make sure that parse_range throws error for unknown tokens
self.parse_range()
sep = self.match_one(',;')
if sep:
if not self.result[self.current_side + '_offset'] and not self.result[self.current_side + '_ref']:
self.result[self.current_side + '_ref'] = '.'
self.consume()
self.result['separator'] = sep
self.current_side = 'right'
self.parse_range()
if self.c != EOF and not (self.c.isalpha() or self.c in '&!'):
raise SyntaxError("E492 Not an editor command.")
return self.result
def parse_range(self):
if self.c == EOF:
return self.result
line_ref = self.consume_if_in(list('.%$'))
if line_ref:
self.result[self.current_side + "_ref"] = line_ref
while self.c != EOF:
if self.c == "'":
self.consume()
if self.c != EOF and not (self.c.isalpha() or self.c in ("<", ">")):
raise SyntaxError("E492 Not an editor command.")
self.result[self.current_side + "_ref"] = "'%s" % self.c
self.consume()
elif self.c in ".$%%'" and not self.result[self.current_side + "_ref"]:
if (self.result[self.current_side + "_search_offsets"] or
self.result[self.current_side + "_offset"]):
raise SyntaxError("E492 Not an editor command.")
elif self.c.startswith(tuple("01234567890+-")):
offset = self.match_offset()
self.result[self.current_side + '_offset'] = offset
elif self.c.startswith(tuple('/?')):
self.state = VimParser.STATE_SEARCH_OFFSET
search_offests = self.match_search_based_offsets()
self.result[self.current_side + "_search_offsets"] = search_offests
self.state = VimParser.STATE_NEUTRAL
elif self.c not in ':,;&!' and not self.c.isalpha():
raise SyntaxError("E492 Not an editor command.")
else:
break
if (self.result[self.current_side + "_ref"] == '%' and
(self.result[self.current_side + "_offset"] or
self.result[self.current_side + "_search_offsets"])):
raise SyntaxError("E492 Not an editor command.")
end = max(0, min(self.n, len(self.source)))
self.result['text_range'] = self.source[:end]
return self.result
def consume_if_in(self, items):
rv = None
if self.c in items:
rv = self.c
self.consume()
return rv
def match_search_based_offsets(self):
offsets = []
while self.c != EOF and self.c.startswith(tuple('/?')):
new_offset = []
new_offset.append(self.c)
search = self.match_one_search_offset()
new_offset.append(search)
# numeric_offset = self.consume_while_match('^[0-9+-]') or '0'
numeric_offset = self.match_offset()
new_offset.append(numeric_offset)
offsets.append(new_offset)
return offsets
def match_one_search_offset(self):
search_kind = self.c
rv = ''
self.consume()
while self.c != EOF and self.c != search_kind:
if self.c == '\\':
self.consume()
if self.c != EOF:
rv += self.c
self.consume()
else:
rv += self.c
self.consume()
if self.c == search_kind:
self.consume()
return rv
def match_offset(self):
offsets = []
sign = 1
is_num_or_sign = re.compile('^[0-9+-]')
while self.c != EOF and is_num_or_sign.match(self.c):
if self.c in '+-':
signs = self.consume_while_match('^[+-]')
if self.state == VimParser.STATE_NEUTRAL and len(signs) > 1 and not self.result[self.current_side + '_ref']:
self.result[self.current_side + '_ref'] = '.'
if self.c != EOF and self.c.isdigit():
if self.state == VimParser.STATE_NEUTRAL and not self.result[self.current_side + '_ref']:
self.result[self.current_side + '_ref'] = '.'
sign = -1 if signs[-1] == '-' else 1
signs = signs[:-1] if signs else []
subtotal = 0
for item in signs:
subtotal += 1 if item == '+' else -1
offsets.append(subtotal)
elif self.c.isdigit():
nr = self.consume_while_match('^[0-9]')
offsets.append(sign * int(nr))
sign = 1
else:
break
return sum(offsets)
# self.result[self.current_side + '_offset'] = sum(offsets)
def match_one(self, seq):
if self.c != EOF and self.c in seq:
return self.c
def consume_while_match(self, regex):
rv = ''
r = re.compile(regex)
while self.c != EOF and r.match(self.c):
rv += self.c
self.consume()
return rv
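# Illustrative sketch (not part of the original module): runs the parser
# defined above on a hard-coded Vim-style range.
def _example_vim_parser():
    rng = VimParser('12,34delete').parse_full_range()
    # rng['left_offset'] == 12, rng['right_offset'] == 34,
    # rng['separator'] == ',' and rng['text_range'] == '12,34'
    return rng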
class CommandLineParser(ParserBase):
def __init__(self, source, *args, **kwargs):
ParserBase.__init__(self, source, *args, **kwargs)
self.range_parser = VimParser(source)
self.result = dict(range=None, commands=[], errors=[])
def parse_cmd_line(self):
try:
rng = self.range_parser.parse_full_range()
except SyntaxError as e:
rng = None
self.result["errors"].append(str(e))
return self.result
self.result['range'] = rng
# sync up with range parser the dumb way
self.n = self.range_parser.n
self.c = self.range_parser.c
while self.c != EOF and self.c == ' ':
self.consume()
self.parse_commands()
if not self.result['commands'][0]['cmd']:
self.result['commands'][0]['cmd'] = ':'
return self.result
def parse_commands(self):
name = ''
cmd = {}
while self.c != EOF:
if self.c.isalpha() and '&' not in name:
name += self.c
self.consume()
elif self.c == '&' and (not name or name == '&'):
name += self.c
self.consume()
else:
break
if not name and self.c == '!':
name = '!'
self.consume()
cmd['cmd'] = name
cmd['forced'] = self.c == '!'
if cmd['forced']:
self.consume()
while self.c != EOF and self.c == ' ':
self.consume()
cmd['args'] = ''
if not self.c == EOF:
cmd['args'] = self.source[self.n:]
self.result['commands'].append(cmd)
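# Illustrative sketch (not part of the original module): parses a full
# command line, yielding both the range and the command description.
def _example_command_line_parser():
    result = CommandLineParser('%delete!').parse_cmd_line()
    # result['range']['left_ref'] == '%' and result['commands'] ==
    # [{'cmd': 'delete', 'forced': True, 'args': ''}]
    return result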
class AddressParser(ParserBase):
STATE_NEUTRAL = 1
STATE_SEARCH_OFFSET = 2
def __init__(self, source, *args, **kwargs):
ParserBase.__init__(self, source, *args, **kwargs)
self.result = dict(ref=None, offset=None, search_offsets=[])
self.state = AddressParser.STATE_NEUTRAL
def parse(self):
if self.c == EOF:
return self.result
ref = self.consume_if_in(list('.$'))
if ref:
self.result["ref"] = ref
while self.c != EOF:
if self.c in '0123456789+-':
rv = self.match_offset()
self.result['offset'] = rv
elif self.c in '?/':
rv = self.match_search_based_offsets()
self.result['search_offsets'] = rv
return self.result
def match_search_based_offsets(self):
offsets = []
while self.c != EOF and self.c.startswith(tuple('/?')):
new_offset = []
new_offset.append(self.c)
search = self.match_one_search_offset()
new_offset.append(search)
# numeric_offset = self.consume_while_match('^[0-9+-]') or '0'
numeric_offset = self.match_offset()
new_offset.append(numeric_offset)
offsets.append(new_offset)
return offsets
def match_one_search_offset(self):
search_kind = self.c
rv = ''
self.consume()
while self.c != EOF and self.c != search_kind:
if self.c == '\\':
self.consume()
if self.c != EOF:
rv += self.c
self.consume()
else:
rv += self.c
self.consume()
if self.c == search_kind:
self.consume()
return rv
def match_offset(self):
offsets = []
sign = 1
is_num_or_sign = re.compile('^[0-9+-]')
while self.c != EOF and is_num_or_sign.match(self.c):
if self.c in '+-':
signs = self.consume_while_match('^[+-]')
if self.state == AddressParser.STATE_NEUTRAL and len(signs) > 0 and not self.result['ref']:
self.result['ref'] = '.'
if self.c != EOF and self.c.isdigit():
sign = -1 if signs[-1] == '-' else 1
signs = signs[:-1] if signs else []
subtotal = 0
for item in signs:
subtotal += 1 if item == '+' else -1
offsets.append(subtotal)
elif self.c.isdigit():
nr = self.consume_while_match('^[0-9]')
offsets.append(sign * int(nr))
sign = 1
else:
break
return sum(offsets)
def match_one(self, seq):
if self.c != EOF and self.c in seq:
return self.c
def consume_while_match(self, regex):
rv = ''
r = re.compile(regex)
while self.c != EOF and r.match(self.c):
rv += self.c
self.consume()
return rv
def consume_if_in(self, items):
rv = None
if self.c in items:
rv = self.c
self.consume()
return rv
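# Illustrative sketch (not part of the original module): parses a single
# address with a numeric offset.
def _example_address_parser():
    address = AddressParser('.+5').parse()
    # address == {'ref': '.', 'offset': 5, 'search_offsets': []}
    return address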
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module is used to perform any operations on nested structures, which can be
specified as sequences that contain non-sequence elements or other sequences.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e. no references in the structure of the input of these functions
should be recursive.
@@assert_same_structure
@@is_sequence
@@flatten
@@flatten_dict_items
@@pack_sequence_as
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, collections.Sequence) and
all(isinstance(f, six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_flat_nest(nest):
for n in nest:
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
def is_sequence(seq):
"""Returns a true if its input is a collections.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
True if the sequence is not a string and is a collections.Sequence.
"""
return (isinstance(seq, collections.Sequence)
and not isinstance(seq, six.string_types))
def flatten(nest):
"""Returns a flat sequence from a given nested structure.
If `nest` is not a sequence, this returns a single-element list: `[nest]`.
Args:
nest: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the flattened version of the input.
"""
return list(_yield_flat_nest(nest)) if is_sequence(nest) else [nest]
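# Small illustrative example (not part of the original module).
def _example_flatten():
    nested = ((3, 4), 5, [6, (7,)])
    assert flatten(nested) == [3, 4, 5, 6, 7]
    # A scalar (or a string, which is treated as a scalar) becomes a
    # single-element list.
    assert flatten("scalar") == ["scalar"]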
def _recursive_assert_same_structure(nest1, nest2):
is_sequence_nest1 = is_sequence(nest1)
if is_sequence_nest1 != is_sequence(nest2):
raise ValueError(
"The two structures don't have the same nested structure. "
"First structure: %s, second structure: %s." % (nest1, nest2))
if is_sequence_nest1:
type_nest1 = type(nest1)
type_nest2 = type(nest2)
if type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2))
for n1, n2 in zip(nest1, nest2):
_recursive_assert_same_structure(n1, n2)
def assert_same_structure(nest1, nest2):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures.
"""
len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1
len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1
if len_nest1 != len_nest2:
raise ValueError("The two structures don't have the same number of "
"elements. First structure: %s, second structure: %s."
% (nest1, nest2))
_recursive_assert_same_structure(nest1, nest2)
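# Small illustrative example (not part of the original module).
def _example_assert_same_structure():
    # Same nesting with different leaf values passes silently.
    assert_same_structure(((1, 2), 3), (("a", "b"), "c"))
    # Different nesting raises ValueError.
    try:
        assert_same_structure(((1, 2), 3), (1, 2, 3))
    except ValueError:
        pass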
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value do not have the same structure, or if keys are
not unique.
"""
if not isinstance(dictionary, dict):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in six.iteritems(dictionary):
if not is_sequence(i):
if i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique." % i)
flat_dictionary[i] = v
else:
flat_i = flatten(i)
flat_v = flatten(v)
if len(flat_i) != len(flat_v):
raise ValueError(
"Could not flatten dictionary. Key had %d elements, but value had "
"%d elements. Key: %s, value: %s."
% (len(flat_i), len(flat_v), flat_i, flat_v))
for new_i, new_v in zip(flat_i, flat_v):
if new_i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique."
% (new_i))
flat_dictionary[new_i] = new_v
return flat_dictionary
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in structure:
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not is_sequence(flat_sequence):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
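# Small illustrative example (not part of the original module): a flatten /
# pack_sequence_as round trip.
def _example_pack_sequence_as():
    structure = ((1, 2), 3, [4])
    repacked = pack_sequence_as(structure, ["a", "b", "c", "d"])
    # repacked mirrors the original nesting: (("a", "b"), "c", ["d"])
    assert flatten(repacked) == ["a", "b", "c", "d"]
    return repacked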
|
|
# Copyright 2014, Quixey Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
class Region(object):
def __init__(self, region_id, local_name):
"""Constructor.
Args:
region_id (str): The id of the region.
local_name (str): The local name of the region.
"""
self.region_id = region_id
self.local_name = local_name
def __repr__(self):
return u'<Region %s (%s) at %s>' % (
self.region_id, self.local_name, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class Instance(object):
"""An Aliyun ECS instance."""
def __init__(
self, instance_id, name, image_id, region_id, instance_type,
hostname, status, security_group_ids, public_ip_addresses,
internal_ip_addresses, internet_charge_type,
internet_max_bandwidth_in, internet_max_bandwidth_out,
creation_time, description, cluster_id, operation_locks, zone_id):
""""Constructor.
Args:
instance_id (str): The id of the instance.
name (str): The name of the instance.
image_id (str): The id of the image used to create the instance.
region_id (str): The id of the region in which the instance lies.
instance_type (str): The type of the instance.
hostname (str): The hostname of the instance.
status (str): The status of the instance.
security_group_ids (list): The security group ids for the instance.
public_ip_addresses (list): Its public ip addresses.
internal_ip_addresses (list): Its internal ip addresses.
internet_charge_type (str): The accounting method of network use.
internet_max_bandwidth_in (int): The max incoming bandwidth.
internet_max_bandwidth_out (int): The max outgoing bandwidth.
creation_time (datetime): Its creation time.
description (str): A long description of the instance.
cluster_id (str): The id of the cluster the instance belongs to, if any.
operation_locks (list of str): Any held operation locks. 'security'
and/or 'financial'
zone_id (str): The ID of the Availability Zone this instance is in.
"""
self.instance_id = instance_id
self.name = name
self.image_id = image_id
self.region_id = region_id
self.instance_type = instance_type
self.hostname = hostname
self.status = status
self.security_group_ids = security_group_ids
self.public_ip_addresses = public_ip_addresses
self.internal_ip_addresses = internal_ip_addresses
self.internet_charge_type = internet_charge_type
self.internet_max_bandwidth_in = internet_max_bandwidth_in
self.internet_max_bandwidth_out = internet_max_bandwidth_out
self.creation_time = creation_time
self.description = description
self.cluster_id = cluster_id
self.operation_locks = operation_locks
self.zone_id = zone_id
def __repr__(self):
return '<Instance %s at %s>' % (self.instance_id, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class InstanceStatus(object):
def __init__(self, instance_id, status):
"""Constructor.
Args:
instance_id (str): The id of the instance.
status (str): The status of the instance.
"""
self.instance_id = instance_id
self.status = status
def __repr__(self):
return u'<InstanceId %s is %s at %s>' % (
self.instance_id, self.status, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class InstanceType(object):
def __init__(self, instance_type_id, cpu_core_count, memory_size):
"""Constructor.
Args:
instance_type_id (str): The instance type id.
cpu_core_count (int): The number of cpus.
memory_size (int): The memory size in GB.
"""
self.instance_type_id = instance_type_id
self.cpu_core_count = cpu_core_count
self.memory_size = memory_size
def __repr__(self):
return u'<InstanceType %s has %s cores and %sGB memory at %s>' % (
self.instance_type_id, self.cpu_core_count, self.memory_size,
id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class Snapshot(object):
def __init__(self, snapshot_id, snapshot_name, progress, creation_time,
description=None, source_disk_id=None, source_disk_type=None,
source_disk_size=None):
"""Snapshot for ECS Disk.
snapshot_id (str): The id of the snapshot.
snapshot_name (str): The name of the snapshot.
progress (int): The progress ready percentage.
creation_time (datetime): Its creation time.
description (str): A long description of the snapshot.
source_disk_id (str): ID of the original disk.
source_disk_type (str): "data" or "system", for the original disk.
source_disk_size (int): size of the original disk in GB.
"""
self.snapshot_id = snapshot_id
self.snapshot_name = snapshot_name
self.progress = progress
self.creation_time = creation_time
self.description = description
self.source_disk_id = source_disk_id
self.source_disk_type = source_disk_type
self.source_disk_size = source_disk_size
def __repr__(self):
return u'<Snapshot %s is %s%% ready at %s>' % (
self.snapshot_id, self.progress, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class AutoSnapshotPolicy(object):
def __init__(self, system_disk_enabled, system_disk_time_period,
system_disk_retention_days, system_disk_retention_last_week,
data_disk_enabled, data_disk_time_period,
data_disk_retention_days, data_disk_retention_last_week):
'''AutoSnapshotPolicy describing how to manage snapshot rotation.
The policy is composed of a system- and data-disk policy, but the API
does not handle them independently, so this object combines them too.
Arguments:
system_disk_enabled (bool): whether the policy is on for SystemDisk
system_disk_time_period (int): the time period during which to
auto-snapshot. There are 4 choices:
1, 2, 3 or 4. These correspond to
these time periods:
1: 1:00 - 7:00
2: 7:00 - 13:00
3: 13:00 - 19:00
4: 19:00 - 1:00
All times Beijing Time.
system_disk_retention_days (int): number of days to retain.
must be between 1 and 3, inclusive
system_disk_retention_last_week (bool): whether to retain a weekly
snapshot from Sundays.
data_disk_enabled (bool): whether the policy is on for DataDisk
data_disk_time_period (int): the time period during which to
auto-snapshot. There are 4 choices: 1,
2, 3 or 4. These correspond to these
time periods:
1: 1:00 - 7:00
2: 7:00 - 13:00
3: 13:00 - 19:00
4: 19:00 - 1:00
All times Beijing Time.
data_disk_retention_days (int): number of days to retain.
must be between 1 and 3, inclusive
data_disk_retention_last_week (bool): whether to retain a weekly
snapshot from Sundays.
'''
self.system_disk_enabled = system_disk_enabled
self.system_disk_time_period = system_disk_time_period
self.system_disk_retention_days = system_disk_retention_days
self.system_disk_retention_last_week = system_disk_retention_last_week
self.data_disk_enabled = data_disk_enabled
self.data_disk_time_period = data_disk_time_period
self.data_disk_retention_days = data_disk_retention_days
self.data_disk_retention_last_week = data_disk_retention_last_week
def __repr__(self):
return u'<AutoSnapshotPolicy at %s>' % id(self)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class AutoSnapshotExecutionStatus(object):
def __init__(self, system_disk_execution_status, data_disk_execution_status):
'''Description of the status of the auto-snapshot policy's executions.
The arguments are either 'Standby', 'Executed', or 'Failed'.
Standby: The policy is created, but disabled.
Executed: The latest auto-snapshot was successful.
Failed: The latest auto-snapshot was unsuccessful.
These are separated by system- or data-disk types since they can work
independently.
Args:
system_disk_execution_status (str): Standby|Executed|Failed
data_disk_execution_status (str): Standby|Executed|Failed
'''
self.system_disk_execution_status = system_disk_execution_status
self.data_disk_execution_status = data_disk_execution_status
def __repr__(self):
return u'<AutoSnapshotExecutionStatus at %s>' % id(self)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class AutoSnapshotPolicyStatus(object):
def __init__(self, status, policy):
self.status = status
self.policy = policy
def __repr__(self):
return u'<AutoSnapshotPolicyStatus at %s>' % id(self)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class Disk(object):
def __init__(self, disk_id, disk_type, disk_category, disk_size,
attached_time=None, creation_time=None, delete_auto_snapshot=None,
delete_with_instance=None, description=None, detached_time=None,
device=None, image_id=None, instance_id=None, operation_locks=None,
portable=None, product_code=None, snapshot_id=None, status=None,
zone_id=None):
"""ECS Disk object. Required arguments are always required when creating
an ECS disk.
Args:
disk_id (str): The id of the disk.
disk_type (str): The type of disk.
Values can be system or data.
disk_category (str): The category of the disk.
Values can be cloud, ephemeral
disk_size (int): Its size in GB.
attached_time (datetime): The time the disk was last attached.
creation_time (datetime): The time the disk was created.
delete_auto_snapshot (bool): Whether the AutoSnapshotPolicy will be
deleted with the disk.
delete_with_instance (bool): Whether the Disk will be deleted with
its associated Instance.
description (str): A long description of the disk.
detached_time (datetime): The time the disk was last detached.
device (str): The device path if attached. E.g. /dev/xvdb
image_id (str): The Image id the Disk was created with.
instance_id (str): The Instance id the disk is attached to.
operation_locks (list): The locks on the resource. It can be
'Financial' and/or 'Security'.
portable (bool): Whether the Disk can be detached and re-attached
elsewhere.
product_code (str): ID of the Disk in the ECS Mirror Market.
snapshot_id (str): ID of the snapshot the Disk was created from.
status (str): The status of the disk. E.g. "In_use", "Creating", &c.
zone_id (str): The Availability Zone of the Disk.
"""
if operation_locks is None:
operation_locks = []
self.disk_id = disk_id
self.disk_type = disk_type
self.disk_category = disk_category
self.disk_size = disk_size
self.attached_time = attached_time
self.creation_time = creation_time
self.delete_auto_snapshot = delete_auto_snapshot
self.delete_with_instance = delete_with_instance
self.description = description
self.detached_time = detached_time
self.device = device
self.image_id = image_id
self.instance_id = instance_id
self.operation_locks = operation_locks
self.portable = portable
self.product_code = product_code
self.snapshot_id = snapshot_id
self.status = status
self.zone_id = zone_id
def __repr__(self):
return u'<Disk %s of type %s is %sGB at %s>' % (
self.disk_id, self.disk_type, self.disk_size, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class DiskMappingError(Exception):
"""DiskMappingError"""
class DiskMapping(object):
def __init__(self, category, size=None, snapshot_id=None, name=None,
description=None, device=None):
"""DiskMapping used to create and attach a disk to an instance.
The disk can be created from either a size parameter or a snapshot_id.
Different disk categories support different disk sizes, and snapshots
need to be from the same category of disk you are creating. "cloud"
disks support sizes between 5 and 2000 GB. "ephemeral" disks support 5
to 1024 GB sizes.
Args:
category (str): "cloud" or "ephemeral". Usually "cloud". Check the
output of :method:`aliyun.ecs.connection.EcsConnection.describe_zones`
to see which categories of disks your zone supports.
size (int): The size of the disk. Limits depend on category.
snapshot_id (str): ID of :class:`.model.Snapshot` to create disk of.
name (str): A short name for the disk, between 2 and 128 characters.
description (str): A longer description of the disk. Between 2 and
256 characters.
device (str): System device string. Leave None to defer to the system.
Valid choices are from /dev/xvdb to /dev/xvdz.
Raises:
DiskMappingError: If both size and snapshot are specified.
"""
if None not in (size, snapshot_id):
raise DiskMappingError("DiskMapping does not support both size AND snapshot. Choose one.")
self.category = category
self.size = size
self.snapshot_id = snapshot_id
self.name = name
self.description = description
self.device = device
def api_dict(self, ordinal=1):
"""Serialize for insertion into API request parameters.
Args:
ordinal (int): The number of the data disk to serialize as.
Returns:
dict: A dictionary of URL GET query parameters to create the disk.
E.g.::
{
'DataDisk.1.Category': 'cloud',
'DataDisk.1.Size': 2000
}
"""
ddisk = 'DataDisk.%s.' % ordinal
out = {ddisk + 'Category': self.category}
if self.size:
out[ddisk + 'Size'] = self.size
if self.snapshot_id:
out[ddisk + 'SnapshotId'] = self.snapshot_id
if self.name:
out[ddisk + 'DiskName'] = self.name
if self.description:
out[ddisk + 'Description'] = self.description
if self.device:
out[ddisk + 'Device'] = self.device
return out
def __repr__(self):
return u'<DiskMapping %s type %s at %s>' % (
self.name, self.category, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
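# Illustrative usage sketch (not part of the original module; the values are
# hypothetical): serializing a DiskMapping as the second data disk of an
# instance-creation request.
#
#     mapping = DiskMapping('cloud', size=100, name='scratch',
#                           description='scratch space for batch jobs')
#     mapping.api_dict(ordinal=2)
#     # -> {'DataDisk.2.Category': 'cloud',
#     #     'DataDisk.2.Size': 100,
#     #     'DataDisk.2.DiskName': 'scratch',
#     #     'DataDisk.2.Description': 'scratch space for batch jobs'}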
class Image(object):
def __init__(self, image_id, image_version, name, description, size,
architecture, owner_alias, os_name):
"""Constructor.
Args:
image_id (str): The id of the image.
image_version (str): The version of the image.
name (str): Name of the image.
description (str): The description.
size (int): Its size in GB.
architecture (str): The architecture - either i386 or x86_64.
            owner_alias (str): The image owner: 'system', 'self', or 'others'.
os_name (str): The os name.
"""
self.image_id = image_id
        self.image_version = image_version
        self.name = name
        self.description = description
self.size = size
self.architecture = architecture
self.owner_alias = owner_alias
self.os_name = os_name
def __repr__(self):
return u'<Image %s(%s) for platform %s and arch %s at %s>' % (
self.image_id, self.description, self.os_name, self.architecture,
id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class SecurityGroupInfo(object):
def __init__(self, security_group_id, description):
"""Constructor.
Args:
security_group_id (str): The id of the security group.
description (str): The description of the security group.
"""
self.security_group_id = security_group_id
self.description = description
def __repr__(self):
return u'<SecurityGroupInfo %s, %s at %s>' % (
self.security_group_id, self.description, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class SecurityGroupPermission(object):
def __init__(self, ip_protocol, port_range, source_cidr_ip,
source_group_id, policy, nic_type):
"""Constructor.
Args:
ip_protocol (str): TCP, UDP, ICMP, GRE or ALL
            port_range (str): For TCP/UDP, a range within 1 to 65535; for other protocols, '-1/-1'.
source_cidr_ip (str): Source IP address range.
source_group_id (str): Source security group.
policy (str): Accept, Drop or Reject.
nic_type (str): internet or intranet.
"""
self.ip_protocol = ip_protocol
self.port_range = port_range
self.source_cidr_ip = source_cidr_ip
self.source_group_id = source_group_id
self.policy = policy
self.nic_type = nic_type
def __repr__(self):
return u'<SecurityGroupPermission %s %s %s from %s at %s>' % (
self.policy, self.ip_protocol, self.port_range,
self.source_cidr_ip
if self.source_cidr_ip else self.source_group_id,
id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class SecurityGroup(object):
def __init__(self, region_id, security_group_id, description, permissions):
"""Constructor.
Args:
region_id (str): The id of the region for the security group.
security_group_id (str): The id of the security group.
description (str): The description of the security group.
            permissions (list): List of SecurityGroupPermission.
"""
self.region_id = region_id
self.security_group_id = security_group_id
self.description = description
self.permissions = permissions
def __repr__(self):
return u'<SecurityGroup %s, %s at %s>' % (
self.security_group_id, self.description, id(self))
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
class Zone(object):
def __init__(self, zone_id, local_name, available_resource_creation=None,
available_disk_types=None):
"""Constructor.
Args:
zone_id (str): The id of the zone.
local_name (str): The local name of the zone.
            available_resource_creation (list): The resource types which can
                be created in this zone; members are 'Instance' and/or 'Disk'.
            available_disk_types (list): The disk categories which can be
                created in the zone; members are 'cloud' and/or 'ephemeral'.
"""
if available_resource_creation is None:
available_resource_creation = []
if available_disk_types is None:
available_disk_types = []
self.zone_id = zone_id
self.local_name = local_name
self.available_resource_creation = available_resource_creation
self.available_disk_types = available_disk_types
def __repr__(self):
return u'<Zone %s (%s) at %s>' % (
self.zone_id, self.local_name, id(self))
def disk_supported(self, disk_type):
"""Convenience method to say whether a disk type is supported.
Args:
disk_type (str): either 'cloud' or 'ephemeral'.
Returns:
boolean
"""
return disk_type in self.available_disk_types
def resource_creation_supported(self, resource_type):
"""Convenience method to say whether a resource can be created.
Args:
resource_type (str): either 'Instance' or 'Disk'
Returns:
Boolean. True if the resource creation is supported.
"""
return resource_type in self.available_resource_creation
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
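# Illustrative usage sketch (not part of the original module; the zone id and
# its capabilities are hypothetical):
#
#     zone = Zone('cn-hangzhou-b', 'Hangzhou Zone B',
#                 available_resource_creation=['Instance', 'Disk'],
#                 available_disk_types=['cloud'])
#     zone.disk_supported('cloud')                  # True
#     zone.disk_supported('ephemeral')              # False
#     zone.resource_creation_supported('Instance')  # True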
|
|
import logging
import json
import asyncio
from . import jobs
logger = logging.getLogger(__name__)
async def start_master(host="", port=48484, *, loop=None):
"""
Starts a new HighFive master at the given host and port, and returns it.
"""
loop = loop if loop is not None else asyncio.get_event_loop()
manager = jobs.JobManager(loop=loop)
workers = set()
server = await loop.create_server(
lambda: WorkerProtocol(manager, workers), host, port)
return Master(server, manager, workers, loop=loop)
class WorkerProtocol(asyncio.Protocol):
"""
The asyncio protocol used to handle remote workers. This class finds lines
of input and delegates their processing to a Worker object.
"""
def __init__(self, manager, workers):
self._manager = manager
self._workers = workers
def connection_made(self, transport):
"""
        Called when a new remote worker connection is established. Finishes
        setting up the protocol object.
"""
if self._manager.is_closed():
logger.debug("worker tried to connect while manager was closed")
return
logger.debug("new worker connected")
self._transport = transport
self._buffer = bytearray()
self._worker = Worker(self._transport, self._manager)
self._workers.add(self._worker)
def data_received(self, data):
"""
Called when a chunk of data is received from the remote worker. These
chunks are stored in a buffer. When a complete line is found in the
        buffer, it is removed and sent to line_received().
"""
self._buffer.extend(data)
while True:
i = self._buffer.find(b"\n")
if i == -1:
break
line = self._buffer[:i+1]
self._buffer = self._buffer[i+1:]
self.line_received(line)
def line_received(self, line):
"""
        Called when a complete line is received from the remote worker. Decodes
a response object from the line, then passes it to the worker object.
"""
response = json.loads(line.decode("utf-8"))
self._worker.response_received(response)
def connection_lost(self, exc):
"""
Called when the connection to the remote worker is broken. Closes the
worker.
"""
logger.debug("worker connection lost")
self._worker.close()
self._workers.remove(self._worker)
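# Illustrative sketch of the buffering handled by data_received above (the
# JSON payloads are hypothetical; real responses come from the jobs module):
#
#     protocol.data_received(b'{"result": 4')    # no newline yet -> buffered
#     protocol.data_received(b'2}\n{"result"')   # completes one line, so
#                                                # line_received() runs once;
#                                                # the remainder stays buffered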
class Worker:
"""
Handles job retrieval and result reporting for remote workers.
"""
def __init__(self, transport, manager):
self._transport = transport
self._manager = manager
self._closed = False
self._load_job()
def _load_job(self):
"""
Initiates a job load from the job manager.
"""
self._job = None
self._manager.get_job(self._job_loaded)
def _job_loaded(self, job):
"""
Called when a job has been found for the worker to run. Sends the job's
RPC to the remote worker.
"""
logger.debug("worker {} found a job".format(id(self)))
if self._closed:
self._manager.return_job(job)
return
self._job = job
call_obj = self._job.get_call()
call = (json.dumps(call_obj) + "\n").encode("utf-8")
self._transport.write(call)
def response_received(self, response):
"""
Called when a response to a job RPC has been received. Decodes the
response and finalizes the result, then reports the result to the
job manager.
"""
if self._closed:
return
assert self._job is not None
logger.debug("worker {} got response".format(id(self)))
result = self._job.get_result(response)
self._manager.add_result(self._job, result)
self._load_job()
def close(self):
"""
Closes the worker. No more jobs will be handled by the worker, and any
running job is immediately returned to the job manager.
"""
if self._closed:
return
self._closed = True
if self._job is not None:
self._manager.return_job(self._job)
self._job = None
class Master:
def __init__(self, server, manager, workers, *, loop):
self._server = server
self._manager = manager
self._workers = workers
self._loop = loop
self._closed = False
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
self.close()
await self.wait_closed()
def run(self, job_list):
"""
        Creates a job set from the jobs in the iterable job_list and returns it.
"""
if self._closed:
raise RuntimeError("master is closed")
return self._manager.add_job_set(job_list)
def close(self):
"""
Starts closing the HighFive master. The server will be closed and
all queued job sets will be cancelled.
"""
if self._closed:
return
self._closed = True
self._server.close()
self._manager.close()
for worker in self._workers:
worker.close()
async def wait_closed(self):
"""
Waits until the HighFive master closes completely.
"""
await self._server.wait_closed()
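# Illustrative usage sketch (not part of the original module; the job objects
# and how their results are consumed depend on the jobs module, which is not
# shown here):
#
#     async def main():
#         async with await start_master(port=48484) as master:
#             job_set = master.run(my_jobs)  # my_jobs: an iterable of jobs
#             ...                            # consume results via job_set
#
#     asyncio.get_event_loop().run_until_complete(main())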
|
|
from django.apps import apps
import logging
from future.moves.urllib.parse import urljoin
import random
import requests
from framework.exceptions import HTTPError
from framework.celery_tasks import app as celery_app
from framework.postcommit_tasks.handlers import enqueue_postcommit_task, get_task_from_postcommit_queue
from framework import sentry
from website import settings, mails
from website.util.share import GraphNode, format_contributor, format_subject
logger = logging.getLogger(__name__)
@celery_app.task(ignore_results=True, max_retries=5, default_retry_delay=60)
def on_preprint_updated(preprint_id, update_share=True, share_type=None, old_subjects=None, saved_fields=None):
# WARNING: Only perform Read-Only operations in an asynchronous task, until Repeatable Read/Serializable
# transactions are implemented in View and Task application layers.
from osf.models import Preprint
preprint = Preprint.load(preprint_id)
if old_subjects is None:
old_subjects = []
need_update = bool(preprint.SEARCH_UPDATE_FIELDS.intersection(saved_fields or {}))
if need_update:
preprint.update_search()
if should_update_preprint_identifiers(preprint, old_subjects, saved_fields):
update_or_create_preprint_identifiers(preprint)
if update_share:
update_preprint_share(preprint, old_subjects, share_type)
def should_update_preprint_identifiers(preprint, old_subjects, saved_fields):
# Only update identifier metadata iff...
return (
# DOI didn't just get created
preprint and preprint.date_published and
not (saved_fields and 'preprint_doi_created' in saved_fields) and
# subjects aren't being set
not old_subjects and
# preprint isn't QA test
preprint.should_request_identifiers
)
def update_or_create_preprint_identifiers(preprint):
try:
preprint.request_identifier_update(category='doi')
except HTTPError as err:
sentry.log_exception()
sentry.log_message(err.args[0])
def update_or_enqueue_on_preprint_updated(preprint_id, update_share=True, share_type=None, old_subjects=None, saved_fields=None):
task = get_task_from_postcommit_queue(
'website.preprints.tasks.on_preprint_updated',
predicate=lambda task: task.kwargs['preprint_id'] == preprint_id
)
if task:
old_subjects = old_subjects or []
task_subjects = task.kwargs['old_subjects'] or []
task.kwargs['update_share'] = update_share or task.kwargs['update_share']
task.kwargs['share_type'] = share_type or task.kwargs['share_type']
task.kwargs['old_subjects'] = old_subjects + task_subjects
task.kwargs['saved_fields'] = list(set(task.kwargs['saved_fields']).union(saved_fields))
else:
enqueue_postcommit_task(
on_preprint_updated,
(),
{'preprint_id': preprint_id, 'old_subjects': old_subjects, 'update_share': update_share, 'share_type': share_type, 'saved_fields': saved_fields},
celery=True
)
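# Illustrative note on the merge above (identifiers and field names are
# hypothetical): if a task for preprint 'abc12' is already queued with
# old_subjects=[1] and saved_fields=['title'], then calling
# update_or_enqueue_on_preprint_updated('abc12', old_subjects=[2],
# saved_fields=['description']) updates the queued task in place to
# old_subjects=[1, 2] and saved_fields containing both 'title' and
# 'description', rather than enqueueing a second task.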
def update_preprint_share(preprint, old_subjects=None, share_type=None):
if settings.SHARE_URL:
if not preprint.provider.access_token:
raise ValueError('No access_token for {}. Unable to send {} to SHARE.'.format(preprint.provider, preprint))
share_type = share_type or preprint.provider.share_publish_type
_update_preprint_share(preprint, old_subjects, share_type)
def _update_preprint_share(preprint, old_subjects, share_type):
# Any modifications to this function may need to change _async_update_preprint_share
data = serialize_share_preprint_data(preprint, share_type, old_subjects)
resp = send_share_preprint_data(preprint, data)
try:
resp.raise_for_status()
except Exception:
if resp.status_code >= 500:
_async_update_preprint_share.delay(preprint._id, old_subjects, share_type)
else:
send_desk_share_preprint_error(preprint, resp, 0)
@celery_app.task(bind=True, max_retries=4, acks_late=True)
def _async_update_preprint_share(self, preprint_id, old_subjects, share_type):
# Any modifications to this function may need to change _update_preprint_share
# Takes preprint_id to ensure async retries push fresh data
Preprint = apps.get_model('osf.Preprint')
preprint = Preprint.load(preprint_id)
data = serialize_share_preprint_data(preprint, share_type, old_subjects)
resp = send_share_preprint_data(preprint, data)
try:
resp.raise_for_status()
except Exception as e:
if resp.status_code >= 500:
if self.request.retries == self.max_retries:
send_desk_share_preprint_error(preprint, resp, self.request.retries)
raise self.retry(
exc=e,
countdown=(random.random() + 1) * min(60 + settings.CELERY_RETRY_BACKOFF_BASE ** self.request.retries, 60 * 10)
)
else:
send_desk_share_preprint_error(preprint, resp, self.request.retries)
def serialize_share_preprint_data(preprint, share_type, old_subjects):
return {
'data': {
'type': 'NormalizedData',
'attributes': {
'tasks': [],
'raw': None,
'data': {'@graph': format_preprint(preprint, share_type, old_subjects)}
}
}
}
def send_share_preprint_data(preprint, data):
    resp = requests.post(
        '{}api/v2/normalizeddata/'.format(settings.SHARE_URL),
        json=data,
        headers={
            'Authorization': 'Bearer {}'.format(preprint.provider.access_token),
            'Content-Type': 'application/vnd.api+json',
        },
    )
logger.debug(resp.content)
return resp
def format_preprint(preprint, share_type, old_subjects=None):
if old_subjects is None:
old_subjects = []
from osf.models import Subject
old_subjects = [Subject.objects.get(id=s) for s in old_subjects]
preprint_graph = GraphNode(share_type, **{
'title': preprint.title,
'description': preprint.description or '',
'is_deleted': (
(not preprint.verified_publishable and not preprint.is_retracted) or
preprint.tags.filter(name='qatest').exists()
),
'date_updated': preprint.modified.isoformat(),
'date_published': preprint.date_published.isoformat() if preprint.date_published else None
})
to_visit = [
preprint_graph,
GraphNode('workidentifier', creative_work=preprint_graph, uri=urljoin(settings.DOMAIN, preprint._id + '/'))
]
if preprint.get_identifier('doi'):
to_visit.append(GraphNode('workidentifier', creative_work=preprint_graph, uri='https://doi.org/{}'.format(preprint.get_identifier('doi').value)))
if preprint.provider.domain_redirect_enabled:
to_visit.append(GraphNode('workidentifier', creative_work=preprint_graph, uri=preprint.absolute_url))
if preprint.article_doi:
# Article DOI refers to a clone of this preprint on another system and therefore does not qualify as an identifier for this preprint
related_work = GraphNode('creativework')
to_visit.append(GraphNode('workrelation', subject=preprint_graph, related=related_work))
to_visit.append(GraphNode('workidentifier', creative_work=related_work, uri='https://doi.org/{}'.format(preprint.article_doi)))
preprint_graph.attrs['tags'] = [
GraphNode('throughtags', creative_work=preprint_graph, tag=GraphNode('tag', name=tag))
for tag in preprint.tags.values_list('name', flat=True) if tag
]
current_subjects = [
GraphNode('throughsubjects', creative_work=preprint_graph, is_deleted=False, subject=format_subject(s))
for s in preprint.subjects.all()
]
deleted_subjects = [
GraphNode('throughsubjects', creative_work=preprint_graph, is_deleted=True, subject=format_subject(s))
for s in old_subjects if not preprint.subjects.filter(id=s.id).exists()
]
preprint_graph.attrs['subjects'] = current_subjects + deleted_subjects
to_visit.extend(format_contributor(preprint_graph, user, preprint.get_visible(user), i) for i, user in enumerate(preprint.contributors))
visited = set()
to_visit.extend(preprint_graph.get_related())
    while to_visit:
        n = to_visit.pop(0)
        if n in visited:
            continue
        visited.add(n)
        to_visit.extend(list(n.get_related()))
return [node.serialize() for node in visited]
def send_desk_share_preprint_error(preprint, resp, retries):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.SHARE_PREPRINT_ERROR_DESK,
preprint=preprint,
resp=resp,
retries=retries,
can_change_preferences=False,
logo=settings.OSF_PREPRINTS_LOGO
)
|
|
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from textwrap import dedent
import sys
from flexmock import flexmock
import pytest
import atomic_reactor.utils.koji as koji_util
from atomic_reactor import util
from atomic_reactor.utils.cachito import CachitoAPI
from atomic_reactor.constants import PLUGIN_BUILD_ORCHESTRATE_KEY
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner, PluginFailedException
from atomic_reactor.plugins import pre_reactor_config
from atomic_reactor.plugins.build_orchestrate_build import (
WORKSPACE_KEY_OVERRIDE_KWARGS, OrchestrateBuildPlugin)
from atomic_reactor.plugins.pre_reactor_config import (
ReactorConfigPlugin, WORKSPACE_CONF_KEY, ReactorConfig)
from atomic_reactor.plugins.pre_resolve_remote_source import ResolveRemoteSourcePlugin
from atomic_reactor.source import SourceConfig
from tests.constants import MOCK_SOURCE
from tests.stubs import StubInsideBuilder, StubSource
KOJI_HUB = 'http://koji.com/hub'
KOJI_TASK_ID = 123
KOJI_TASK_OWNER = 'spam'
CACHITO_URL = 'https://cachito.example.com'
CACHITO_REQUEST_ID = 98765
CACHITO_REQUEST_DOWNLOAD_URL = '{}/api/v1/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID)
CACHITO_REQUEST_CONFIG_URL = '{}/api/v1/requests/{}/configuration-files'.format(
CACHITO_URL,
CACHITO_REQUEST_ID
)
CACHITO_ICM_URL = '{}/api/v1/requests/{}/content-manifest'.format(
CACHITO_URL,
CACHITO_REQUEST_ID
)
REMOTE_SOURCE_REPO = 'https://git.example.com/team/repo.git'
REMOTE_SOURCE_REF = 'b55c00f45ec3dfee0c766cea3d395d6e21cc2e5a'
REMOTE_SOURCE_PACKAGES = [
{
'name': 'test-package',
'type': 'npm',
'version': '0.0.1'
}
]
CACHITO_SOURCE_REQUEST = {
'id': CACHITO_REQUEST_ID,
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'environment_variables': {
'GO111MODULE': 'on',
'GOPATH': 'deps/gomod',
'GOCACHE': 'deps/gomod',
},
'flags': ['enable-confeti', 'enable-party-popper'],
'pkg_managers': ['gomod'],
'dependencies': [
{
'name': 'github.com/op/go-logging',
'type': 'gomod',
'version': 'v0.1.1',
}
],
'packages': [
{
'name': 'github.com/spam/bacon/v2',
'type': 'gomod',
'version': 'v2.0.3'
}
],
'configuration_files': CACHITO_REQUEST_CONFIG_URL,
'content_manifest': CACHITO_ICM_URL,
'extra_cruft': 'ignored',
}
REMOTE_SOURCE_JSON = {
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'environment_variables': {
'GO111MODULE': 'on',
'GOPATH': 'deps/gomod',
'GOCACHE': 'deps/gomod',
},
'flags': ['enable-confeti', 'enable-party-popper'],
'pkg_managers': ['gomod'],
'dependencies': [
{
'name': 'github.com/op/go-logging',
'type': 'gomod',
'version': 'v0.1.1',
}
],
'packages': [
{
'name': 'github.com/spam/bacon/v2',
'type': 'gomod',
'version': 'v2.0.3'
}
],
'configuration_files': CACHITO_REQUEST_CONFIG_URL,
'content_manifest': CACHITO_ICM_URL,
}
@pytest.fixture
def workflow(tmpdir, user_params):
workflow = DockerBuildWorkflow(source=MOCK_SOURCE)
# Stash the tmpdir in workflow so it can be used later
workflow._tmpdir = tmpdir
class MockSource(StubSource):
def __init__(self, workdir):
super(MockSource, self).__init__()
self.workdir = workdir
workflow.source = MockSource(str(tmpdir))
builder = StubInsideBuilder().for_workflow(workflow)
builder.set_df_path(str(tmpdir))
builder.tasker = flexmock()
workflow.builder = flexmock(builder)
workflow.buildstep_plugins_conf = [{'name': PLUGIN_BUILD_ORCHESTRATE_KEY}]
mock_repo_config(workflow)
mock_reactor_config(workflow)
mock_build_json()
mock_cachito_api(workflow)
mock_koji()
return workflow
def mock_reactor_config(workflow, data=None):
if data is None:
data = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
""".format(CACHITO_URL, workflow._tmpdir))
workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
workflow._tmpdir.join('cert').write('')
config = util.read_yaml(data, 'schemas/config.json')
workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = ReactorConfig(config)
def mock_build_json(build_json=None):
if build_json is None:
build_json = {'metadata': {'labels': {'koji-task-id': str(KOJI_TASK_ID)}}}
flexmock(util).should_receive('get_build_json').and_return(build_json)
def mock_repo_config(workflow, data=None):
if data is None:
data = dedent("""\
remote_source:
repo: {}
ref: {}
""".format(REMOTE_SOURCE_REPO, REMOTE_SOURCE_REF))
workflow._tmpdir.join('container.yaml').write(data)
# The repo config is read when SourceConfig is initialized. Force
# reloading here to make usage easier.
workflow.source.config = SourceConfig(str(workflow._tmpdir))
def mock_cachito_api(workflow, user=KOJI_TASK_OWNER, source_request=None,
dependency_replacements=None):
if source_request is None:
source_request = CACHITO_SOURCE_REQUEST
(flexmock(CachitoAPI)
.should_receive('request_sources')
.with_args(
repo=REMOTE_SOURCE_REPO,
ref=REMOTE_SOURCE_REF,
user=user,
dependency_replacements=dependency_replacements,
)
.and_return({'id': CACHITO_REQUEST_ID}))
(flexmock(CachitoAPI)
.should_receive('wait_for_request')
.with_args({'id': CACHITO_REQUEST_ID})
.and_return(source_request))
(flexmock(CachitoAPI)
.should_receive('download_sources')
.with_args(source_request, dest_dir=str(workflow._tmpdir))
     .and_return(expected_download_path(workflow)))
(flexmock(CachitoAPI)
.should_receive('assemble_download_url')
.with_args(source_request)
.and_return(CACHITO_REQUEST_DOWNLOAD_URL))
def mock_koji(user=KOJI_TASK_OWNER):
koji_session = flexmock()
flexmock(pre_reactor_config).should_receive('get_koji_session').and_return(koji_session)
flexmock(koji_util).should_receive('get_koji_task_owner').and_return({'name': user})
def expected_download_path(workflow):
return workflow._tmpdir.join('source.tar.gz')
def setup_function(*args):
# IMPORTANT: This needs to be done to ensure mocks at the module
# level are reset between test cases.
sys.modules.pop('pre_resolve_remote_source', None)
def teardown_function(*args):
# IMPORTANT: This needs to be done to ensure mocks at the module
# level are reset between test cases.
sys.modules.pop('pre_resolve_remote_source', None)
@pytest.mark.parametrize('scratch', (True, False))
@pytest.mark.parametrize('dr_strs, dependency_replacements',
((None, None),
(['gomod:foo.bar/project:2'],
[{
'name': 'foo.bar/project',
'type': 'gomod',
'version': '2'}]),
(['gomod:foo.bar/project:2:newproject'],
[{
'name': 'foo.bar/project',
'type': 'gomod',
'new_name': 'newproject',
'version': '2'}]),
(['gomod:foo.bar/project'], None)))
def test_resolve_remote_source(workflow, scratch, dr_strs, dependency_replacements):
build_json = {'metadata': {'labels': {'koji-task-id': str(KOJI_TASK_ID)}}}
mock_build_json(build_json=build_json)
mock_cachito_api(workflow, dependency_replacements=dependency_replacements)
workflow.user_params['scratch'] = scratch
err = None
if dr_strs and not scratch:
err = 'Cachito dependency replacements are only allowed for scratch builds'
if dr_strs and any(len(dr.split(':')) < 3 for dr in dr_strs):
err = 'Cachito dependency replacements must be'
run_plugin_with_args(
workflow,
dependency_replacements=dr_strs,
expect_error=err
)
@pytest.mark.parametrize('build_json', ({}, {'metadata': {}}))
def test_no_koji_user(workflow, build_json, caplog):
reactor_config = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
""".format(CACHITO_URL, workflow._tmpdir))
mock_reactor_config(workflow, reactor_config)
mock_build_json(build_json=build_json)
mock_cachito_api(workflow, user='unknown_user')
log_msg = 'No build metadata'
if build_json:
log_msg = 'Invalid Koji task ID'
run_plugin_with_args(workflow)
assert log_msg in caplog.text
@pytest.mark.parametrize('pop_key', ('repo', 'ref', 'packages'))
def test_invalid_remote_source_structure(workflow, pop_key):
source_request = {
'id': CACHITO_REQUEST_ID,
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'packages': REMOTE_SOURCE_PACKAGES,
}
source_request.pop(pop_key)
mock_cachito_api(workflow, source_request=source_request)
run_plugin_with_args(workflow, expect_error='Received invalid source request')
def test_ignore_when_missing_cachito_config(workflow):
reactor_config = dedent("""\
version: 1
koji:
hub_url: /
root_url: ''
auth: {}
""")
mock_reactor_config(workflow, reactor_config)
result = run_plugin_with_args(workflow, expect_result=False)
assert result is None
def test_invalid_cert_reference(workflow):
bad_certs_dir = str(workflow._tmpdir.join('invalid-dir'))
reactor_config = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
""".format(CACHITO_URL, bad_certs_dir))
mock_reactor_config(workflow, reactor_config)
run_plugin_with_args(workflow, expect_error="Cachito ssl_certs_dir doesn't exist")
def test_ignore_when_missing_remote_source_config(workflow):
remote_source_config = dedent("""---""")
mock_repo_config(workflow, remote_source_config)
result = run_plugin_with_args(workflow, expect_result=False)
assert result is None
@pytest.mark.parametrize(('build_json', 'log_entry'), (
({}, 'No build metadata'),
({'metadata': None}, 'Invalid Koji task ID'),
({'metadata': {}}, 'Invalid Koji task ID'),
({'metadata': {'labels': {}}}, 'Invalid Koji task ID'),
({'metadata': {'labels': {'koji-task-id': None}}}, 'Invalid Koji task ID'),
({'metadata': {'labels': {'koji-task-id': 'not-an-int'}}}, 'Invalid Koji task ID'),
))
def test_bad_build_metadata(workflow, build_json, log_entry, caplog):
mock_build_json(build_json=build_json)
mock_cachito_api(workflow, user='unknown_user')
run_plugin_with_args(workflow)
assert log_entry in caplog.text
assert 'unknown_user' in caplog.text
def run_plugin_with_args(workflow, dependency_replacements=None, expect_error=None,
expect_result=True):
runner = PreBuildPluginsRunner(
workflow.builder.tasker,
workflow,
[
{'name': ResolveRemoteSourcePlugin.key,
'args': {'dependency_replacements': dependency_replacements}},
]
)
if expect_error:
with pytest.raises(PluginFailedException, match=expect_error):
runner.run()
return
results = runner.run()[ResolveRemoteSourcePlugin.key]
if expect_result:
assert results['annotations']['remote_source_url']
assert results['remote_source_json'] == REMOTE_SOURCE_JSON
        assert results['remote_source_path'] == expected_download_path(workflow)
# A result means the plugin was enabled and executed successfully.
# Let's verify the expected side effects.
orchestrator_build_workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
worker_params = orchestrator_build_workspace[WORKSPACE_KEY_OVERRIDE_KWARGS][None]
assert worker_params['remote_source_url'] == CACHITO_REQUEST_DOWNLOAD_URL
assert worker_params['remote_source_configs'] == CACHITO_REQUEST_CONFIG_URL
assert worker_params['remote_source_build_args'] == {
'GO111MODULE': 'on',
'GOPATH': '/remote-source/deps/gomod',
'GOCACHE': '/remote-source/deps/gomod',
'CACHITO_ENV_FILE': '/remote-source/cachito.env',
}
assert worker_params['remote_source_icm_url'] == CACHITO_ICM_URL
return results
|
|
"""
Test cases for the L{xmantissa.webadmin} module.
"""
from twisted.trial.unittest import TestCase
from nevow.athena import LivePage
from nevow.context import WovenContext
from nevow.testutil import FakeRequest
from nevow.loaders import stan
from nevow.tags import html, head, body, directive
from nevow.inevow import IRequest
from axiom.store import Store
from axiom.userbase import LoginSystem, LoginMethod
from axiom.dependency import installOn
from axiom.plugins.mantissacmd import Mantissa
from xmantissa.webadmin import (
LocalUserBrowser, LocalUserBrowserFragment,
UserInteractionFragment, EndowFragment, DepriveFragment,
SuspendFragment, UnsuspendFragment)
from xmantissa.product import Product
class UserInteractionFragmentTestCase(TestCase):
def setUp(self):
"""
Create a site store and a user store with a L{LocalUserBrowser}
installed on it.
"""
self.siteStore = Store()
self.loginSystem = LoginSystem(store=self.siteStore)
installOn(self.loginSystem, self.siteStore)
self.userStore = Store()
self.userStore.parent = self.siteStore
self.browser = LocalUserBrowser(store=self.userStore)
def test_createUser(self):
"""
Test that L{webadmin.UserInteractionFragment.createUser} method
actually creates a user.
"""
userInteractionFragment = UserInteractionFragment(self.browser)
userInteractionFragment.createUser(
u'testuser', u'localhost', u'password')
account = self.loginSystem.accountByAddress(u'testuser', u'localhost')
self.assertEquals(account.password, u'password')
def test_rendering(self):
"""
Test that L{webadmin.UserInteractionFragment} renders without raising
any exceptions.
"""
f = UserInteractionFragment(self.browser)
p = LivePage(
docFactory=stan(
html[
head(render=directive('liveglue')),
body(render=lambda ctx, data: f)]))
f.setFragmentParent(p)
ctx = WovenContext()
req = FakeRequest()
ctx.remember(req, IRequest)
d = p.renderHTTP(ctx)
def rendered(ign):
p.action_close(None)
d.addCallback(rendered)
return d
class ActionsTestCase(TestCase):
"""
Tests to verify that actions behave as expected.
@ivar siteStore: A site store containing an administrative user's account.
@ivar siteAccount: The L{axiom.userbase.LoginAccount} for the
administrator, in the site store.
@ivar siteMethod: The single L{axiom.userbase.LoginMethod} for the
administrator, in the site store.
@ivar localUserBrowserFragment: A L{LocalUserBrowserFragment} examining the
administrator's L{LocalUserBrowser} powerup.
"""
def setUp(self):
"""
Construct a site and user store with an administrator that can invoke the
web administrative tools, setting the instance variables described in
this class's docstring.
"""
self.siteStore = Store(filesdir=self.mktemp())
Mantissa().installSite(self.siteStore, u"localhost", u"", False)
Mantissa().installAdmin(self.siteStore, u'admin', u'localhost', u'asdf')
self.siteMethod = self.siteStore.findUnique(
LoginMethod, LoginMethod.localpart == u'admin')
self.siteAccount = self.siteMethod.account
userStore = self.siteAccount.avatars.open()
lub = userStore.findUnique(LocalUserBrowser)
self.localUserBrowserFragment = LocalUserBrowserFragment(lub)
def test_actionTypes(self):
"""
Verify that all the action methods expose the appropriate fragment
objects, with their attributes set to indicate the correct objects to
manipulate.
"""
myRowID = self.localUserBrowserFragment.linkToItem(self.siteMethod)
actionMap = [('installOn', EndowFragment),
('uninstallFrom', DepriveFragment),
('suspend', SuspendFragment),
('unsuspend', UnsuspendFragment)]
for action, fragmentType in actionMap:
resultFragment = self.localUserBrowserFragment.performAction(
action, myRowID)
self.failUnless(isinstance(resultFragment, fragmentType),
"%s does not return a %s" %
(action, fragmentType))
self.assertEquals(resultFragment.fragmentParent,
self.localUserBrowserFragment)
self.assertEquals(resultFragment.account, self.siteAccount)
class RenderingTestCase(TestCase):
"""
Test cases for HTML rendering of various fragments.
"""
def doRendering(self, fragmentClass):
"""
Verify that the given fragment class will render without raising an
exception.
"""
siteStore = Store()
loginSystem = LoginSystem(store=siteStore)
installOn(loginSystem, siteStore)
p = Product(store=siteStore, types=["xmantissa.webadmin.LocalUserBrowser",
"xmantissa.signup.SignupConfiguration"])
account = loginSystem.addAccount(u'testuser', u'localhost', None)
p.installProductOn(account.avatars.open())
f = fragmentClass(None, u'testuser', account)
p = LivePage(
docFactory=stan(
html[
head(render=directive('liveglue')),
body(render=lambda ctx, data: f)]))
f.setFragmentParent(p)
ctx = WovenContext()
req = FakeRequest()
ctx.remember(req, IRequest)
d = p.renderHTTP(ctx)
def rendered(ign):
p.action_close(None)
d.addCallback(rendered)
return d
def test_endowRendering(self):
"""
Verify that L{EndowFragment} can render without raising an exception.
"""
return self.doRendering(EndowFragment)
def test_depriveRendering(self):
"""
Verify that L{DepriveFragment} can render without raising an exception.
"""
return self.doRendering(DepriveFragment)
def test_suspendRendering(self):
"""
Verify that L{SuspendFragment} can render without raising an exception.
"""
return self.doRendering(SuspendFragment)
def test_unsuspendRendering(self):
"""
Verify that L{UnsuspendFragment} can render without raising an
exception.
"""
return self.doRendering(UnsuspendFragment)
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.utils import native
import base64
import glob
import logging
import pkg_resources
import os
import re
import sys
import time
import warnings
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('deluge')
def add_deluge_windows_install_dir_to_sys_path():
# Deluge does not install to python system on Windows, add the install directory to sys.path if it is found
if not (sys.platform.startswith('win') or os.environ.get('ProgramFiles')):
return
deluge_dir = os.path.join(os.environ['ProgramFiles'], 'Deluge')
log.debug('Looking for deluge install in %s' % deluge_dir)
if not os.path.isdir(deluge_dir):
return
deluge_egg = glob.glob(os.path.join(deluge_dir, 'deluge-*-py2.?.egg'))
if not deluge_egg:
return
minor_version = int(re.search(r'py2\.(\d).egg', deluge_egg[0]).group(1))
if minor_version != sys.version_info[1]:
log.verbose('Cannot use deluge from install directory because its python version doesn\'t match.')
return
log.debug('Found deluge install in %s adding to sys.path' % deluge_dir)
sys.path.append(deluge_dir)
for item in os.listdir(deluge_dir):
if item.endswith(('.egg', '.zip')):
sys.path.append(os.path.join(deluge_dir, item))
add_deluge_windows_install_dir_to_sys_path()
def install_pausing_reactor():
class PausingReactor(SelectReactor):
"""A SelectReactor that can be paused and resumed."""
def __init__(self):
SelectReactor.__init__(self)
self.paused = False
self._return_value = None
self._release_requested = False
self._mainLoopGen = None
# Older versions of twisted do not have the _started attribute, make it a synonym for running in that case
if not hasattr(self, '_started'):
PausingReactor._started = property(lambda self: self.running)
def _mainLoopGenerator(self):
"""Generator that acts as mainLoop, but yields when requested."""
while self._started:
try:
while self._started:
if self._release_requested:
self._release_requested = False
self.paused = True
yield self._return_value
self.paused = False
self.iterate()
except KeyboardInterrupt:
# Keyboard interrupt pauses the reactor
self.pause()
except GeneratorExit:
# GeneratorExit means stop the generator; Do it cleanly by stopping the whole reactor.
log.debug('Got GeneratorExit, stopping reactor.', exc_info=True)
self.paused = False
self.stop()
except:
twisted_log.msg("Unexpected error in main loop.")
twisted_log.err()
else:
twisted_log.msg('Main loop terminated.')
def run(self, installSignalHandlers=False):
"""Starts or resumes the reactor."""
if not self._started:
self.startRunning(installSignalHandlers)
self._mainLoopGen = self._mainLoopGenerator()
try:
return next(self._mainLoopGen)
except StopIteration:
pass
def pause(self, return_value=None):
"""Causes reactor to pause after this iteration.
If :return_value: is specified, it will be returned by the reactor.run call."""
self._return_value = return_value
self._release_requested = True
def stop(self):
"""Stops the reactor."""
SelectReactor.stop(self)
# If this was called while the reactor was paused we have to resume in order for it to complete
if self.paused:
self.run()
# These need to be re-registered so that the PausingReactor can be safely restarted after a stop
self.addSystemEventTrigger('during', 'shutdown', self.crash)
self.addSystemEventTrigger('during', 'shutdown', self.disconnectAll)
# Configure twisted to use the PausingReactor.
installReactor(PausingReactor())
@event('manager.shutdown')
def stop_reactor(manager):
"""Shut down the twisted reactor after all tasks have run."""
if not reactor._stopped:
log.debug('Stopping twisted reactor.')
reactor.stop()
# Some twisted import is throwing a warning see #2434
warnings.filterwarnings('ignore', message='Not importing directory .*')
try:
from twisted.python import log as twisted_log
from twisted.internet.main import installReactor
from twisted.internet.selectreactor import SelectReactor
except ImportError:
# If twisted is not found, errors will be shown later
pass
else:
install_pausing_reactor()
try:
# These have to wait until reactor has been installed to import
from twisted.internet import reactor
from deluge.ui.client import client
from deluge.ui.common import get_localhost_auth
except (ImportError, pkg_resources.DistributionNotFound):
# If deluge is not found, errors will be shown later
pass
class DelugePlugin(object):
"""Base class for deluge plugins, contains settings and methods for connecting to a deluge daemon."""
def on_task_start(self, task, config):
"""Raise a DependencyError if our dependencies aren't available"""
try:
from deluge.ui.client import client
except ImportError as e:
log.debug('Error importing deluge: %s' % e)
raise plugin.DependencyError('deluge', 'deluge',
                                         'Deluge >=1.2 module and its dependencies required. ImportError: %s' % e, log)
try:
from twisted.internet import reactor
except:
raise plugin.DependencyError('deluge', 'twisted.internet', 'Twisted.internet package required', log)
def on_task_abort(self, task, config):
pass
def prepare_connection_info(self, config):
config.setdefault('host', 'localhost')
config.setdefault('port', 58846)
if 'user' in config or 'pass' in config:
warnings.warn('deluge `user` and `pass` options have been renamed `username` and `password`',
DeprecationWarning)
config.setdefault('username', config.get('user', ''))
config.setdefault('password', config.get('pass', ''))
config.setdefault('username', '')
config.setdefault('password', '')
def on_disconnect(self):
"""Pauses the reactor. Gets called when we disconnect from the daemon."""
# pause the reactor, so flexget can continue
reactor.callLater(0, reactor.pause)
def on_connect_fail(self, result):
"""Pauses the reactor, returns PluginError. Gets called when connection to deluge daemon fails."""
log.debug('Connect to deluge daemon failed, result: %s' % result)
reactor.callLater(0, reactor.pause, plugin.PluginError('Could not connect to deluge daemon', log))
def on_connect_success(self, result, task, config):
"""Gets called when successfully connected to the daemon. Should do the work then call client.disconnect"""
raise NotImplementedError
def connect(self, task, config):
"""Connects to the deluge daemon and runs on_connect_success """
if config['host'] in ['localhost', '127.0.0.1'] and not config.get('username'):
            # If a username is not specified, we have to do a lookup for the localclient username/password
auth = get_localhost_auth()
if auth[0]:
config['username'], config['password'] = auth
else:
raise plugin.PluginError('Unable to get local authentication info for Deluge. You may need to '
                                         'specify a username and password from your Deluge auth file.')
client.set_disconnect_callback(self.on_disconnect)
d = client.connect(
host=config['host'],
port=config['port'],
username=config['username'],
password=config['password'])
d.addCallback(self.on_connect_success, task, config).addErrback(self.on_connect_fail)
result = reactor.run()
if isinstance(result, Exception):
raise result
return result
class InputDeluge(DelugePlugin):
"""Create entries for torrents in the deluge session."""
#
settings_map = {
'name': 'title',
'hash': 'torrent_info_hash',
'num_peers': 'torrent_peers',
'num_seeds': 'torrent_seeds',
'progress': 'deluge_progress',
'seeding_time': ('deluge_seed_time', lambda time: time / 3600),
'private': 'deluge_private',
'state': 'deluge_state',
'eta': 'deluge_eta',
'ratio': 'deluge_ratio',
'move_on_completed_path': 'deluge_movedone',
'save_path': 'deluge_path',
'label': 'deluge_label',
'total_size': ('content_size', lambda size: size / 1024 / 1024),
'files': ('content_files', lambda file_dicts: [f['path'] for f in file_dicts])}
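    # Illustrative example of how settings_map is applied (the numbers are
    # hypothetical): a plain string maps the deluge field name directly, while
    # a (key, func) tuple also transforms the value. For instance, a deluge
    # 'total_size' of 1048576000 bytes is stored as entry['content_size'] == 1000
    # (MiB), and a 'seeding_time' of 7200 seconds becomes
    # entry['deluge_seed_time'] == 2 (hours).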
def __init__(self):
self.entries = []
schema = {
'anyOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'host': {'type': 'string'},
'port': {'type': 'integer'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'config_path': {'type': 'string', 'format': 'path'},
'filter': {
'type': 'object',
'properties': {
'label': {'type': 'string'},
'state': {
'type': 'string',
'enum': ['active', 'downloading', 'seeding', 'queued', 'paused']
}
},
'additionalProperties': False
}
},
'additionalProperties': False
}
]
}
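    # Illustrative task configuration accepted by the schema above (shown as
    # YAML inside comments; host, config_path and filter values are
    # hypothetical, and the plugin name this class is registered under is not
    # shown in this excerpt):
    #
    #     from_deluge:
    #       host: localhost
    #       port: 58846
    #       config_path: ~/.config/deluge
    #       filter:
    #         label: tv
    #         state: seeding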
def prepare_config(self, config):
if isinstance(config, bool):
config = {}
if 'filter' in config:
filter = config['filter']
if 'label' in filter:
filter['label'] = filter['label'].lower()
if 'state' in filter:
filter['state'] = filter['state'].capitalize()
self.prepare_connection_info(config)
return config
def on_task_input(self, task, config):
"""Generates and returns a list of entries from the deluge daemon."""
# Reset the entries list
self.entries = []
# Call connect, entries get generated if everything is successful
self.connect(task, self.prepare_config(config))
return self.entries
def on_connect_success(self, result, task, config):
"""Creates a list of FlexGet entries from items loaded in deluge and stores them to self.entries"""
from deluge.ui.client import client
def on_get_torrents_status(torrents):
config_path = os.path.expanduser(config.get('config_path', ''))
for hash, torrent_dict in torrents.items():
# Make sure it has a url so no plugins crash
entry = Entry(deluge_id=hash, url='')
if config_path:
torrent_path = os.path.join(config_path, 'state', hash + '.torrent')
if os.path.isfile(torrent_path):
entry['location'] = torrent_path
if not torrent_path.startswith('/'):
torrent_path = '/' + torrent_path
entry['url'] = 'file://' + torrent_path
else:
log.warning('Did not find torrent file at %s' % torrent_path)
for key, value in torrent_dict.items():
flexget_key = self.settings_map[key]
if isinstance(flexget_key, tuple):
flexget_key, format_func = flexget_key
value = format_func(value)
entry[flexget_key] = value
self.entries.append(entry)
client.disconnect()
filter = config.get('filter', {})
# deluge client lib chokes on future's newlist, make sure we have a native python list here
client.core.get_torrents_status(filter, native(list(self.settings_map.keys()))).addCallback(
on_get_torrents_status)
class OutputDeluge(DelugePlugin):
"""Add the torrents directly to deluge, supporting custom save paths."""
schema = {
'anyOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'host': {'type': 'string'},
'port': {'type': 'integer'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'path': {'type': 'string'},
'movedone': {'type': 'string'},
'label': {'type': 'string'},
'queuetotop': {'type': 'boolean'},
'automanaged': {'type': 'boolean'},
'maxupspeed': {'type': 'number'},
'maxdownspeed': {'type': 'number'},
'maxconnections': {'type': 'integer'},
'maxupslots': {'type': 'integer'},
'ratio': {'type': 'number'},
'removeatratio': {'type': 'boolean'},
'addpaused': {'type': 'boolean'},
'compact': {'type': 'boolean'},
'content_filename': {'type': 'string'},
'main_file_only': {'type': 'boolean'},
'main_file_ratio': {'type': 'number'},
'magnetization_timeout': {'type': 'integer'},
'keep_subs': {'type': 'boolean'},
'hide_sparse_files': {'type': 'boolean'},
'enabled': {'type': 'boolean'},
},
'additionalProperties': False
}
]
}
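    # Illustrative task configuration accepted by the schema above (shown as
    # YAML inside comments; paths, credentials and the label are hypothetical,
    # and the plugin name this class is registered under is not shown in this
    # excerpt):
    #
    #     deluge:
    #       host: localhost
    #       port: 58846
    #       username: flexget
    #       password: secret
    #       path: /data/incoming
    #       movedone: /data/complete
    #       label: flexget
    #       main_file_only: yes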
def prepare_config(self, config):
if isinstance(config, bool):
config = {'enabled': config}
self.prepare_connection_info(config)
config.setdefault('enabled', True)
config.setdefault('path', '')
config.setdefault('movedone', '')
config.setdefault('label', '')
config.setdefault('main_file_ratio', 0.90)
config.setdefault('magnetization_timeout', 0)
config.setdefault('keep_subs', True) # does nothing without 'content_filename' or 'main_file_only' enabled
config.setdefault('hide_sparse_files', False) # does nothing without 'main_file_only' enabled
return config
def __init__(self):
self.deluge_version = None
self.options = {'maxupspeed': 'max_upload_speed', 'maxdownspeed': 'max_download_speed',
'maxconnections': 'max_connections', 'maxupslots': 'max_upload_slots',
'automanaged': 'auto_managed', 'ratio': 'stop_ratio', 'removeatratio': 'remove_at_ratio',
'addpaused': 'add_paused', 'compact': 'compact_allocation'}
@plugin.priority(120)
def on_task_download(self, task, config):
"""
Call download plugin to generate the temp files we will load into deluge
then verify they are valid torrents
"""
import deluge.ui.common
config = self.prepare_config(config)
if not config['enabled']:
return
# If the download plugin is not enabled, we need to call it to get our temp .torrent files
if 'download' not in task.config:
download = plugin.get_plugin_by_name('download')
for entry in task.accepted:
if not entry.get('deluge_id'):
download.instance.get_temp_file(task, entry, handle_magnets=True)
# Check torrent files are valid
for entry in task.accepted:
if os.path.exists(entry.get('file', '')):
# Check if downloaded file is a valid torrent file
try:
deluge.ui.common.TorrentInfo(entry['file'])
except Exception:
entry.fail('Invalid torrent file')
log.error('Torrent file appears invalid for: %s', entry['title'])
@plugin.priority(135)
def on_task_output(self, task, config):
"""Add torrents to deluge at exit."""
config = self.prepare_config(config)
# don't add when learning
if task.options.learn:
return
if not config['enabled'] or not (task.accepted or task.options.test):
return
self.connect(task, config)
# Clean up temp file if download plugin is not configured for this task
if 'download' not in task.config:
for entry in task.accepted + task.failed:
if os.path.exists(entry.get('file', '')):
os.remove(entry['file'])
del(entry['file'])
def on_connect_success(self, result, task, config):
"""Gets called when successfully connected to a daemon."""
from deluge.ui.client import client
from twisted.internet import reactor, defer
if not result:
log.debug('on_connect_success returned a failed result. BUG?')
if task.options.test:
log.debug('Test connection to deluge daemon successful.')
client.disconnect()
return
def format_label(label):
"""Makes a string compliant with deluge label naming rules"""
            return re.sub(r'[^\w-]+', '_', label.lower())
def set_torrent_options(torrent_id, entry, opts):
"""Gets called when a torrent was added to the daemon."""
dlist = []
if not torrent_id:
log.error('There was an error adding %s to deluge.' % entry['title'])
# TODO: Fail entry? How can this happen still now?
return
log.info('%s successfully added to deluge.' % entry['title'])
entry['deluge_id'] = torrent_id
def create_path(result, path):
"""Creates the specified path if deluge is older than 1.3"""
from deluge.common import VersionSplit
# Before 1.3, deluge would not create a non-existent move directory, so we need to.
if VersionSplit('1.3.0') > VersionSplit(self.deluge_version):
if client.is_localhost():
if not os.path.isdir(path):
log.debug('path %s doesn\'t exist, creating' % path)
os.makedirs(path)
else:
log.warning('If path does not exist on the machine running the daemon, move will fail.')
if opts.get('movedone'):
dlist.append(version_deferred.addCallback(create_path, opts['movedone']))
dlist.append(client.core.set_torrent_move_completed(torrent_id, True))
dlist.append(client.core.set_torrent_move_completed_path(torrent_id, opts['movedone']))
log.debug('%s move on complete set to %s' % (entry['title'], opts['movedone']))
if opts.get('label'):
def apply_label(result, torrent_id, label):
"""Gets called after labels and torrent were added to deluge."""
return client.label.set_torrent(torrent_id, label)
dlist.append(label_deferred.addCallback(apply_label, torrent_id, opts['label']))
if opts.get('queuetotop') is not None:
if opts['queuetotop']:
dlist.append(client.core.queue_top([torrent_id]))
log.debug('%s moved to top of queue' % entry['title'])
else:
dlist.append(client.core.queue_bottom([torrent_id]))
log.debug('%s moved to bottom of queue' % entry['title'])
def on_get_torrent_status(status):
"""Gets called with torrent status, including file info.
Sets the torrent options which require knowledge of the current status of the torrent."""
main_file_dlist = []
# Determine where the file should be
move_now_path = None
if opts.get('movedone'):
if status['progress'] == 100:
move_now_path = opts['movedone']
else:
# Deluge will unset the move completed option if we move the storage, forgo setting proper
# path, in favor of leaving proper final location.
log.debug('Not moving storage for %s, as this will prevent movedone.' % entry['title'])
elif opts.get('path'):
move_now_path = opts['path']
if move_now_path and os.path.normpath(move_now_path) != os.path.normpath(status['save_path']):
main_file_dlist.append(version_deferred.addCallback(create_path, move_now_path))
log.debug('Moving storage for %s to %s' % (entry['title'], move_now_path))
main_file_dlist.append(client.core.move_storage([torrent_id], move_now_path))
if opts.get('content_filename') or opts.get('main_file_only'):
def file_exists(filename):
# Checks the download path as well as the move completed path for existence of the file
if os.path.exists(os.path.join(status['save_path'], filename)):
return True
elif status.get('move_on_completed') and status.get('move_on_completed_path'):
if os.path.exists(os.path.join(status['move_on_completed_path'], filename)):
return True
else:
return False
def unused_name(name):
# If on local computer, tries appending a (#) suffix until a unique filename is found
if client.is_localhost():
counter = 2
while file_exists(name):
name = ''.join([os.path.splitext(name)[0],
" (", str(counter), ')',
os.path.splitext(name)[1]])
counter += 1
else:
log.debug('Cannot ensure content_filename is unique '
'when adding to a remote deluge daemon.')
return name
def rename(file, new_name):
# Renames a file in torrent
main_file_dlist.append(
client.core.rename_files(torrent_id,
[(file['index'], new_name)]))
log.debug('File %s in %s renamed to %s' % (file['path'], entry['title'], new_name))
# find a file that makes up more than main_file_ratio (default: 90%) of the total size
main_file = None
for file in status['files']:
if file['size'] > (status['total_size'] * opts.get('main_file_ratio')):
main_file = file
break
if main_file is not None:
# proceed with renaming only if such a big file is found
# find the subtitle file
keep_subs = opts.get('keep_subs')
sub_file = None
if keep_subs:
sub_exts = [".srt", ".sub"]
for file in status['files']:
ext = os.path.splitext(file['path'])[1]
if ext in sub_exts:
sub_file = file
break
                        # check for single file torrents so we don't add unnecessary folders
                        if os.path.dirname(main_file['path']) not in ("", "/"):
                            # check for top folder in user config
                            if opts.get('content_filename') and os.path.dirname(opts['content_filename']) != "":
top_files_dir = os.path.dirname(opts['content_filename']) + "/"
else:
top_files_dir = os.path.dirname(main_file['path']) + "/"
else:
top_files_dir = "/"
if opts.get('content_filename'):
# rename the main file
big_file_name = (top_files_dir +
os.path.basename(opts['content_filename']) +
os.path.splitext(main_file['path'])[1])
big_file_name = unused_name(big_file_name)
rename(main_file, big_file_name)
# rename subs along with the main file
if sub_file is not None and keep_subs:
sub_file_name = (os.path.splitext(big_file_name)[0] +
os.path.splitext(sub_file['path'])[1])
rename(sub_file, sub_file_name)
if opts.get('main_file_only'):
# download only the main file (and subs)
file_priorities = [1 if f == main_file or (f == sub_file and keep_subs) else 0
for f in status['files']]
main_file_dlist.append(
client.core.set_torrent_file_priorities(torrent_id, file_priorities))
if opts.get('hide_sparse_files'):
# hide the other sparse files that are not supposed to download but are created anyway
# http://dev.deluge-torrent.org/ticket/1827
# Made sparse files behave better with deluge http://flexget.com/ticket/2881
sparse_files = [f for f in status['files']
if f != main_file and (f != sub_file or (not keep_subs))]
rename_pairs = [(f['index'],
top_files_dir + ".sparse_files/" + os.path.basename(f['path']))
for f in sparse_files]
main_file_dlist.append(client.core.rename_files(torrent_id, rename_pairs))
else:
log.warning('No files in "%s" are > %d%% of content size, no files renamed.' % (
entry['title'],
opts.get('main_file_ratio') * 100))
return defer.DeferredList(main_file_dlist)
status_keys = ['files', 'total_size', 'save_path', 'move_on_completed_path',
'move_on_completed', 'progress']
dlist.append(client.core.get_torrent_status(torrent_id, status_keys).addCallback(on_get_torrent_status))
return defer.DeferredList(dlist)
def on_fail(result, task, entry):
"""Gets called when daemon reports a failure adding the torrent."""
log.info('%s was not added to deluge! %s' % (entry['title'], result))
entry.fail('Could not be added to deluge')
# dlist is a list of deferreds that must complete before we exit
dlist = []
# loop through entries to get a list of labels to add
labels = set([format_label(entry['label']) for entry in task.accepted if entry.get('label')])
if config.get('label'):
labels.add(format_label(config['label']))
label_deferred = defer.succeed(True)
if labels:
# Make sure the label plugin is available and enabled, then add appropriate labels
def on_get_enabled_plugins(plugins):
"""Gets called with the list of enabled deluge plugins."""
def on_label_enabled(result):
""" This runs when we verify the label plugin is enabled. """
def on_get_labels(d_labels):
"""Gets available labels from deluge, and adds any new labels we need."""
dlist = []
for label in labels:
if label not in d_labels:
log.debug('Adding the label %s to deluge' % label)
dlist.append(client.label.add(label))
return defer.DeferredList(dlist)
return client.label.get_labels().addCallback(on_get_labels)
if 'Label' in plugins:
return on_label_enabled(True)
else:
# Label plugin isn't enabled, so we check if it's available and enable it.
def on_get_available_plugins(plugins):
"""Gets plugins available to deluge, enables Label plugin if available."""
if 'Label' in plugins:
log.debug('Enabling label plugin in deluge')
return client.core.enable_plugin('Label').addCallback(on_label_enabled)
else:
log.error('Label plugin is not installed in deluge')
return client.core.get_available_plugins().addCallback(on_get_available_plugins)
label_deferred = client.core.get_enabled_plugins().addCallback(on_get_enabled_plugins)
dlist.append(label_deferred)
def on_get_daemon_info(ver):
"""Gets called with the daemon version info, stores it in self."""
log.debug('deluge version %s' % ver)
self.deluge_version = ver
version_deferred = client.daemon.info().addCallback(on_get_daemon_info)
dlist.append(version_deferred)
def on_get_session_state(torrent_ids):
"""Gets called with a list of torrent_ids loaded in the deluge session.
Adds new torrents and modifies the settings for ones already in the session."""
dlist = []
# add the torrents
for entry in task.accepted:
@defer.inlineCallbacks
def _wait_for_metadata(torrent_id, timeout):
log.verbose('Waiting %d seconds for "%s" to magnetize' % (timeout, entry['title']))
for i in range(timeout):
time.sleep(1)
try:
status = yield client.core.get_torrent_status(torrent_id, ['files'])
except Exception as err:
log.error('wait_for_metadata Error: %s' % err)
break
if len(status['files']) > 0:
log.info('"%s" magnetization successful' % (entry['title']))
break
else:
log.warning('"%s" did not magnetize before the timeout elapsed, '
'file list unavailable for processing.' % entry['title'])
defer.returnValue(torrent_id)
def add_entry(entry, opts):
"""Adds an entry to the deluge session"""
magnet, filedump = None, None
if entry.get('url', '').startswith('magnet:'):
magnet = entry['url']
else:
if not os.path.exists(entry['file']):
entry.fail('Downloaded temp file \'%s\' doesn\'t exist!' % entry['file'])
del entry['file']
return
with open(entry['file'], 'rb') as f:
filedump = base64.encodestring(f.read())
log.verbose('Adding %s to deluge.' % entry['title'])
if magnet:
d = client.core.add_torrent_magnet(magnet, opts)
if config.get('magnetization_timeout'):
d.addCallback(_wait_for_metadata, config['magnetization_timeout'])
return d
else:
return client.core.add_torrent_file(entry['title'], filedump, opts)
# Generate deluge options dict for torrent add
add_opts = {}
try:
path = entry.render(entry.get('path', config['path']))
if path:
add_opts['download_location'] = pathscrub(os.path.expanduser(path))
except RenderError as e:
log.error('Could not set path for %s: %s' % (entry['title'], e))
for fopt, dopt in self.options.items():
value = entry.get(fopt, config.get(fopt))
if value is not None:
add_opts[dopt] = value
if fopt == 'ratio':
add_opts['stop_at_ratio'] = True
# Make another set of options, that get set after the torrent has been added
modify_opts = {
'label': format_label(entry.get('label', config['label'])),
'queuetotop': entry.get('queuetotop', config.get('queuetotop')),
'main_file_only': entry.get('main_file_only', config.get('main_file_only', False)),
'main_file_ratio': entry.get('main_file_ratio', config.get('main_file_ratio')),
'hide_sparse_files': entry.get('hide_sparse_files', config.get('hide_sparse_files', True)),
'keep_subs': entry.get('keep_subs', config.get('keep_subs', True))
}
try:
movedone = entry.render(entry.get('movedone', config['movedone']))
modify_opts['movedone'] = pathscrub(os.path.expanduser(movedone))
except RenderError as e:
log.error('Error setting movedone for %s: %s' % (entry['title'], e))
try:
content_filename = entry.get('content_filename', config.get('content_filename', ''))
modify_opts['content_filename'] = pathscrub(entry.render(content_filename))
except RenderError as e:
log.error('Error setting content_filename for %s: %s' % (entry['title'], e))
torrent_id = entry.get('deluge_id') or entry.get('torrent_info_hash')
torrent_id = torrent_id and torrent_id.lower()
if torrent_id in torrent_ids:
log.info('%s is already loaded in deluge, setting options' % entry['title'])
# Entry has a deluge id, verify the torrent is still in the deluge session and apply options
# Since this is already loaded in deluge, we may also need to change the path
modify_opts['path'] = add_opts.pop('download_location', None)
dlist.extend([set_torrent_options(torrent_id, entry, modify_opts),
client.core.set_torrent_options([torrent_id], add_opts)])
else:
dlist.append(add_entry(entry, add_opts).addCallbacks(
set_torrent_options, on_fail, callbackArgs=(entry, modify_opts), errbackArgs=(task, entry)))
return defer.DeferredList(dlist)
dlist.append(client.core.get_session_state().addCallback(on_get_session_state))
def on_complete(result):
"""Gets called when all of our tasks for deluge daemon are complete."""
client.disconnect()
tasks = defer.DeferredList(dlist).addBoth(on_complete)
def on_timeout(result):
"""Gets called if tasks have not completed in 30 seconds.
Should only happen when something goes wrong."""
log.error('Timed out while adding torrents to deluge.')
log.debug('dlist: %s' % result.resultList)
client.disconnect()
# Schedule a disconnect to happen if FlexGet hangs while connected to Deluge
# Leave the timeout long, to give time for possible lookups to occur
reactor.callLater(600, lambda: tasks.called or on_timeout(tasks))
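# Note (added for clarity): `tasks.called` is evaluated lazily inside the
# lambda, so the scheduled call becomes a no-op if the DeferredList has
# already completed by the time the 600 second timer fires; otherwise
# on_timeout() disconnects the client.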
def on_task_exit(self, task, config):
"""Make sure all temp files are cleaned up when task exits"""
# If download plugin is enabled, it will handle cleanup.
if 'download' not in task.config:
download = plugin.get_plugin_by_name('download')
download.instance.cleanup_temp_files(task)
def on_task_abort(self, task, config):
"""Make sure normal cleanup tasks still happen on abort."""
DelugePlugin.on_task_abort(self, task, config)
self.on_task_exit(task, config)
@event('plugin.register')
def register_plugin():
plugin.register(InputDeluge, 'from_deluge', api_ver=2)
plugin.register(OutputDeluge, 'deluge', api_ver=2)
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shuts down a TCP connection on Linux or macOS.
Finds the process and socket file descriptor associated with a given TCP
connection. Then injects into that process a call to shutdown()
(http://man7.org/linux/man-pages/man2/shutdown.2.html) that file descriptor,
thereby shutting down the TCP connection.
Typical usage example:
tcp_kill("10.31.33.7", 50246 "93.184.216.34", 443)
Dependencies:
lsof (https://en.wikipedia.org/wiki/Lsof)
frida (https://www.frida.re/): sudo pip install frida
"""
__author__ = "[email protected] (Jason Geffner)"
__version__ = "1.0"
import argparse
import os
import platform
import re
import socket
import subprocess
import threading
import frida
_FRIDA_SCRIPT = """
var resolver = new ApiResolver("module");
var lib = Process.platform == "darwin" ? "libsystem" : "libc";
var matches = resolver.enumerateMatchesSync("exports:*" + lib + "*!shutdown");
if (matches.length == 0)
{
throw new Error("Could not find *" + lib + "*!shutdown in target process.");
}
else if (matches.length != 1)
{
// Sometimes Frida returns duplicates.
var address = 0;
var s = "";
var duplicates_only = true;
for (var i = 0; i < matches.length; i++)
{
if (s.length != 0)
{
s += ", ";
}
s += matches[i].name + "@" + matches[i].address;
if (address == 0)
{
address = matches[i].address;
}
else if (!address.equals(matches[i].address))
{
duplicates_only = false;
}
}
if (!duplicates_only)
{
throw new Error("More than one match found for *libc*!shutdown: " + s);
}
}
var shutdown = new NativeFunction(matches[0].address, "int", ["int", "int"]);
if (shutdown(%d, 0) != 0)
{
throw new Error("Call to shutdown() returned an error.");
}
send("");
"""
def canonicalize_ip_address(address):
if ":" in address:
family = socket.AF_INET6
else:
family = socket.AF_INET
return socket.inet_ntop(family, socket.inet_pton(family, address))
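# Illustrative example (added; addresses are arbitrary): canonicalization makes
# textually different spellings of the same address compare equal, e.g. both
#   canonicalize_ip_address("2001:0db8:0000:0000:0000:0000:0000:0001")
#   canonicalize_ip_address("2001:db8::1")
# return "2001:db8::1", which the lsof-output comparison in tcp_kill() relies on.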
def tcp_kill(local_addr, local_port, remote_addr, remote_port, verbose=False):
"""Shuts down a TCP connection on Linux or macOS.
Finds the process and socket file descriptor associated with a given TCP
connection. Then injects into that process a call to shutdown()
(http://man7.org/linux/man-pages/man2/shutdown.2.html) that file descriptor,
thereby shutting down the TCP connection.
Args:
local_addr: The IP address (as a string) associated with the local endpoint
of the connection.
local_port: The port (as an int) associated with the local endpoint of the
connection.
remote_addr: The IP address (as a string) associated with the remote
endpoint of the connection.
remote_port: The port (as an int) associated with the remote endpoint of the
connection.
verbose: If True, print verbose output to the console.
Returns:
No return value if successful. If unsuccessful, raises an exception.
Raises:
KeyError: Unexpected output from lsof command.
NotImplementedError: Not running on a Linux or macOS system.
OSError: TCP connection not found or socket file descriptor not found.
RuntimeError: Error during execution of JavaScript injected into process.
"""
if platform.system() not in ("Darwin", "Linux"):
raise NotImplementedError("This function is only implemented for Linux and "
"macOS systems.")
local_addr = canonicalize_ip_address(local_addr)
remote_addr = canonicalize_ip_address(remote_addr)
name_pattern = re.compile(
r"^\[?(.+?)]?:([0-9]{1,5})->\[?(.+?)]?:([0-9]{1,5})$")
fd_pattern = re.compile(r"^(\d+)")
field_names = ("PID", "FD", "NAME")
fields = {}
pid = None
sockfd = None
for line in subprocess.check_output("lsof -bnlPiTCP -sTCP:ESTABLISHED "
"2>/dev/null", shell=True).splitlines():
words = line.split()
if len(fields) != len(field_names):
for i in xrange(len(words)):
for field in field_names:
if words[i] == field:
fields[field] = i
break
if len(fields) != len(field_names):
raise KeyError("Unexpected field headers in output of lsof command.")
continue
name = name_pattern.match(words[fields["NAME"]])
if not name:
raise KeyError("Unexpected NAME in output of lsof command.")
if (int(name.group(2)) == local_port and int(name.group(4)) == remote_port
and canonicalize_ip_address(name.group(1)) == local_addr and
canonicalize_ip_address(name.group(3)) == remote_addr):
pid = int(words[fields["PID"]])
sockfd = int(fd_pattern.match(words[fields["FD"]]).group(1))
if verbose:
print "Process ID of socket's process: %d" % pid
print "Socket file descriptor: %d" % sockfd
break
if not sockfd:
s = " Try running as root." if os.geteuid() != 0 else ""
raise OSError("Socket not found for connection." + s)
_shutdown_sockfd(pid, sockfd)
def _shutdown_sockfd(pid, sockfd):
"""Injects into a process a call to shutdown() a socket file descriptor.
Injects into a process a call to shutdown()
(http://man7.org/linux/man-pages/man2/shutdown.2.html) a socket file
descriptor, thereby shutting down its associated TCP connection.
Args:
pid: The process ID (as an int) of the target process.
sockfd: The socket file descriptor (as an int) in the context of the target
process to be shutdown.
Raises:
RuntimeError: Error during execution of JavaScript injected into process.
"""
js_error = {} # Using dictionary since Python 2.7 doesn't support "nonlocal".
event = threading.Event()
def on_message(message, data): # pylint: disable=unused-argument
if message["type"] == "error":
js_error["error"] = message["description"]
event.set()
session = frida.attach(pid)
script = session.create_script(_FRIDA_SCRIPT % sockfd)
script.on("message", on_message)
closed = False
try:
script.load()
except frida.TransportError as e:
if str(e) != "the connection is closed":
raise
closed = True
if not closed:
event.wait()
session.detach()
if "error" in js_error:
raise RuntimeError(js_error["error"])
if __name__ == "__main__":
class ArgParser(argparse.ArgumentParser):
def error(self, message):
print "tcp_killer v" + __version__
print "by " + __author__
print
print "Error: " + message
print
print self.format_help().replace("usage:", "Usage:")
self.exit(0)
parser = ArgParser(
add_help=False,
description="Shuts down a TCP connection on Linux or macOS. Local and "
"remote endpoint arguments can be copied from the output of 'netstat "
"-lanW'.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=r"""
Examples:
%(prog)s 10.31.33.7:50246 93.184.216.34:443
%(prog)s 2001:db8:85a3::8a2e:370:7334.93 2606:2800:220:1:248:1893:25c8:1946.80
%(prog)s -verbose [2001:4860:4860::8888]:46820 [2607:f8b0:4005:807::200e]:80
""")
args = parser.add_argument_group("Arguments")
args.add_argument("-verbose", required=False, action="store_const",
const=True, help="Show verbose output")
args.add_argument("local", metavar="<local endpoint>",
help="Connection's local IP address and port")
args.add_argument("remote", metavar="<remote endpoint>",
help="Connection's remote IP address and port")
parsed = parser.parse_args()
ep_format = re.compile(r"^(.+)[:\.]([0-9]{1,5})$")
local = ep_format.match(parsed.local)
remote = ep_format.match(parsed.remote)
if not local or not remote:
parser.error("Invalid command-line argument.")
local_address = local.group(1)
if local_address.startswith("[") and local_address.endswith("]"):
local_address = local_address[1:-1]
remote_address = remote.group(1)
if remote_address.startswith("[") and remote_address.endswith("]"):
remote_address = remote_address[1:-1]
tcp_kill(local_address, int(local.group(2)), remote_address,
int(remote.group(2)), parsed.verbose)
print "TCP connection was successfully shutdown."
|
|
import os
import subprocess
DRY_RUN = False
SQUELCH_STDERR = True
ECHO_CALLS = False
def br_call(args, dry_run=DRY_RUN, echo=ECHO_CALLS):
if dry_run or echo:
print('BRCALL: ' + ' '.join(args))
if dry_run:
return 0
if SQUELCH_STDERR:
return subprocess.call(args, stdout=open('/dev/null', 'w'),
                       stderr=open('/dev/null', 'w'))
else:
# Useful for debugging.
return subprocess.call(args, stdout=open('/dev/null', 'w'))
# We use this since our squelching of stderr can hide missing file errors.
def sanity_check_file_exists(f):
if not os.access(f, os.F_OK):
raise RuntimeError('Error! Could not find file: ' + f)
class SwiftTools(object):
"""A utility class that enables users to easily find sil-tools without needing
to constantly reform paths to the build directory. Also provides safety by
asserting if one of the tools does not exist at the specified path"""
def __init__(self, swift_build_dir):
self.swift_build_dir = swift_build_dir
def _get_tool(self, name):
path = os.path.join(self.swift_build_dir, 'bin', name)
if not os.access(path, os.F_OK):
error_msg = "Error! {} does not exist at: {}".format(name, path)
raise RuntimeError(error_msg)
return path
@property
def sil_nm(self):
"""Return the path to sil-nm in the specified swift build directory. Throws a
runtime error if the tool does not exist"""
return self._get_tool('sil-nm')
@property
def swiftc(self):
"""Return the path to swiftc in the specified swift build directory. Throws a
runtime error if the tool does not exist"""
return self._get_tool('swiftc')
@property
def sil_opt(self):
"""Return the path to sil-opt in the specified swift build directory. Throws a
runtime error if the tool does not exist"""
return self._get_tool('sil-opt')
@property
def sil_func_extractor(self):
"""Return the path to sil-func-extractor in the specified swift build
directory. Throws a runtime error if the tool does not exist."""
return self._get_tool('sil-func-extractor')
@property
def sil_passpipeline_dumper(self):
"""Return the path to sil-passpipeline-dumper in the specified swift build
directory. Throws a runtime error if the tool does not exist
"""
return self._get_tool('sil-passpipeline-dumper')
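# Illustrative usage sketch (added; the build path is hypothetical):
#   tools = SwiftTools('/path/to/swift-build')
#   tools.sil_opt             # -> /path/to/swift-build/bin/sil-opt
#   tools.sil_func_extractor  # raises RuntimeError if the binary is missing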
def maybe_abspath(x):
if x is None:
return x
return os.path.abspath(x)
class SILToolInvokerConfig(object):
def __init__(self, args):
self.module_cache = args.module_cache
self.sdk = args.sdk
self.target = args.target
self.resource_dir = maybe_abspath(args.resource_dir)
self.work_dir = maybe_abspath(args.work_dir)
self.module_name = args.module_name
class SILToolInvoker(object):
def __init__(self, config, extra_args=None):
self.config = config
self.extra_args = extra_args
def base_args(self, emit_sib):
x = [self.tool]
if self.config.sdk is not None:
x.append("-sdk=%s" % self.config.sdk)
if self.config.target is not None:
x.append("-target=%s" % self.config.target)
if self.config.resource_dir is not None:
x.append("-resource-dir=%s" % self.config.resource_dir)
if self.config.module_cache is not None:
x.append("-module-cache-path=%s" % self.config.module_cache)
if self.config.module_name is not None:
x.append("-module-name=%s" % self.config.module_name)
if emit_sib:
x.append("-emit-sib")
return x
@property
def tool(self):
raise RuntimeError('Abstract Method')
class SILConstantInputToolInvoker(SILToolInvoker):
def __init__(self, config, tools, initial_input_file, extra_args):
SILToolInvoker.__init__(self, config, extra_args)
self.tools = tools
# Start by creating our workdir if necessary
subprocess.check_call(["mkdir", "-p", self.config.work_dir])
# Then copy our input file into the work dir
base_input_file = os.path.basename(initial_input_file)
(base, ext) = os.path.splitext(base_input_file)
self.base_input_file_stem = base
self.base_input_file_ext = ".sib"
# First emit an initial *.sib file. This ensures that no matter whether we
# start with a *.swiftmodule, *.sil, or *.sib file, we are always working
# with *.sib.
self.input_file = initial_input_file
sanity_check_file_exists(initial_input_file)
def _invoke(self, *args, **kwargs):
raise RuntimeError('Abstract method')
def get_suffixed_filename(self, suffix):
basename = self.base_input_file_stem + '_' + suffix
basename += self.base_input_file_ext
return os.path.join(self.config.work_dir, basename)
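# Illustrative example (added; paths are hypothetical): with work_dir set to
# '/tmp/work' and an initial input file 'Foo.swiftmodule', the stem is 'Foo'
# and the extension is forced to '.sib', so get_suffixed_filename('initial')
# returns '/tmp/work/Foo_initial.sib'.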
class SILOptInvoker(SILConstantInputToolInvoker):
def __init__(self, config, tools, input_file, extra_args):
SILConstantInputToolInvoker.__init__(self, config, tools, input_file,
extra_args)
self.input_file = self.get_suffixed_filename('initial')
self._invoke(input_file, [], True, self.input_file)
@property
def tool(self):
return self.tools.sil_opt
def _cmdline(self, input_file, passes, emit_sib, output_file='-'):
assert(isinstance(emit_sib, bool))
assert(isinstance(output_file, str))
base_args = self.base_args(emit_sib)
sanity_check_file_exists(input_file)
base_args.extend([input_file, '-o', output_file])
base_args.extend(self.extra_args)
base_args.extend(passes)
return base_args
def _invoke(self, input_file, passes, emit_sib, output_filename):
cmdline = self._cmdline(input_file, passes, emit_sib, output_filename)
return br_call(cmdline)
def invoke_with_passlist(self, passes, output_filename):
return self._invoke(self.input_file, passes, True, output_filename)
def cmdline_with_passlist(self, passes):
return self._cmdline(self.input_file, passes, False)
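# Illustrative sketch (added; values are hypothetical): with only module_name
# set to 'Foo' in the config and empty extra_args,
# cmdline_with_passlist(['-sil-combine']) builds roughly
#   [<sil-opt>, '-module-name=Foo', <input.sib>, '-o', '-', '-sil-combine']
# i.e. base_args(), then the input/output pair, then extra_args, then passes.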
class SILFuncExtractorInvoker(SILConstantInputToolInvoker):
def __init__(self, config, tools, input_file):
SILConstantInputToolInvoker.__init__(self, config, tools, input_file,
[])
@property
def tool(self):
return self.tools.sil_func_extractor
def _cmdline(self, input_file, funclist_path, emit_sib, output_file='-',
invert=False):
assert(isinstance(emit_sib, bool))
assert(isinstance(output_file, str))
sanity_check_file_exists(input_file)
sanity_check_file_exists(funclist_path)
assert(isinstance(funclist_path, str))
base_args = self.base_args(emit_sib)
base_args.extend([input_file, '-o', output_file,
'-func-file=%s' % funclist_path])
if invert:
base_args.append('-invert')
return base_args
def _invoke(self, input_file, funclist_path, output_filename,
invert=False):
assert(isinstance(funclist_path, str))
cmdline = self._cmdline(input_file, funclist_path, True, output_filename,
invert)
return br_call(cmdline)
def invoke_with_functions(self, funclist_path, output_filename,
invert=False):
assert(isinstance(funclist_path, str))
return self._invoke(self.input_file, funclist_path, output_filename,
invert)
class SILNMInvoker(SILToolInvoker):
def __init__(self, config, tools):
self.tools = tools
SILToolInvoker.__init__(self, config)
@property
def tool(self):
return self.tools.sil_nm
def get_symbols(self, input_file):
sanity_check_file_exists(input_file)
cmdline = self.base_args(emit_sib=False)
cmdline.append(input_file)
output = subprocess.check_output(cmdline)
for l in output.split("\n")[:-1]:
t = tuple(l.split(" "))
assert(len(t) == 2)
yield t
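# Illustrative note (added): sil-nm output is assumed here to be one
# "<kind> <symbol>" pair per line, so get_symbols() yields 2-tuples such as
# ('F', '$s4main3fooyyF'); the symbol shown is hypothetical.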
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from benchmarks import webgl_expectations
from measurements import smoothness
from telemetry import benchmark
import page_sets
class SmoothnessTop25(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics while scrolling down the top 25 web pages.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
test = smoothness.Smoothness
page_set = page_sets.Top25SmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.top_25_smooth'
class SmoothnessToughFiltersCases(perf_benchmark.PerfBenchmark):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of SVG and CSS Filter Effects.
"""
test = smoothness.Smoothness
page_set = page_sets.ToughFiltersCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_filters_cases'
class SmoothnessToughPathRenderingCases(perf_benchmark.PerfBenchmark):
"""Tests a selection of pages with SVG and 2D Canvas paths.
Measures frame rate and a variety of other statistics. """
test = smoothness.Smoothness
page_set = page_sets.ToughPathRenderingCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_path_rendering_cases'
# crbug.com/388877, crbug.com/396127
@benchmark.Disabled('mac', 'win', 'android')
class SmoothnessToughCanvasCases(perf_benchmark.PerfBenchmark):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of the 2D Canvas API.
"""
test = smoothness.Smoothness
page_set = page_sets.ToughCanvasCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_canvas_cases'
@benchmark.Disabled('android') # crbug.com/373812
class SmoothnessToughWebGLCases(perf_benchmark.PerfBenchmark):
test = smoothness.Smoothness
page_set = page_sets.ToughWebglCasesPageSet
@classmethod
def CreateExpectations(cls):
return webgl_expectations.WebGLExpectations()
@classmethod
def Name(cls):
return 'smoothness.tough_webgl_cases'
@benchmark.Enabled('android')
class SmoothnessMaps(perf_benchmark.PerfBenchmark):
page_set = page_sets.MapsPageSet
@classmethod
def CreateExpectations(cls):
return webgl_expectations.MapsExpectations()
@classmethod
def Name(cls):
return 'smoothness.maps'
@benchmark.Disabled('android')
class SmoothnessKeyDesktopMoveCases(perf_benchmark.PerfBenchmark):
test = smoothness.Smoothness
page_set = page_sets.KeyDesktopMoveCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.key_desktop_move_cases'
@benchmark.Enabled('android')
class SmoothnessKeyMobileSites(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics while scrolling down the key mobile sites.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
test = smoothness.Smoothness
page_set = page_sets.KeyMobileSitesSmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.key_mobile_sites_smooth'
@benchmark.Disabled('mac', 'win', 'android')
class SmoothnessKeyMobileSitesWithSlimmingPaint(perf_benchmark.PerfBenchmark):
"""Measures smoothness on key mobile sites with --enable-slimming-paint.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
test = smoothness.Smoothness
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
options.AppendExtraBrowserArgs(['--enable-slimming-paint'])
@classmethod
def Name(cls):
return 'smoothness.key_mobile_sites_with_slimming_paint_smooth'
class SmoothnessToughAnimationCases(perf_benchmark.PerfBenchmark):
test = smoothness.SmoothnessWithRestart
page_set = page_sets.ToughAnimationCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_animation_cases'
@benchmark.Enabled('android')
class SmoothnessKeySilkCases(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for the key silk cases without GPU
rasterization.
"""
test = smoothness.Smoothness
page_set = page_sets.KeySilkCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.key_silk_cases'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationTop25(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for the top 25 with GPU rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.Top25SmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.top_25_smooth'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationKeyMobileSites(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for the key mobile sites with GPU
rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.key_mobile_sites_smooth'
class SmoothnessGpuRasterizationToughPathRenderingCases(
perf_benchmark.PerfBenchmark):
"""Tests a selection of pages with SVG and 2D canvas paths with GPU
rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.ToughPathRenderingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_path_rendering_cases'
class SmoothnessGpuRasterizationFiltersCases(perf_benchmark.PerfBenchmark):
"""Tests a selection of pages with SVG and CSS filter effects with GPU
rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.ToughFiltersCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_filters_cases'
@benchmark.Enabled('android')
class SmoothnessSyncScrollKeyMobileSites(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for the key mobile sites with synchronous
(main thread) scrolling.
"""
tag = 'sync_scroll'
test = smoothness.Smoothness
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForSyncScrolling(options)
@classmethod
def Name(cls):
return 'smoothness.sync_scroll.key_mobile_sites_smooth'
@benchmark.Enabled('android')
class SmoothnessSimpleMobilePages(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for simple mobile sites page set.
"""
test = smoothness.Smoothness
page_set = page_sets.SimpleMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.simple_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessFlingSimpleMobilePages(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for flinging a simple mobile sites page set.
"""
test = smoothness.Smoothness
page_set = page_sets.SimpleMobileSitesFlingPageSet
def SetExtraBrowserOptions(self, options):
# As the fling parameters cannot be analytically determined to not
# overscroll, disable overscrolling explicitly. Overscroll behavior is
# orthogonal to fling performance, and its activation is only more noise.
options.AppendExtraBrowserArgs('--disable-overscroll-edge-effect')
@classmethod
def Name(cls):
return 'smoothness.fling.simple_mobile_sites'
@benchmark.Enabled('android', 'chromeos')
class SmoothnessToughPinchZoomCases(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for pinch-zooming into the tough pinch zoom
cases.
"""
test = smoothness.Smoothness
page_set = page_sets.ToughPinchZoomCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_pinch_zoom_cases'
@benchmark.Enabled('android', 'chromeos')
class SmoothnessToughScrollingWhileZoomedInCases(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for pinch-zooming then diagonal scrolling"""
test = smoothness.Smoothness
page_set = page_sets.ToughScrollingWhileZoomedInCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_scrolling_while_zoomed_in_cases'
@benchmark.Enabled('android')
class SmoothnessPolymer(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for Polymer cases.
"""
test = smoothness.Smoothness
page_set = page_sets.PolymerPageSet
@classmethod
def Name(cls):
return 'smoothness.polymer'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationPolymer(perf_benchmark.PerfBenchmark):
"""Measures rendering statistics for the Polymer cases with GPU rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.PolymerPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.polymer'
class SmoothnessToughScrollingCases(perf_benchmark.PerfBenchmark):
test = smoothness.Smoothness
page_set = page_sets.ToughScrollingCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_scrolling_cases'
class SmoothnessImageDecodingCases(perf_benchmark.PerfBenchmark):
"""Measures decoding statistics for jpeg images.
"""
test = smoothness.Smoothness
page_set = page_sets.ImageDecodingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
options.AppendExtraBrowserArgs('--disable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.image_decoding_cases'
class SmoothnessGpuImageDecodingCases(perf_benchmark.PerfBenchmark):
"""Measures decoding statistics for jpeg images with GPU rasterization.
"""
tag = 'gpu_rasterization_and_decoding'
test = smoothness.Smoothness
page_set = page_sets.ImageDecodingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
# TODO(sugoi): Remove the following line once M41 goes stable
options.AppendExtraBrowserArgs('--enable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization_and_decoding.image_decoding_cases'
@benchmark.Enabled('android')
class SmoothnessPathologicalMobileSites(perf_benchmark.PerfBenchmark):
"""Measures task execution statistics while scrolling pathological sites.
"""
test = smoothness.Smoothness
page_set = page_sets.PathologicalMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.pathological_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessSyncScrollPathologicalMobileSites(perf_benchmark.PerfBenchmark):
"""Measures task execution statistics while sync-scrolling pathological sites.
"""
tag = 'sync_scroll'
page_set = page_sets.PathologicalMobileSitesPageSet
test = smoothness.Smoothness
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForSyncScrolling(options)
@classmethod
def Name(cls):
return 'smoothness.sync_scroll.pathological_mobile_sites'
class SmoothnessToughAnimatedImageCases(perf_benchmark.PerfBenchmark):
test = smoothness.Smoothness
page_set = page_sets.ToughAnimatedImageCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_animated_image_cases'
|
|
from __future__ import unicode_literals
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2, mock_rds
from tests.helpers import disable_on_py3
@disable_on_py3()
@mock_rds
def test_create_database():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
security_groups=["my_sg"])
database.status.should.equal('available')
database.id.should.equal("db-master-1")
database.allocated_storage.should.equal(10)
database.instance_class.should.equal("db.m1.small")
database.master_username.should.equal("root")
database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
database.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds
def test_get_databases():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbinstances()).should.have.length_of(0)
conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2')
list(conn.get_all_dbinstances()).should.have.length_of(2)
databases = conn.get_all_dbinstances("db-master-1")
list(databases).should.have.length_of(1)
databases[0].id.should.equal("db-master-1")
@mock_rds
def test_describe_non_existant_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError)
@disable_on_py3()
@mock_rds
def test_delete_database():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbinstances()).should.have.length_of(0)
conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
list(conn.get_all_dbinstances()).should.have.length_of(1)
conn.delete_dbinstance("db-master-1")
list(conn.get_all_dbinstances()).should.have.length_of(0)
@mock_rds
def test_delete_non_existant_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError)
@mock_rds
def test_create_database_security_group():
conn = boto.rds.connect_to_region("us-west-2")
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
security_group.name.should.equal('db_sg')
security_group.description.should.equal("DB Security Group")
list(security_group.ip_ranges).should.equal([])
@mock_rds
def test_get_security_groups():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
conn.create_dbsecurity_group('db_sg1', 'DB Security Group')
conn.create_dbsecurity_group('db_sg2', 'DB Security Group')
list(conn.get_all_dbsecurity_groups()).should.have.length_of(2)
databases = conn.get_all_dbsecurity_groups("db_sg1")
list(databases).should.have.length_of(1)
databases[0].name.should.equal("db_sg1")
@mock_rds
def test_get_non_existant_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(BotoServerError)
@mock_rds
def test_delete_database_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.create_dbsecurity_group('db_sg', 'DB Security Group')
list(conn.get_all_dbsecurity_groups()).should.have.length_of(1)
conn.delete_dbsecurity_group("db_sg")
list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
@mock_rds
def test_delete_non_existant_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(BotoServerError)
@disable_on_py3()
@mock_rds
def test_security_group_authorize():
conn = boto.rds.connect_to_region("us-west-2")
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
list(security_group.ip_ranges).should.equal([])
security_group.authorize(cidr_ip='10.3.2.45/32')
security_group = conn.get_all_dbsecurity_groups()[0]
list(security_group.ip_ranges).should.have.length_of(1)
security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')
@disable_on_py3()
@mock_rds
def test_add_security_group_to_database():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
database.modify(security_groups=[security_group])
database = conn.get_all_dbinstances()[0]
list(database.security_groups).should.have.length_of(1)
database.security_groups[0].name.should.equal("db_sg")
@mock_ec2
@mock_rds
def test_add_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24")
subnet_ids = [subnet1.id, subnet2.id]
conn = boto.rds.connect_to_region("us-west-2")
subnet_group = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids)
subnet_group.name.should.equal('db_subnet')
subnet_group.description.should.equal("my db subnet")
list(subnet_group.subnet_ids).should.equal(subnet_ids)
@mock_ec2
@mock_rds
def test_describe_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id])
list(conn.get_all_db_subnet_groups()).should.have.length_of(2)
list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError)
@mock_ec2
@mock_rds
def test_delete_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
list(conn.get_all_db_subnet_groups()).should.have.length_of(1)
conn.delete_db_subnet_group("db_subnet1")
list(conn.get_all_db_subnet_groups()).should.have.length_of(0)
conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(BotoServerError)
@disable_on_py3()
@mock_ec2
@mock_rds
def test_create_database_in_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small',
'root', 'hunter2', db_subnet_group_name="db_subnet1")
database = conn.get_all_dbinstances("db-master-1")[0]
database.subnet_group.name.should.equal("db_subnet1")
@disable_on_py3()
@mock_rds
def test_create_database_replica():
conn = boto.rds.connect_to_region("us-west-2")
primary = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
replica = conn.create_dbinstance_read_replica("replica", "db-master-1", "db.m1.small")
replica.id.should.equal("replica")
replica.instance_class.should.equal("db.m1.small")
status_info = replica.status_infos[0]
status_info.normal.should.equal(True)
status_info.status_type.should.equal('read replication')
status_info.status.should.equal('replicating')
primary = conn.get_all_dbinstances("db-master-1")[0]
primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
conn.delete_dbinstance("replica")
primary = conn.get_all_dbinstances("db-master-1")[0]
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds
def test_create_cross_region_database_replica():
west_1_conn = boto.rds.connect_to_region("us-west-1")
west_2_conn = boto.rds.connect_to_region("us-west-2")
primary = west_1_conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
replica = west_2_conn.create_dbinstance_read_replica(
"replica",
primary_arn,
"db.m1.small",
)
primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
replica = west_2_conn.get_all_dbinstances("replica")[0]
replica.instance_class.should.equal("db.m1.small")
west_2_conn.delete_dbinstance("replica")
primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds
def test_connecting_to_us_east_1():
# boto does not use us-east-1 in the URL for RDS,
# and that broke moto in the past:
# https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
conn = boto.rds.connect_to_region("us-east-1")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
security_groups=["my_sg"])
database.status.should.equal('available')
database.id.should.equal("db-master-1")
database.allocated_storage.should.equal(10)
database.instance_class.should.equal("db.m1.small")
database.master_username.should.equal("root")
database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
database.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds
def test_create_database_with_iops():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
database.status.should.equal('available')
database.iops.should.equal(6000)
# boto>2.36.0 may change the following property name to `storage_type`
database.StorageType.should.equal('io1')
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the internal ops used by tfdbg v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.compat import compat
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_debug_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class DebugIdentityV2OpTest(dumping_callback_test_lib.DumpingCallbackTestBase):
"""Tests for DebugIdentityV2Op: when DebugEventsWriter is initialized.
DebugEventsWriter being initialized prior to DebugIdentityV2 ops being invoked
for the first time is the typical case (e.g., tfdbg2 running on a local
machine with only local devices.)
"""
def setUp(self):
super(DebugIdentityV2OpTest, self).setUp()
# Testing using a small circular-buffer size.
self.circular_buffer_size = 4
self.tfdbg_run_id = "test_tfdbg_run"
self.writer = debug_events_writer.DebugEventsWriter(
self.dump_root, self.tfdbg_run_id, self.circular_buffer_size)
def tearDown(self):
self.writer.Close()
super(DebugIdentityV2OpTest, self).tearDown()
@test_util.run_in_graph_and_eager_modes
def testSingleTensorFullTensorDebugModeWithCircularBufferBehavior(self):
@def_function.function
def write_debug_trace(x):
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
op_name="Square",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
sqrt = math_ops.sqrt(x)
gen_debug_ops.debug_identity_v2(
sqrt,
tfdbg_context_id="beafdead",
op_name="Sqrt",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
return square + sqrt
x = np.array([3.0, 4.0])
# Only the graph-execution trace of the last iteration should be written
# to self.dump_root.
for _ in range(self.circular_buffer_size // 2 + 1):
self.assertAllClose(
write_debug_trace(x), [9.0 + np.sqrt(3.0), 16.0 + 2.0])
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
# Check that the .metadata DebugEvents data file has been created, even
# before FlushExecutionFiles() is called.
self.assertGreater(reader.starting_wall_time(), 0)
self.assertTrue(reader.tensorflow_version())
self.assertTrue(reader.tfdbg_file_version().startswith("debug.Event"))
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
# Before FlushExecutionFiles() is called, the .graph_execution_traces file
# ought to be empty.
with self.assertRaises(StopIteration):
next(graph_trace_iter)
# Flush the circular buffer.
self.writer.FlushExecutionFiles()
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
# The circular buffer has a size of 4. So only the data from the
# last two iterations should have been written to self.dump_root.
for _ in range(2):
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, 0)
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "Square")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [9.0, 16.0])
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, 0)
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "beafdead")
self.assertEqual(trace.op_name, "Sqrt")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [np.sqrt(3.0), 2.0])
# Only the graph-execution trace of the last iteration should be written
# to self.dump_root.
with self.assertRaises(StopIteration):
next(graph_trace_iter)
@test_util.run_in_graph_and_eager_modes
def testControlFlow(self):
@def_function.function
def collatz(x):
counter = constant_op.constant(0, dtype=dtypes.int32)
while math_ops.greater(x, 1):
counter = counter + 1
gen_debug_ops.debug_identity_v2(
x,
tfdbg_context_id="deadbeaf",
op_name="x",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root])
if math_ops.equal(x % 2, 0):
x = math_ops.div(x, 2)
else:
x = x * 3 + 1
return counter
x = constant_op.constant(10, dtype=dtypes.int32)
self.evaluate(collatz(x))
self.writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
try:
x_values = []
timestamp = 0
while True:
debug_event = next(graph_trace_iter).debug_event
self.assertGreater(debug_event.wall_time, timestamp)
timestamp = debug_event.wall_time
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "x")
self.assertEqual(trace.output_slot, 0)
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
x_values.append(int(tensor_util.MakeNdarray(trace.tensor_proto)))
except StopIteration:
pass
# Due to the circular buffer, only the last 4 iterations of
# [10, 5, 16, 8, 4, 2] should have been written.
self.assertAllEqual(x_values, [16, 8, 4, 2])
@test_util.run_in_graph_and_eager_modes
def testTwoDumpRoots(self):
another_dump_root = os.path.join(self.dump_root, "another")
another_debug_url = "file://%s" % another_dump_root
another_writer = debug_events_writer.DebugEventsWriter(
another_dump_root, "test_tfdbg_run")
@def_function.function
def write_debug_trace(x):
# DebugIdentityV2 is a stateful op. It ought to be included by auto
# control dependency.
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root, another_debug_url])
return square + 1.0
x = np.array([3.0, 4.0])
self.assertAllClose(write_debug_trace(x), np.array([10.0, 17.0]))
self.writer.FlushExecutionFiles()
another_writer.FlushExecutionFiles()
another_writer.Close()
for debug_root in (self.dump_root, another_dump_root):
with debug_events_reader.DebugEventsReader(debug_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
debug_event = next(graph_trace_iter).debug_event
trace = debug_event.graph_execution_trace
self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
self.assertEqual(trace.op_name, "")
self.assertEqual(trace.tensor_debug_mode,
debug_event_pb2.TensorDebugMode.FULL_TENSOR)
tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
self.assertAllClose(tensor_value, [9.0, 16.0])
with self.assertRaises(StopIteration):
next(graph_trace_iter)
class DebugIdentityV2OpUninitializedWriterTest(
dumping_callback_test_lib.DumpingCallbackTestBase):
"""Tests for DebugIdentityV2Op: when DebugEventsWriter is not initialized.
This case can occur when DebugIdentityV2Ops are running on a remote
TensorFlow server (e.g., a TPU worker).
"""
@test_util.run_in_graph_and_eager_modes
def testInvokingDebugIdentityV2OpBeforeCreatingDebugEventsWriterWorks(self):
if not compat.forward_compatible(2020, 6, 24):
self.skipTest("Functionality currently not supported.")
circular_buffer_size = 3
@def_function.function
def write_debug_trace(x):
# DebugIdentityV2 is a stateful op. It ought to be included by auto
# control dependency.
square = math_ops.square(x)
gen_debug_ops.debug_identity_v2(
square,
tfdbg_context_id="deadbeaf",
op_name="Square",
output_slot=0,
tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
debug_urls=["file://%s" % self.dump_root],
circular_buffer_size=circular_buffer_size)
return square
# The DebugIdentityV2 ops are invoked *before* a DebugEventsWriter at the
# same dump root is created.
for i in range(circular_buffer_size * 2):
self.assertAllClose(
write_debug_trace(np.array([i]).astype(np.float32)), [i**2.0])
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
"test_tfdbg_run",
circular_buffer_size)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
graph_trace_iter = reader.graph_execution_traces_iterators()[0]
graph_execution_traces = []
while True:
try:
graph_execution_traces.append(
next(graph_trace_iter).debug_event.graph_execution_trace)
except StopIteration:
break
self.assertLen(graph_execution_traces, circular_buffer_size)
for i in range(circular_buffer_size):
self.assertAllClose(
tensor_util.MakeNdarray(graph_execution_traces[i].tensor_proto),
[(i + circular_buffer_size)**2.0])
class DebugNumericSummaryV2Test(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpReduceInfNanThreeSlots(self):
def debug_summary(x):
return self.evaluate(gen_debug_ops.debug_numeric_summary_v2(
x, tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS)))
self.assertAllEqual(
debug_summary(constant_op.constant([])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(42.0)), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant([3.0, 4.0])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([3.0, -np.inf]))),
[-np.inf, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([[0, 0], [np.nan, 0]]))),
[0.0, 0.0, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]]))),
[0.0, np.inf, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]]))),
[-np.inf, np.inf, np.nan])
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, 0.0])
x[1, 41] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, np.nan])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpLargeTensorIDError(self):
modes = [
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
debug_event_pb2.TensorDebugMode.SHAPE,
]
# Maximum allowed tensor_id
tensor_id = np.power(2, 53, dtype=np.int64)
for mode in modes:
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
# Incrementing by one should error
tensor_id += 1
for mode in modes:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x[1, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[43, 99] = np.nan
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.zeros([100, 100, 50], dtype=np.float64)
x[0, 0, 1] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 1.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 2.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 2.0, 1.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 0.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 1.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 1.0, 1.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, :] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 10000.0, 0.0, 0.0, 100.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83:85] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 0.0])
x[1:9, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 8.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 9701, 0.0, 0.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
    # Assert that the same op returns a consistent value.
x = np.zeros([100, 100], dtype=np.float16)
x[3, 4] = -np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeEmpty(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
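    # In SHAPE mode the op packs [tensor_id, dtype enum, rank, element count,
    # up to six dimension sizes (zero-padded)], as the SHAPE tests below show.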
tensor, tensor_id = debug_summary(constant_op.constant(0.0))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([3, 4], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 2.0, 12.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0])
x = np.ones([1, 2, 3, 4, 5, 6], dtype=np.float16)
x[0, 1, 2, 2, 2, 2] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor,
[tensor_id, 19, 6.0, 2 * 3 * 4 * 5 * 6, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x = np.zeros([2], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.ones([1, 2, 3, 4, 5, 6, 7], dtype=np.double)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [
tensor_id, 2.0, 7.0, 2 * 3 * 4 * 5 * 6 * 7, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0
])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
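    # As the assertions below show, FULL_HEALTH packs its result as
    # [tensor_id, device id (-1 in these tests), dtype enum, rank, num_elements,
    #  neg_inf_count, pos_inf_count, nan_count, neg_count, zero_count, pos_count].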
tensor, tensor_id = debug_summary(constant_op.constant([]))
expected = [tensor_id, -1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
expected = [tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
expected = [tensor_id, -1, 1, 1, 2, 0, 0, 0, 0, 0, 2]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3, -np.inf], dtype=np.float32)))
expected = [tensor_id, -1, 1, 1, 2, 1, 0, 0, 0, 0, 1]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]], dtype=np.float64)))
expected = [tensor_id, -1, 2, 2, 4, 0, 0, 1, 0, 3, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, 0], [np.nan, np.inf]], dtype=np.float16)))
expected = [tensor_id, -1, 19, 2, 4, 0, 1, 1, 0, 2, 0]
self.assertAllEqual(tensor, expected)
tensor, tensor_id = debug_summary(
constant_op.constant(
np.array([[0, np.inf], [np.nan, -np.inf]], dtype=np.float32)))
expected = [tensor_id, -1, 1, 2, 4, 1, 1, 1, 0, 1, 0]
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
def tensor_counts(arr):
counts = [len(np.shape(arr)), np.size(arr), 0, 0, 0, 0, 0, 0]
for n in np.ravel(arr):
if np.isneginf(n):
counts[2] += 1
elif np.isposinf(n):
counts[3] += 1
elif np.isnan(n):
counts[4] += 1
elif n < 0.:
counts[5] += 1
elif n == 0.:
counts[6] += 1
else:
counts[7] += 1
return counts
x = np.zeros([50, 50], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[40:50, 40:50] = 10
x[3, 20] = -10
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 19] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.ones([25, 25, 50], dtype=np.float32) * np.inf
x[:, :, 1] = np.nan
x[:, :, 2] = -np.inf
x[:, :, 3] = -1
x[:, :, 4] = 0
x[:, :, 5] = 1
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 1] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x[0, 0, 0] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 1,] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
expected = [tensor_id, -1, 2] + tensor_counts(x)
self.assertAllEqual(tensor, expected)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpFullHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
    # Assert that the same op returns a consistent value.
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
x[0:4, 3] = np.inf
x[90:100, 90:100] = 10
x[3, 20] = -10
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.ones((100, 200, 3, 10), np.double)
x[1, 30, 2] = 10
x[5, :, 0, 1] = np.nan
x[90:100, 150, :, :] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
def testCheckNumericsV2OpNegativeAndPositiveInf(self):
"""Test that CheckNumericsV2 op distinguishes negative and positive infs."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf and \+Inf values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2OpNegativeAndPositiveInfAndNaN(self):
"""CheckNumericsV2 op distinguishes - & + infs when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([-1.0, 1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had -Inf, \+Inf, and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
def testCheckNumericsV2PositiveInfAndNaN(self):
"""Test that CheckNumericsV2 op shows sign of inf when nan is present."""
with self.session(graph=ops.Graph()):
t1 = constant_op.constant([0.0, 1.0])
t2 = constant_op.constant([0.0, 0.0])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"pass through test.*had \+Inf and NaN values"):
self.evaluate(
array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
|
from functools import partial
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from drivers.drivers_base import BaseSeleniumBrowser, DriverChoices, BaseRequests
class SeleniumPhantom(BaseSeleniumBrowser):
    '''Selenium driver wrapper for the PhantomJS headless browser.'''
def __init__(self):
'''
'''
super().__init__('PhantomJS')
self._headers={}
self._header_name=''
def phantom_command(self):
        '''Register PhantomJS's executePhantomScript endpoint and hook onResourceReceived.'''
script_for_status="""
this.onResourceReceived = function(request) {
this.request_response=request
}.bind(this);
"""
#add phantomjs execute endpoint
phantom_exc_uri='/session/$sessionId/phantom/execute'
cmds=self.browser.command_executor._commands
cmds['executePhantomScript'] = ('POST', phantom_exc_uri)
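        # Registering the endpoint lets execute('executePhantomScript', ...)
        # POST raw scripts straight to the PhantomJS process.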
self.browser.execute('executePhantomScript',
{'script': script_for_status, 'args': []})
def driver_script(self, script, args=[]):
'''
        Run a script through PhantomJS's internal executePhantomScript endpoint.
'''
return self.phantom_call({'script': script, 'args': args})
def set_header(self, confObject):
        '''Build custom request headers from the config object and push them to PhantomJS.'''
headersObj=[h for h in confObject.driver.headers.all()]
if not len(headersObj):return
self._headers={h.field_name:h.field_value
for h in headersObj
#Accept-Encoding - avoid phantom bug
if h.field_name not in ['Accept-Encoding']}
self._header_name=headersObj[0].header_name
        header_script="""
        this.customHeaders = {headers};
        """.format(headers=str(self._headers))
        self.driver_script(header_script)
def load_confs(self, confObject):
        '''Prepare the PhantomJS driver: register the script endpoint, load headers, set the window size.'''
#prepare phantomjs driver call
self.phantom_command()
self.phantom_call=partial(self.browser.execute, 'executePhantomScript')
#load headers
self.set_header(confObject)
#specific confs
self.browser.set_window_size(1124, 850)
self.pid=self.browser.service.process.pid
def get_headers(self):
'''
** Cookie from response + Request headers **
'''
cookie_script="""
return this.cookies;
"""
if 'Cookie' in self._headers:return self._headers
cookies=self.driver_script(cookie_script)['value']
cookie_string=' ;'.join(['{}={}'.format(c['name'],c['value'])
for c in cookies])
self._headers.update({'Cookie':cookie_string})
return self._headers
def xpathToaction(self, xpathSelector):
"""
"""
return self.browser.find_elements_by_xpath(xpathSelector)
class SeleniumRC(BaseSeleniumBrowser):
    '''Selenium driver wrapper for a remote WebDriver (Selenium Grid) endpoint.'''
def __init__(self):
'''
'''
super().__init__('Remote')
self._port=4444
self._host='127.0.0.1'
self._command_executor=None
self._exec_str='http://{host}:{port}/wd/hub'
self._remote_type=DesiredCapabilities.FIREFOX
def load_confs(self, confObject):
        '''Read host, port, and remote browser type from the config object.'''
if confObject.driver.port:
self._port=confObject.driver.port
if confObject.driver.host:
self._host=confObject.driver.host
if confObject.driver.remote_browser_type:
rbt=confObject.driver.remote_browser_type.upper()
self._remote_type=getattr(DesiredCapabilities, rbt)
self._command_executor=self._exec_str.format(host=self._host,
port=self._port)
def build_driver(self, proxy_port=None):
        '''Build the remote WebDriver using the configured command executor.'''
if proxy_port:
            raise NotImplementedError('[-] Proxy not working '
                                      'with remote server yet')
if not self._command_executor:
self._command_executor=self._exec_str.format(host=self._host,
port=self._port)
self.browser=getattr(self._driver, self._driver_name)(
command_executor=self._command_executor,
desired_capabilities=self._remote_type)
class LeanRequests(BaseRequests):
    '''Plain HTTP driver built on BaseRequests; it only carries configured request headers.'''
def __init__(self):
'''
'''
super().__init__()
self._headers={}
self._header_name=''
def load_confs(self, confObject):
        '''Collect header fields from the config object and apply them via set_header.'''
headersObj=[h for h in confObject.driver.headers.all()]
if not len(headersObj):return
headers={h.field_name:h.field_value
for h in headersObj}
self.set_header(**headers)
self._header_name=headersObj[0].header_name
def set_header(self, **kwargs):
        '''Store the given keyword arguments as the request headers.'''
self._headers=kwargs
class SeleniumChrome(BaseSeleniumBrowser):
    '''Selenium driver wrapper for headless Chrome.'''
def __init__(self):
'''
'''
options = webdriver.ChromeOptions()
options.add_argument("headless")
super().__init__('Chrome', chrome_options=options)
self._headers={}
self._header_name=''
def set_header(self, confObject):
        '''Build custom request headers from the config object (mirrors the PhantomJS logic).'''
headersObj=[h for h in confObject.driver.headers.all()]
if not len(headersObj):return
self._headers={h.field_name:h.field_value
for h in headersObj
#Accept-Encoding - avoid phantom bug
if h.field_name not in ['Accept-Encoding']}
self._header_name=headersObj[0].header_name
        header_script="""
        this.customHeaders = {headers};
        """.format(headers=str(self._headers))
        self.driver_script(header_script)
def load_confs(self, confObject):
        '''Load headers and set the window size for the Chrome driver.'''
#load headers
self.set_header(confObject)
#specific confs
self.browser.set_window_size(1124, 850)
self.pid=self.browser.service.process.pid
def xpathToaction(self, xpathSelector):
"""
"""
return self.browser.find_elements_by_xpath(xpathSelector)
DriverChoices.register(SeleniumPhantom)
DriverChoices.register(LeanRequests)
DriverChoices.register(SeleniumRC)
DriverChoices.register(SeleniumChrome)
|
|
"""
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
from collections import namedtuple
from django.apps import apps
from django.core.exceptions import FieldDoesNotExist
from django.db.backends import utils
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct')
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
    A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
contains_aggregate = False
def __init__(self, sql, params):
self.data = sql, list(params)
def as_sql(self, compiler=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
& and |).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + list(kwargs.items()))
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def clone(self):
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
joins_before = query.tables[:]
clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
joins_to_promote = [j for j in joins if j not in joins_before]
query.promote_joins(joins_to_promote)
return clause
@classmethod
def _refs_aggregate(cls, obj, existing_aggregates):
if not isinstance(obj, tree.Node):
aggregate, aggregate_lookups = refs_aggregate(obj[0].split(LOOKUP_SEP), existing_aggregates)
if not aggregate and hasattr(obj[1], 'refs_aggregate'):
return obj[1].refs_aggregate(existing_aggregates)
return aggregate, aggregate_lookups
for c in obj.children:
aggregate, aggregate_lookups = cls._refs_aggregate(c, existing_aggregates)
if aggregate:
return aggregate, aggregate_lookups
return False, ()
def refs_aggregate(self, existing_aggregates):
if not existing_aggregates:
return False
return self._refs_aggregate(self, existing_aggregates)
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
def __get__(self, instance, owner):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
non_deferred_model = instance._meta.proxy_for_model
opts = non_deferred_model._meta
assert instance is not None
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
f = opts.get_field(self.field_name)
except FieldDoesNotExist:
f = [f for f in opts.fields if f.attname == self.field_name][0]
name = f.name
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, name)
if val is None:
instance.refresh_from_db(fields=[self.field_name])
val = getattr(instance, self.field_name)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be fetched
field is a primary key field.
"""
opts = instance._meta
f = opts.get_field(name)
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
* restricted - a boolean field, indicating if the field list has been
       manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
raise InvalidQuery("Field %s.%s cannot be both deferred"
" and traversed using select_related"
" at the same time." %
(field.model._meta.object_name, field.name))
return False
return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
    being replaced with DeferredAttribute objects.
"""
if not attrs:
return model
# Never create deferred models based on deferred model
if model._deferred:
# Deferred models are proxies for the non-deferred model. We never
# create chains of defers => proxy_for_model is the non-deferred
# model.
model = model._meta.proxy_for_model
# The app registry wants a unique name for each model, otherwise the new
# class won't be created (we get an exception). Therefore, we generate
# the name using the passed in attrs. It's OK to reuse an existing class
# object if the attrs are identical.
name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(attrs)))
name = utils.truncate_name(name, 80, 32)
try:
return apps.get_model(model._meta.app_label, name)
except LookupError:
class Meta:
proxy = True
app_label = model._meta.app_label
overrides = {attr: DeferredAttribute(attr, model) for attr in attrs}
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
return type(str(name), (model,), overrides)
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
def refs_aggregate(lookup_parts, aggregates):
"""
A helper method to check if the lookup_parts contains references
to the given aggregates set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in aggregates and aggregates[level_n_lookup].contains_aggregate:
return aggregates[level_n_lookup], lookup_parts[n:]
return False, ()
def refs_expression(lookup_parts, annotations):
"""
A helper method to check if the lookup_parts contains references
to the given annotations set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
|
|
# Thanks to Zhao Yu for converting the .ipynb notebook to
# this simplified Python script that I edited a little.
# Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
    X_test = load_X(X_test_signals_paths)  # [2947, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
        # Subtract 1 from each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# exit()
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
            self.n_steps = len(X_train[0])  # 32 time steps per series (after the reshape to [-1, 32, 36] above)
# Training
self.learning_rate = 0.0025 # 1e-4
self.lambda_loss_amount = 0.0015
self.training_epochs = 300
self.batch_size = 640
# LSTM structure
            self.n_inputs = len(X_train[0][0])  # 36 features per time step after the reshape above (originally 9)
self.n_hidden = 32 # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
# 'output': tf.Variable(tf.random_normal([4, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
# 'hidden': tf.Variable(tf.random_normal([16], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
    # [None, n_steps, n_inputs] = [None, 32, 36] after the reshape above
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
        # return a random value drawn from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
    def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
        # For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([5, 5, 1, 4])
        b_conv1 = bias_variable([4])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second Convolutional Layer
W_conv2 = weight_variable([5, 5, 4, 6])
b_conv2 = weight_variable([6])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Third Convolutional Layer
W_conv3 = weight_variable([5, 5, 6, 8])
b_conv3 = weight_variable([8])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)
# Forth Convolutional Layer
W_conv4 = weight_variable([5, 5, 8, 1])
b_conv4 = weight_variable([1])
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = max_pool_2x2(h_conv4)
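        # Because every max_pool_2x2 here uses stride 1 with SAME padding, the
        # spatial size stays 32x36, and the last conv layer maps back to a
        # single channel, so the activations can be reshaped to [-1, 32, 36].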
# h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
# feature_mat = h_pool2
h_pool4 = tf.reshape(h_pool4, shape=[-1, 32, 36])
feature_mat = h_pool4
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
        # Originally: [0,1,2] = [batch_size, 128, 9]; here [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
# New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9
# New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--n_steps--")
print(hidden)
# exit()
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
        lstm_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
        # Get LSTM outputs; the states are internal to the LSTM cells, they are not our concern here
        outputs, _ = tf.nn.rnn(lstm_layers, hidden, dtype=tf.float32)
        # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
        lstm_last_output = outputs[-1]  # only take the last time step's output, shape: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# exit()
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
tf.initialize_all_variables().run()
best_accuracy = 0.0
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
        for start, end in zip(range(0, config.train_count, config.batch_size),  # (0, 7352, 640)
                              range(config.batch_size, config.train_count + 1,
                                    config.batch_size)):  # (640, 7353, 640)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
|
|
from .. import exception
import token
class TokenStream:
"""Provides a stream abstraction for tokens in the BASIC language."""
def __init__(self, buffer):
"""Initializes based on a string to tokenize."""
self.buffer = buffer
self.offset = 0 # current offset within the buffer
self.peek = None # a single buffered token to peek at
def AtTerminator(self):
"""Checks if the stream is at a statement terminator.
Statement terminators include EOF, ELSE, and a colon.
"""
        tok = self.Peek()
        return (tok.IsType(token.TYPE_EOF) or
                tok.IsType(token.TYPE_COLON) or
                tok.IsKeyword('ELSE'))
def Eof(self):
"""Checks if the stream is at its end."""
return (self.offset >= len(self.buffer))
def Get(self):
"""Return the next token in the stream.
This will advance the state of the stream. Use Peek instead for a read
without side-effects.
Returns:
parser.Token: The next token.
"""
if self.peek:
token = self.peek
self.peek = None
return token
return self._ReadToken()
def Peek(self):
"""Returns the next token in the stream without advancing the pointer.
This will not advance the state of the stream. Use Get instead for a
read with side-effects.
Returns:
parser.Token: The next token.
"""
if not self.peek:
self.peek = self._ReadToken()
return self.peek
def RequireId(self):
"""Reads the next token in the stream, which must be an ID token type.
Returns:
parser.Token: The next token.
Raises:
runtime.TokenException if the token is not an ID type.
"""
tok = self.Get()
if tok.IsId():
return tok
raise exception.TokenException('Unexpected input "%s"' % str(tok))
def RequireInt(self):
"""Reads the next token in the stream, which must be an integer type.
Returns:
parser.Token: The next token.
Raises:
runtime.TokenException if the token is not an integer type.
"""
tok = self.Get()
if tok.IsInt():
return tok
raise exception.TokenException('Unexpected input "%s"' % str(tok))
def RequireKeyword(self, keyword):
"""Reads the next token in the stream, which must be the given keyword.
Args:
keyword (str): The keyword to match.
Returns:
parser.Token: The next token.
Raises:
runtime.TokenException if the token is not the given keyword.
"""
tok = self.Get()
if tok.IsType(token.TYPE_KEYWORD) and tok.value == keyword:
return tok
raise exception.TokenException('Unexpected input "%s"' % str(tok))
def Require(self, type):
"""Reads the next token in the stream, which must match the given type.
Args:
type (token.TYPE_*): The type to match.
Returns:
parser.Token: The next token.
Raises:
exception.TokenException if the token is not of the required type.
"""
tok = self.Get()
if tok.type == type:
return tok
raise exception.TokenException('Unexpected input "%s"' % str(tok))
def Reset(self):
"""Resets the internal state so that subsequent reads start over."""
self.offset = 0
self.peek = None
# The states of the finite-state machine for reading tokens.
STATE_INIT = 1
STATE_BASE = 2
STATE_COLON = 3
STATE_COMMA = 4
STATE_COMMENT = 5
STATE_DIVIDE = 6
STATE_EQUAL = 7
STATE_FLOAT1 = 8
STATE_FLOAT2 = 9
STATE_FLOAT3 = 10
STATE_GT = 11
STATE_ID = 12
STATE_INT = 13
STATE_INT_BIN = 14
STATE_INT_HEX = 15
STATE_LPAREN = 16
STATE_LT = 17
STATE_MINUS = 18
STATE_PLUS = 19
STATE_POWER = 20
STATE_QUESTION = 21
STATE_REM1 = 22
STATE_REM2 = 23
STATE_REM3 = 24
STATE_RPAREN = 25
STATE_SEMICOLON = 26
STATE_STRING = 27
STATE_TIMES = 28
STATE_EOF = 29
STATE_ERROR = 30
def _IsDigit(self, ch):
"""Checks whether this character is a digit."""
return ch >= '0' and ch <= '9'
def _IsHex(self, ch):
"""Checks whether this character is a hexadecimal digit."""
return ((ch >= '0' and ch <= '9') or
(ch >= 'A' and ch <= 'F') or
(ch >= 'a' and ch <= 'f'))
def _IsLetter(self, ch):
"""Checks whether this character is a letter."""
return (ch >= 'A' and ch <= 'Z') or (ch >= 'a' and ch <= 'z')
def _IsWhitespace(self, ch):
"""Checks whether this character is whitespace."""
return ch in (' ', '\t', '\n', '\r', '\f')
def _ReadToken(self):
"""Reads the next token from the buffer and returns it.
If no more tokens are available, returns the EOF token.
Returns:
parser.Token: The next token.
"""
acc = ''
state = self.STATE_INIT
ch = None
# Operate the FSM to create a token.
while True:
# Get a hold of the next character.
ch = '\0' if self.Eof() else self.buffer[self.offset]
# Dispatch based on the current state.
if state == self.STATE_INIT:
                if ch == '\0':
state = self.STATE_EOF
elif ch == 'R' or ch == 'r':
acc += ch
self.offset += 1
state = self.STATE_REM1
elif self._IsLetter(ch):
acc += ch
self.offset += 1
state = self.STATE_ID
elif self._IsDigit(ch):
acc += ch
self.offset += 1
state = self.STATE_INT
elif ch == '.':
acc += ch
self.offset += 1
state = self.STATE_FLOAT1
elif ch == '&':
self.offset += 1
state = self.STATE_BASE
elif ch == ':':
self.offset += 1
state = self.STATE_COLON
elif ch == ',':
self.offset += 1
state = self.STATE_COMMA
elif ch == "'":
self.offset += 1
state = self.STATE_COMMENT
elif ch == '/':
self.offset += 1
state = self.STATE_DIVIDE
elif ch == '=':
self.offset += 1
state = self.STATE_EQUAL
elif ch == '>':
self.offset += 1
state = self.STATE_GT
elif ch == '<':
self.offset += 1
state = self.STATE_LT
elif ch == '(':
self.offset += 1
state = self.STATE_LPAREN
elif ch == '-':
self.offset += 1
state = self.STATE_MINUS
elif ch == '+':
self.offset += 1
state = self.STATE_PLUS
elif ch == '^':
self.offset += 1
state = self.STATE_POWER
elif ch == '?':
self.offset += 1
state = self.STATE_QUESTION
elif ch == ')':
self.offset += 1
state = self.STATE_RPAREN
elif ch == ';':
self.offset += 1
state = self.STATE_SEMICOLON
elif ch == '"':
self.offset += 1
state = self.STATE_STRING
elif ch == '*':
self.offset += 1
state = self.STATE_TIMES
                elif self._IsWhitespace(ch):
                    self.offset += 1
                else:
                    state = self.STATE_ERROR
elif state == self.STATE_ID:
if self._IsLetter(ch) or self._IsDigit(ch):
acc += ch
self.offset += 1
elif ch == '%':
acc += ch
self.offset += 1
return token.Token(token.TYPE_ID_INT, acc)
elif ch == '$':
acc += ch
self.offset += 1
return token.Token(token.TYPE_ID_STRING, acc)
else:
return token.Token(token.TYPE_ID_FLOAT, acc)
elif state == self.STATE_BASE:
if ch == 'B' or ch == 'b':
self.offset += 1
state = self.STATE_INT_BIN
elif ch == 'H' or ch == 'h':
self.offset += 1
state = self.STATE_INT_HEX
else:
state = self.STATE_ERROR
elif state == self.STATE_INT_BIN:
if ch == '0' or ch == '1':
acc += ch
self.offset += 1
else:
return token.Token(token.TYPE_INT_BIN, int(acc, 2))
elif state == self.STATE_INT_HEX:
if self._IsHex(ch):
acc += ch
self.offset += 1
else:
return token.Token(token.TYPE_INT_HEX, int(acc, 16))
elif state == self.STATE_FLOAT1:
if self._IsDigit(ch):
acc += ch
self.offset += 1
elif ch == 'E' or ch == 'e':
acc += ch
self.offset += 1
state = self.STATE_FLOAT2
else:
return token.Token(token.TYPE_FLOAT, float(acc))
elif state == self.STATE_FLOAT2:
if ch == '+' or ch == '-' or self._IsDigit(ch):
acc += ch
self.offset += 1
state = self.STATE_FLOAT3
else:
state = self.STATE_ERROR
elif state == self.STATE_FLOAT3:
if self._IsDigit(ch):
acc += ch
self.offset += 1
else:
return token.Token(token.TYPE_FLOAT, float(acc))
elif state == self.STATE_REM1:
if ch == 'E' or ch == 'e':
acc += ch
self.offset += 1
state = self.STATE_REM2
else:
state = self.STATE_ID
elif state == self.STATE_REM2:
if ch == 'M' or ch == 'm':
acc = ''
self.offset += 1
state = self.STATE_REM3
else:
state = self.STATE_ID
elif state == self.STATE_REM3:
if ch == '\0':
return token.Token(token.TYPE_COMMENT, acc)
elif ch == ' ':
self.offset += 1
state = self.STATE_COMMENT
else:
acc = 'rem'
state = self.STATE_ID
elif state == self.STATE_COMMENT:
if ch == '\0':
return token.Token(token.TYPE_COMMENT, acc)
else:
acc += ch
self.offset += 1
elif state == self.STATE_STRING:
if ch == '\0':
return token.Token(token.TYPE_STRING, acc)
elif ch == '"':
self.offset += 1
return token.Token(token.TYPE_STRING, acc)
else:
acc += ch
self.offset += 1
elif state == self.STATE_GT:
if ch == '=':
self.offset += 1
return token.Token(token.TYPE_GEQ)
else:
return token.Token(token.TYPE_GT)
elif state == self.STATE_LT:
if ch == '=':
self.offset += 1
return token.Token(token.TYPE_LEQ)
elif ch == '>':
self.offset += 1
return token.Token(token.TYPE_NEQUAL)
else:
return token.Token(token.TYPE_LT)
elif state == self.STATE_COLON:
return token.Token(token.TYPE_COLON)
elif state == self.STATE_COMMA:
return token.Token(token.TYPE_COMMA)
elif state == self.STATE_DIVIDE:
return token.Token(token.TYPE_DIVIDE)
elif state == self.STATE_EQUAL:
return token.Token(token.TYPE_EQUAL)
elif state == self.STATE_LPAREN:
return token.Token(token.TYPE_LPAREN)
elif state == self.STATE_MINUS:
return token.Token(token.TYPE_MINUS)
elif state == self.STATE_PLUS:
return token.Token(token.TYPE_PLUS)
elif state == self.STATE_POWER:
return token.Token(token.TYPE_POWER)
elif state == self.STATE_QUESTION:
return token.Token(token.TYPE_KEYWORD, 'PRINT')
elif state == self.STATE_RPAREN:
return token.Token(token.TYPE_RPAREN)
elif state == self.STATE_SEMICOLON:
return token.Token(token.TYPE_SEMICOLON)
elif state == self.STATE_TIMES:
return token.Token(token.TYPE_TIMES)
elif state == self.STATE_EOF:
return token.Token(token.TYPE_EOF)
elif state == self.STATE_ERROR:
raise exception.TokenException(
"Unexpected char '%s' at column %d" % (ch, self.offset))
|
|
from itertools import chain
import warnings
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import RawSQL, Ref, Random, ColIndexRef
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (CURSOR, SINGLE, MULTI, NO_RESULTS,
ORDER_DIR, GET_ITERATOR_CHUNK_SIZE)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import get_order_dir, Query
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six.moves import zip
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side-effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.tables):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Returns a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, _ in order_by:
if expr.contains_aggregate:
continue
# We can skip References to select clause, as all expressions in
# the select clause are already part of the group by.
if isinstance(expr, Ref):
continue
expressions.append(expr)
having = self.query.having.get_group_by_cols()
for expr in having:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key. Currently only the MySQL form is
# implemented.
# MySQLism: however, columns in HAVING clause must be added to the
# GROUP BY.
if self.connection.features.allows_group_by_pk:
# The logic here is: if the main model's primary key is in the
# query, then set new_expressions to that field. If that happens,
# then also add having expressions to group by.
pk = None
for expr in expressions:
if (expr.output_field.primary_key and
getattr(expr.output_field, 'model') == self.query.model):
pk = expr
if pk:
expressions = [pk] + [expr for expr in expressions if expr in having]
return expressions
def get_select(self):
"""
Returns three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
ret.append((col, self.compile(col, select_format=True), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Returns a list of 2-tuples of form (expr, (sql, params)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for pos, field in enumerate(ordering):
if field == '?':
order_by.append((Random(), asc, False))
continue
if isinstance(field, int):
if field < 0:
field = -field
int_ord = desc
order_by.append((ColIndexRef(field), int_ord, True))
continue
col, order = get_order_dir(field, asc)
if col in self.query.annotation_select:
order_by.append((Ref(col, self.query.annotation_select[col]), order, True))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
expr = RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), [])
order_by.append((expr, order, False))
continue
if not self.query._extra or get_order_dir(field)[0] not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(field, self.query.get_meta(),
default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((RawSQL(*self.query.extra[col]), order, False))
else:
order_by.append((Ref(col, RawSQL(*self.query.extra[col])),
order, True))
result = []
seen = set()
for expr, order, is_ref in order_by:
sql, params = self.compile(expr)
if (sql, tuple(params)) in seen:
continue
seen.add((sql, tuple(params)))
result.append((expr, (sql, params, order, is_ref)))
return result
def get_extra_select(self, order_by, select):
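        # For a DISTINCT query (without explicit distinct fields), ORDER BY
        # expressions that are not already selected must be added to the select
        # list, since backends such as PostgreSQL require ORDER BY columns to
        # appear in the SELECT of a DISTINCT query.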
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, _, is_ref) in order_by:
if not is_ref and (sql, params) not in select_sql:
extra_select.append((expr, (sql, params), None))
return extra_select
def __call__(self, name):
"""
Backwards-compatibility shim so that calling a SQLCompiler is equivalent to
calling its quote_name_unless_alias method.
"""
warnings.warn(
"Calling a SQLCompiler directly is deprecated. "
"Call compiler.quote_name_unless_alias instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.quote_name_unless_alias(name)
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or name in self.query.external_aliases):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
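        # Prefer a vendor-specific implementation (e.g. as_postgresql or
        # as_mysql) when the node provides one; otherwise fall back to the
        # generic as_sql().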
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format:
return node.output_field.select_format(self, sql, params)
return sql, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
params = []
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
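# Build the SELECT list: explicit aliases take precedence; otherwise
# generated Col1, Col2, ... names are used when with_col_aliases is set.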
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented.")
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, order, _) in order_by:
ordering.append('%s %s' % (o_sql, order))
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
# If we've been asked for a NOWAIT query but the backend does
# not support it, raise a DatabaseError otherwise we could get
# an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql()
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of column expressions, one per concrete field that
should be loaded, suitable for compiling into the SELECT clause.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Returns a quoted list of fields to use in the DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.rel and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(t.get_col(alias), order, False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_order_by() and get_distinct().
Note that get_order_by() and get_distinct() must produce the same
target columns on the same input, as the prefixes of get_order_by()
and get_distinct() must match. Executing SQL where this is not true
is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or (cur_depth == 1 and f.name in requested):
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.rel.to,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.rel.to._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.rel.to._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
_, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[-1]
from_parent = issubclass(model, opts.model)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to a mapping of
table names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters, field_converters, expression)
return converters
def apply_converters(self, row, converters):
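# Backend-level converters run before field-level converters; each
# converter receives the value produced by the previous one.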
row = list(row)
for pos, (backend_converters, field_converters, field) in converters.items():
value = row[pos]
for converter in backend_converters:
value = converter(value, field, self.query.context)
for converter in field_converters:
value = converter(value, self.connection, self.query.context)
row[pos] = value
return tuple(row)
def results_iter(self, results=None):
"""
Returns an iterator over the results from executing this query.
"""
converters = None
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
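# Selecting a constant ('a': 1) instead of the real columns keeps this
# existence check as cheap as possible.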
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count
)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
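# Render this query as an 'IN (...)' condition when matching a single
# column, or as a correlated 'EXISTS (...)' clause when several columns
# must match.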
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(
getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
connection=self.connection
) for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
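# A single multi-row INSERT is only possible when every placeholder is a
# plain '%s', no id needs to be returned, and the backend supports bulk
# inserts.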
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
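# Returning the inserted id is only supported for a single-object insert.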
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False)
elif hasattr(val, 'prepare_database_save'):
if field.rel:
val = val.prepare_database_save(field)
else:
raise TypeError("Database is trying to update a relational field "
"of type %s with a value of type %s. Make sure "
"you are setting the correct relations" %
(field.__class__.__name__, val.__class__.__name__))
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
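# 'query' now selects only the primary keys of the rows matched by the
# original filters; it is used below either to pre-select the ids or as
# a 'pk__in' subquery.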
super(SQLUpdateCompiler, self).pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
# Empty SQL for the inner query is a marker that the inner query
# isn't going to produce any results. This can happen when doing
# LIMIT 0 queries (generated by qs[:0]) for example.
if not self.query.subquery:
raise EmptyResultSet
sql, params = [], []
for annotation in self.query.annotation_select.values():
agg_sql, agg_params = self.compile(annotation)
sql.append(agg_sql)
params.extend(agg_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
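# Aggregate over the inner query by wrapping it as a subquery, producing
# SQL of the shape: SELECT <aggregates> FROM (<inner query>) subquery.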
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[0:col_count] for r in rows]
finally:
cursor.close()
|
|
from corehq.apps.reports.datatables import DataTablesHeader
from corehq.apps.reports.graph_models import MultiBarChart, Axis
from corehq.apps.reports.sqlreport import calculate_total_row, TableDataFormat, DataFormatter
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.reports.standard import DatespanMixin, CustomProjectReport, ProjectReportParametersMixin
from .filters import AgeFilter, GenderFilter, GroupUserFilter, GroupFilter, ALL_CVSU_GROUP
from .sqldata import (ChildProtectionData, ChildrenInHouseholdData,
ChildProtectionDataTrend, ChildrenInHouseholdDataTrend,
CVSUActivityData, CVSUActivityDataTrend,
CVSUIncidentResolutionData, CVSUIncidentResolutionDataTrend,
CVSUServicesData, CVSUServicesDataTrend)
class MultiReportPage(CustomProjectReport, ProjectReportParametersMixin, DatespanMixin):
"""
Report class that supports having multiple 'reports' shown at a time,
i.e. multiple sections of _graph and report table_.
Each section is represented by a 'data provider' class.
"""
title = ''
report_template_path = "cvsu/multi_report.html"
flush_layout = True
@property
@memoized
def rendered_report_title(self):
return self.title
@property
@memoized
def data_providers(self):
return []
@property
def report_context(self):
context = {
'reports': [self.get_report_context(dp) for dp in self.data_providers],
'title': self.title
}
return context
def get_report_context(self, data_provider):
headers = DataTablesHeader(*[c.data_tables_column for c in data_provider.columns])
if self.needs_filters:
rows = []
charts = []
total_row = []
else:
formatter = DataFormatter(TableDataFormat(data_provider.columns, no_value=self.no_value))
rows = list(formatter.format(data_provider.data, keys=data_provider.keys, group_by=data_provider.group_by))
charts = list(self.get_chart(
rows,
data_provider.columns,
x_label=data_provider.chart_x_label,
y_label=data_provider.chart_y_label,
has_total_column=data_provider.has_total_column
))
total_row = list(calculate_total_row(rows))
if total_row:
total_row[0] = 'Total'
context = dict(
report_table=dict(
title=data_provider.title,
headers=headers,
rows=rows,
total_row=total_row,
default_rows=self.default_rows,
datatables=True
),
charts=charts,
chart_span=12
)
return context
@property
def export_table(self):
reports = [r['report_table'] for r in self.report_context['reports']]
return [self._export_table(r['title'], r['headers'], r['rows'], total_row=r['total_row']) for r in reports]
def _export_table(self, export_sheet_name, headers, formatted_rows, total_row=None):
def _unformat_row(row):
return [col.get("sort_key", col) if isinstance(col, dict) else col for col in row]
table = headers.as_export_table
rows = [_unformat_row(row) for row in formatted_rows]
table.extend(rows)
if total_row:
table.append(_unformat_row(total_row))
return [export_sheet_name, table]
def get_chart(self, rows, columns, x_label, y_label, has_total_column=False):
"""
Get a MultiBarChart model for the given set of rows and columns.
:param rows: 2D list of report data. Assumes index 0 of each row is the row label
:param columns: list of DatabaseColumn objects
"""
end = len(columns)
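# When the data includes a trailing total column, leave it out of the
# chart so only the per-category series are plotted.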
if has_total_column:
end -= 1
categories = [c.data_tables_column.html for c in columns[1:end]]
chart = MultiBarChart('', x_axis=Axis(x_label), y_axis=Axis(y_label, ' ,d'))
chart.rotateLabels = -45
chart.marginBottom = 120
self._chart_data(chart, categories, rows)
return [chart]
def _chart_data(self, chart, series, data, start_index=1, x_fn=None, y_fn=None):
xfn = x_fn or (lambda x: x['html'])
yfn = y_fn or (lambda y: y['sort_key'])
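# Series i plots, for every row, column (start_index + i) against the
# row label held in column 0.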
for i, s in enumerate(series):
chart.add_dataset(s, [{'x': xfn(d[0]), 'y': yfn(d[start_index + i])} for d in data])
class CVSUReport(MultiReportPage):
no_value = {'sort_key': 0, 'html': u'\u2014'}
default_rows = 100
datespan_default_days = 30
printable = True
exportable = True
filter_group_name = 'All CVSUs'
@property
def location(self):
cvsu = 'All CVSU'
group = 'All Districts'
if self.group_id == ALL_CVSU_GROUP:
return group
if self.individual:
cvsu = self.CommCareUser.get_by_user_id(self.individual).raw_username
if self.group and self.group_id != ALL_CVSU_GROUP:
group = self.group.name
return '%s, %s' % (cvsu, group)
@property
def daterange(self):
format = "%d %b %Y"
st = self.datespan.startdate.strftime(format)
en = self.datespan.enddate.strftime(format)
return "%s to %s" % (st, en)
@property
def subtitle(self):
if self.needs_filters:
return dict(subtitle1='', subtitle2='')
return dict(subtitle1="Date range: %s" % self.daterange,
subtitle2="CVSU Location: %s" % self.location)
@property
def age(self):
return AgeFilter.get_value(self.request, self.domain)
@property
def age_display(self):
return AgeFilter.age_display_map[self.age] if self.age else 'All'
@property
def gender(self):
return GenderFilter.get_value(self.request, self.domain)
@property
def individual(self):
return GroupUserFilter.get_value(self.request, self.domain).get('cvsu')
@property
def report_context(self):
context = super(CVSUReport, self).report_context
context.update(self.subtitle)
return context
class ChildProtectionReport(CVSUReport):
title = 'CVSU CHILD PROTECTION AND GENDER BASED VIOLENCE LOCATION REPORT'
name = "Location report"
slug = "child_protection_location"
fields = (DatespanMixin.datespan_field, "custom.apps.cvsu.filters.GroupFilter",
"custom.apps.cvsu.filters.AgeFilter", "custom.apps.cvsu.filters.GenderFilter")
@property
def group_id(self):
return GroupFilter.get_value(self.request, self.domain)
@property
def subtitle(self):
if self.needs_filters:
return dict(subtitle='')
gender = self.gender or 'All'
subtitle = super(ChildProtectionReport, self).subtitle
subtitle.update({
'subtitle2': "%s, Survivor age: %s, Survivor gender: %s" % (subtitle['subtitle2'], self.age_display, gender)
})
return subtitle
@property
def report_config(self):
config = dict(
domain=self.domain,
datespan=self.datespan,
age=self.age,
gender=self.gender,
group_id=self.group_id,
user_id=self.individual
)
return config
@property
@memoized
def data_providers(self):
config = self.report_config
return [
ChildProtectionData(config=config),
ChildrenInHouseholdData(config=config)
]
class ChildProtectionReportTrend(ChildProtectionReport):
title = 'CVSU CHILD PROTECTION AND GENDER BASED VIOLENCE TREND REPORT'
name = "Trend report"
slug = "child_protection_trend"
fields = (DatespanMixin.datespan_field, "custom.apps.cvsu.filters.GroupUserFilter",
"custom.apps.cvsu.filters.AgeFilter", "custom.apps.cvsu.filters.GenderFilter")
@property
def group_id(self):
return GroupUserFilter.get_value(self.request, self.domain).get('district')
@property
@memoized
def data_providers(self):
config = self.report_config
return [
ChildProtectionDataTrend(config=config),
ChildrenInHouseholdDataTrend(config=config)
]
class CVSUPerformanceReport(CVSUReport):
title = 'CVSU PERFORMANCE EVALUATION LOCATION REPORT'
name = "Location report"
slug = "cvsu_performance_location"
fields = (DatespanMixin.datespan_field, "custom.apps.cvsu.filters.GroupFilter")
@property
def group_id(self):
return GroupFilter.get_value(self.request, self.domain)
@property
def report_config(self):
config = dict(
domain=self.domain,
datespan=self.datespan,
group_id=self.group_id,
user_id=self.individual
)
return config
@property
@memoized
def data_providers(self):
config = self.report_config
return [
CVSUActivityData(config=config),
CVSUServicesData(config=config),
CVSUIncidentResolutionData(config=config)
]
class CVSUPerformanceReportTrend(CVSUPerformanceReport):
title = 'CVSU PERFORMANCE EVALUATION TREND REPORT'
name = "Trend report"
slug = "cvsu_performance_trend"
fields = (DatespanMixin.datespan_field, "custom.apps.cvsu.filters.GroupUserFilter")
@property
def group_id(self):
return GroupUserFilter.get_value(self.request, self.domain).get('district')
@property
@memoized
def data_providers(self):
config = self.report_config
return [
CVSUActivityDataTrend(config=config),
CVSUServicesDataTrend(config=config),
CVSUIncidentResolutionDataTrend(config=config)
]
|
|
"""
Tests for space utilities.
"""
import itertools
import warnings
import numpy as np
import theano
from theano import tensor
# Can't use nose.tools.assert_raises, only introduced in python 2.7. Use
# numpy.testing.assert_raises instead
from pylearn2.space import (SimplyTypedSpace,
VectorSpace,
Conv2DSpace,
CompositeSpace,
VectorSequenceSpace,
IndexSequenceSpace,
IndexSpace,
NullSpace,
is_symbolic_batch)
from pylearn2.utils import function, safe_zip
def test_np_format_as_vector2vector():
vector_space_initial = VectorSpace(dim=8*8*3, sparse=False)
vector_space_final = VectorSpace(dim=8*8*3, sparse=False)
data = np.arange(5*8*8*3).reshape(5, 8*8*3)
rval = vector_space_initial.np_format_as(data, vector_space_final)
assert np.all(rval == data)
def test_np_format_as_index2index():
index_space_initial = IndexSpace(max_labels=10, dim=1)
index_space_final = IndexSpace(max_labels=10, dim=1)
data = np.array([[0], [2], [1], [3], [5], [8], [1]])
rval = index_space_initial.np_format_as(data, index_space_final)
assert index_space_initial == index_space_final
assert np.all(rval == data)
index_space_downcast = IndexSpace(max_labels=10, dim=1, dtype='int32')
rval = index_space_initial.np_format_as(data, index_space_downcast)
assert index_space_initial != index_space_downcast
assert np.all(rval == data)
assert rval.dtype == 'int32' and data.dtype == 'int64'
def test_np_format_as_conv2d2conv2d():
conv2d_space_initial = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
conv2d_space_final = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5*8*8*3).reshape(5, 3, 8, 8)
rval = conv2d_space_initial.np_format_as(data, conv2d_space_final)
assert np.all(rval == data)
conv2d_space1 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 1, 0))
conv2d_space0 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5*8*8*3).reshape(5, 3, 8, 8)
rval = conv2d_space0.np_format_as(data, conv2d_space1)
nval = data.transpose(1, 0, 3, 2)
assert np.all(rval == nval)
def test_np_format_as_vector2conv2d():
vector_space = VectorSpace(dim=8*8*3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5*8*8*3).reshape(5, 8*8*3)
rval = vector_space.np_format_as(data, conv2d_space)
# Get data in a Conv2DSpace with default axes
new_axes = conv2d_space.default_axes
axis_to_shape = {'b': 5, 'c': 3, 0: 8, 1: 8}
new_shape = tuple([axis_to_shape[ax] for ax in new_axes])
nval = data.reshape(new_shape)
# Then transpose
nval = nval.transpose(*[new_axes.index(ax)
for ax in conv2d_space.axes])
assert np.all(rval == nval)
def test_np_format_as_conv2d2vector():
vector_space = VectorSpace(dim=8*8*3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5*8*8*3).reshape(5, 3, 8, 8)
rval = conv2d_space.np_format_as(data, vector_space)
nval = data.transpose(*[conv2d_space.axes.index(ax)
for ax in conv2d_space.default_axes])
nval = nval.reshape(5, 3 * 8 * 8)
assert np.all(rval == nval)
vector_space = VectorSpace(dim=8*8*3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 0, 1))
data = np.arange(5*8*8*3).reshape(3, 5, 8, 8)
rval = conv2d_space.np_format_as(data, vector_space)
nval = data.transpose(*[conv2d_space.axes.index(ax)
for ax in conv2d_space.default_axes])
nval = nval.reshape(5, 3 * 8 * 8)
assert np.all(rval == nval)
def test_np_format_as_conv2d_vector_conv2d():
conv2d_space1 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 1, 0))
vector_space = VectorSpace(dim=8*8*3, sparse=False)
conv2d_space0 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5*8*8*3).reshape(5, 3, 8, 8)
vecval = conv2d_space0.np_format_as(data, vector_space)
rval1 = vector_space.np_format_as(vecval, conv2d_space1)
rval2 = conv2d_space0.np_format_as(data, conv2d_space1)
assert np.allclose(rval1, rval2)
nval = data.transpose(1, 0, 3, 2)
assert np.allclose(nval, rval1)
def test_np_format_as_vectorsequence2vectorsequence():
vector_sequence_space1 = VectorSequenceSpace(dim=3, dtype='float32')
vector_sequence_space2 = VectorSequenceSpace(dim=3, dtype='float64')
data = np.random.uniform(low=0.0, high=1.0, size=(10, 3))
rval = vector_sequence_space1.np_format_as(data, vector_sequence_space2)
assert np.all(rval == data)
def test_np_format_as_indexsequence2indexsequence():
index_sequence_space1 = IndexSequenceSpace(max_labels=6, dim=1,
dtype='int16')
index_sequence_space2 = IndexSequenceSpace(max_labels=6, dim=1,
dtype='int32')
data = np.random.randint(low=0, high=5, size=(10, 1))
rval = index_sequence_space1.np_format_as(data, index_sequence_space2)
assert np.all(rval == data)
def test_np_format_as_indexsequence2vectorsequence():
index_sequence_space = IndexSequenceSpace(max_labels=6, dim=1)
vector_sequence_space = VectorSequenceSpace(dim=6)
data = np.array([[0], [1], [4], [3]])
rval = index_sequence_space.np_format_as(data, vector_sequence_space)
true_val = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0]])
assert np.all(rval == true_val)
def test_np_format_as_sequence2other():
vector_sequence_space = VectorSequenceSpace(dim=3)
vector_space = VectorSpace(dim=3)
data = np.random.uniform(low=0.0, high=1.0, size=(10, 3))
np.testing.assert_raises(ValueError, vector_sequence_space.np_format_as,
data, vector_space)
index_sequence_space = IndexSequenceSpace(max_labels=6, dim=1)
index_space = IndexSpace(max_labels=6, dim=1)
data = np.random.randint(low=0, high=5, size=(10, 1))
np.testing.assert_raises(ValueError, index_sequence_space.np_format_as,
data, index_space)
def test_np_format_as_composite_composite():
"""
Test using CompositeSpace.np_format_as() to convert between
composite spaces that have the same tree structure, but different
leaf spaces.
"""
def make_composite_space(image_space):
"""
Returns a composite space with a particular tree structure.
"""
return CompositeSpace((CompositeSpace((image_space,)*2),
VectorSpace(dim=1)))
shape = np.array([8, 11])
channels = 3
datum_size = channels * shape.prod()
composite_topo = make_composite_space(Conv2DSpace(shape=shape,
num_channels=channels))
composite_flat = make_composite_space(VectorSpace(dim=datum_size))
def make_vector_data(batch_size, space):
"""
Returns a batch of synthetic data appropriate to the provided space.
Supports VectorSpaces, and CompositeSpaces of VectorSpaces.
"""
if isinstance(space, CompositeSpace):
return tuple(make_vector_data(batch_size, subspace)
for subspace in space.components)
else:
assert isinstance(space, VectorSpace)
result = np.random.rand(batch_size, space.dim)
if space.dtype is not None:
return np.asarray(result, dtype=space.dtype)
else:
return result
batch_size = 5
flat_data = make_vector_data(batch_size, composite_flat)
composite_flat.np_validate(flat_data)
topo_data = composite_flat.np_format_as(flat_data, composite_topo)
composite_topo.np_validate(topo_data)
new_flat_data = composite_topo.np_format_as(topo_data,
composite_flat)
def get_shape(batch):
"""
Returns the (nested) shape(s) of a (nested) batch.
"""
if isinstance(batch, np.ndarray):
return batch.shape
else:
return tuple(get_shape(b) for b in batch)
def batch_equals(batch_0, batch_1):
"""
Returns true if all corresponding elements of two batches are
equal. Supports composite data (i.e. nested tuples of data).
"""
assert type(batch_0) == type(batch_1)
if isinstance(batch_0, tuple):
if len(batch_0) != len(batch_1):
return False
return np.all(tuple(batch_equals(b0, b1)
for b0, b1 in zip(batch_0, batch_1)))
else:
assert isinstance(batch_0, np.ndarray)
return np.all(batch_0 == batch_1)
assert batch_equals(new_flat_data, flat_data)
def test_vector_to_conv_c01b_invertible():
"""
Tests that the format_as methods between Conv2DSpace
and VectorSpace are invertible for the ('c', 0, 1, 'b')
axis format.
"""
rng = np.random.RandomState([2013, 5, 1])
batch_size = 3
rows = 4
cols = 5
channels = 2
conv = Conv2DSpace([rows, cols],
channels=channels,
axes=('c', 0, 1, 'b'))
vec = VectorSpace(conv.get_total_dimension())
X = conv.make_batch_theano()
Y = conv.format_as(X, vec)
Z = vec.format_as(Y, conv)
A = vec.make_batch_theano()
B = vec.format_as(A, conv)
C = conv.format_as(B, vec)
f = function([X, A], [Z, C])
X = rng.randn(*(conv.get_origin_batch(batch_size).shape)).astype(X.dtype)
A = rng.randn(*(vec.get_origin_batch(batch_size).shape)).astype(A.dtype)
Z, C = f(X, A)
np.testing.assert_allclose(Z, X)
np.testing.assert_allclose(C, A)
def test_broadcastable():
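# A batch of size 1 should be broadcastable along its batch axis,
# wherever that axis appears in the space's axis ordering.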
v = VectorSpace(5).make_theano_batch(batch_size=1)
np.testing.assert_(v.broadcastable[0])
c = Conv2DSpace((5, 5), channels=3,
axes=['c', 0, 1, 'b']).make_theano_batch(batch_size=1)
np.testing.assert_(c.broadcastable[-1])
d = Conv2DSpace((5, 5), channels=3,
axes=['b', 0, 1, 'c']).make_theano_batch(batch_size=1)
np.testing.assert_(d.broadcastable[0])
def test_compare_index():
dims = [5, 5, 5, 6]
max_labels = [10, 10, 9, 10]
index_spaces = [IndexSpace(dim=dim, max_labels=max_label)
for dim, max_label in zip(dims, max_labels)]
assert index_spaces[0] == index_spaces[1]
assert not any(index_spaces[i] == index_spaces[j]
for i, j in itertools.combinations([1, 2, 3], 2))
vector_space = VectorSpace(dim=5)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
composite_space = CompositeSpace((index_spaces[0],))
assert not any(index_space == vector_space for index_space in index_spaces)
assert not any(index_space == composite_space
for index_space in index_spaces)
assert not any(index_space == conv2d_space for index_space in index_spaces)
def test_np_format_as_index2vector():
# Test 5 random batches for shape, number of non-zeros
for _ in xrange(5):
max_labels = np.random.randint(2, 10)
batch_size = np.random.randint(1, 10)
labels = np.random.randint(1, 10)
batch = np.random.random_integers(max_labels - 1,
size=(batch_size, labels))
index_space = IndexSpace(dim=labels, max_labels=max_labels)
vector_space_merge = VectorSpace(dim=max_labels)
vector_space_concatenate = VectorSpace(dim=max_labels * labels)
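# Judging by the shape assertions below: the 'merge' target
# (dim == max_labels) collapses all of a row's labels into one vector,
# while the 'concatenate' target (dim == max_labels * labels) keeps a
# separate one-hot block per label position.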
merged = index_space.np_format_as(batch, vector_space_merge)
concatenated = index_space.np_format_as(batch,
vector_space_concatenate)
assert merged.shape == (batch_size, max_labels)
assert concatenated.shape == (batch_size, max_labels * labels)
assert np.count_nonzero(merged) <= batch.size
assert np.count_nonzero(concatenated) == batch.size
assert np.all(np.unique(concatenated) == np.array([0, 1]))
# Make sure Theano variables give the same result
batch = tensor.lmatrix('batch')
single = tensor.lvector('single')
batch_size = np.random.randint(1, 10)
np_batch = np.random.random_integers(max_labels - 1,
size=(batch_size, labels))
np_single = np.random.random_integers(max_labels - 1,
size=(labels))
f_batch_merge = theano.function(
[batch], index_space._format_as_impl(False, batch, vector_space_merge)
)
f_batch_concatenate = theano.function(
[batch], index_space._format_as_impl(False, batch,
vector_space_concatenate)
)
f_single_merge = theano.function(
[single], index_space._format_as_impl(False, single,
vector_space_merge)
)
f_single_concatenate = theano.function(
[single], index_space._format_as_impl(False, single,
vector_space_concatenate)
)
np.testing.assert_allclose(
f_batch_merge(np_batch),
index_space._format_as_impl(True, np_batch, vector_space_merge)
)
np.testing.assert_allclose(
f_batch_concatenate(np_batch),
index_space._format_as_impl(True, np_batch, vector_space_concatenate)
)
np.testing.assert_allclose(
f_single_merge(np_single),
index_space._format_as_impl(True, np_single, vector_space_merge)
)
np.testing.assert_allclose(
f_single_concatenate(np_single),
index_space._format_as_impl(True, np_single, vector_space_concatenate)
)
def test_dtypes():
batch_size = 2
dtype_is_none_msg = ("self.dtype is None, so you must provide a "
"non-None dtype argument to this method.")
all_scalar_dtypes = tuple(t.dtype
for t in theano.scalar.all_types)
def underspecifies_dtypes(from_space, to_dtype):
"""
Returns True iff from_space's dtype and to_dtype are both None. If
from_space is a CompositeSpace, this recurses into its tree of
subspaces.
"""
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
return any(underspecifies_dtypes(s, to_dtype)
for s in from_space.components)
else:
return any(underspecifies_dtypes(s, d)
for s, d
in safe_zip(from_space.components, to_dtype))
else:
assert not isinstance(to_dtype, tuple), ("Tree structure "
"mismatch between "
"from_space and "
"to_dtype.")
return from_space.dtype is None and to_dtype is None
def get_expected_batch_dtype(from_space, to_dtype):
"""
Returns the expected dtype of a batch returned from
from_space.f(batch_size, dtype=to_dtype), where f is one of the three batch
creation methods (get_origin_batch, make_theano_batch, and
make_shared_batch)
"""
if to_dtype == 'floatX':
to_dtype = theano.config.floatX
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
to_dtype = (to_dtype, ) * len(from_space.components)
return tuple(get_expected_batch_dtype(subspace, subtype)
for subspace, subtype
in safe_zip(from_space.components, to_dtype))
else:
assert not (from_space.dtype is None and to_dtype is None)
return from_space.dtype if to_dtype is None else to_dtype
def get_batch_dtype(batch):
"""
Returns the dtype of a batch, as a string, or nested tuple of strings.
For simple batches such as ndarray, this returns str(batch.dtype).
For the None batches "used" by NullSpace, this returns a special string
"NullSpace dtype".
For composite batches, this returns (nested) tuples of dtypes.
"""
if isinstance(batch, tuple):
return tuple(get_batch_dtype(b) for b in batch)
elif batch is None:
return "NullSpace dtype"
else:
return batch.dtype
def test_get_origin_batch(from_space, to_type):
# Expect failure if neither we nor the from_space specifies a dtype
if underspecifies_dtypes(from_space, to_type):
try:
from_space.get_origin_batch(batch_size, dtype=to_type)
except TypeError, ex:
assert dtype_is_none_msg in str(ex)
except Exception, unexpected_ex:
print ("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.get_origin_batch(batch_size, dtype=to_type)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_make_shared_batch(from_space, to_type):
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_shared_batch(batch_size, dtype=to_type)
except TypeError, ex:
assert dtype_is_none_msg in str(ex)
except Exception, unexpected_ex:
print ("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_shared_batch(batch_size=batch_size,
name='batch',
dtype=to_type)
assert (get_batch_dtype(batch) ==
get_expected_batch_dtype(from_space, to_type)), \
("\nget_batch_dtype(batch): %s\n"
"get_expected_batch_dtype(from_space, to_type): %s" %
(get_batch_dtype(batch),
get_expected_batch_dtype(from_space, to_type)))
def test_make_theano_batch(from_space, to_type):
kwargs = {'name': 'batch',
'dtype': to_type}
# Sparse VectorSpaces throw an exception if batch_size is specified.
if not (isinstance(from_space, VectorSpace) and from_space.sparse):
kwargs['batch_size'] = batch_size
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_theano_batch(**kwargs)
except TypeError, ex:
assert dtype_is_none_msg in str(ex)
except Exception, unexpected_ex:
print ("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_theano_batch(**kwargs)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_format(from_space, to_space, using_numeric_batch):
"""
Unit test for a call to from_space.np_format_as(batch, to_space)
"""
# Type-checks the arguments
for space, name in zip((from_space, to_space),
("from_space", "to_space")):
if not isinstance(space,
(VectorSpace, Conv2DSpace, CompositeSpace)):
raise TypeError("This test only supports spaces of type "
"VectorSpace, Conv2DSpace, and "
"CompositeSpace, not %s's type %s" %
(name, type(space)))
def get_batch(space, using_numeric_batch):
"""
Uses space.get_origin_batch() to return a numeric batch,
or space.make_theano_batch() to return a symbolic one.
Uses a fallback dtype if the space itself doesn't have one.
"""
def specifies_all_dtypes(space):
"""
Returns True iff space has a completely specified dtype.
"""
if isinstance(space, CompositeSpace):
return all(specifies_all_dtypes(subspace)
for subspace in space.components)
else:
return space.dtype is not None
def replace_none_dtypes(dtype, fallback_dtype):
"""
Returns dtype, with any Nones replaced by fallback_dtype.
"""
if isinstance(dtype, tuple):
return tuple(replace_none_dtypes(d, fallback_dtype)
for d in dtype)
else:
return fallback_dtype if dtype is None else dtype
kwargs = {"batch_size": batch_size}
# Use this when space doesn't specify a dtype
fallback_dtype = theano.config.floatX
if not specifies_all_dtypes(space):
kwargs["dtype"] = replace_none_dtypes(space.dtype,
fallback_dtype)
if using_numeric_batch:
return space.get_origin_batch(**kwargs)
else:
# Sparse VectorSpaces throw an exception if batch_size is
# specified
if isinstance(space, VectorSpace) and space.sparse:
del kwargs["batch_size"]
kwargs["name"] = "space-generated batch"
return space.make_theano_batch(**kwargs)
def get_expected_warning(from_space, from_batch, to_space):
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
warning, message = get_expected_warning(fs, fb, ts)
if warning is not None:
return warning, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
for fs, fb in safe_zip(from_space.components, from_batch):
warning, message = get_expected_warning(fs, fb, to_space)
if warning is not None:
return warning, message
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
for ts in to_space.components:
warning, message = get_expected_warning(from_space,
from_batch,
ts)
if warning is not None:
return warning, message
return None, None
# simple -> simple
return None, None
def get_expected_error(from_space, from_batch, to_space):
"""
Returns the (error type, message) pair to be expected when calling
from_space._format_as(batch, to_space). Returns (None, None) if no
error should be expected.
"""
def contains_different_dtypes(space):
"""
Returns true if space contains different dtypes. None is
considered distinct from all actual dtypes.
"""
assert isinstance(space, CompositeSpace)
def get_shared_dtype_if_any(space):
"""
Returns space's dtype. If space is composite, returns the
dtype used by all of its subcomponents. Returns False if
the subcomponents use different dtypes.
"""
if isinstance(space, CompositeSpace):
dtypes = tuple(get_shared_dtype_if_any(c)
for c in space.components)
assert(len(dtypes) > 0)
if any(d != dtypes[0] for d in dtypes[1:]):
return False
return dtypes[0] # could be False, but that's fine
else:
return space.dtype
return get_shared_dtype_if_any(space) is False
assert (isinstance(from_space, CompositeSpace) ==
isinstance(from_batch, tuple))
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
error, message = get_expected_error(fs, fb, ts)
if error is not None:
return error, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
if isinstance(to_space, Conv2DSpace):
return (NotImplementedError,
"CompositeSpace does not know how to format as "
"Conv2DSpace")
for fs, fb in safe_zip(from_space.components, from_batch):
error, message = get_expected_error(fs, fb, to_space)
if error is not None:
return error, message
if isinstance(to_space, VectorSpace) and \
contains_different_dtypes(from_space) and \
to_space.dtype is None:
return (TypeError,
"Tried to format components with differing dtypes "
"into a VectorSpace with no dtype of its own. "
"dtypes: ")
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
if isinstance(from_space, Conv2DSpace):
return (NotImplementedError,
"Conv2DSpace does not know how to format as "
"CompositeSpace")
for ts in to_space.components:
error, message = get_expected_error(from_space,
from_batch,
ts)
if error is not None:
return error, message
return None, None
#
# simple -> simple
#
def is_sparse(space):
return isinstance(space, VectorSpace) and space.sparse
def is_complex(arg):
"""
Returns whether a space or a batch has a complex dtype.
"""
return (arg.dtype is not None and
str(arg.dtype).startswith('complex'))
if isinstance(from_batch, tuple):
return (TypeError,
"This space only supports simple dtypes, but received "
"a composite batch.")
if is_complex(from_batch) and not is_complex(from_space):
return (TypeError,
"This space has a non-complex dtype (%s), and "
"thus cannot support complex batches of type %s." %
(from_space.dtype, from_batch.dtype))
if from_space.dtype is not None and \
from_space.dtype != from_batch.dtype:
return (TypeError,
"This space is for dtype %s, but recieved a "
"batch of dtype %s." %
(from_space.dtype, from_batch.dtype))
if is_sparse(from_space) and isinstance(to_space, Conv2DSpace):
return (TypeError,
"Formatting a SparseVariable to a Conv2DSpace "
"is not supported, since neither scipy nor "
"Theano has sparse tensors with more than 2 "
"dimensions. We need 4 dimensions to "
"represent a Conv2DSpace batch")
if is_complex(from_space) and not is_complex(to_space):
if is_symbolic_batch(from_batch):
return (TypeError,
"Casting from complex to real is ambiguous")
else:
return (np.ComplexWarning,
"Casting complex values to real discards the "
"imaginary part")
return None, None
def get_expected_formatted_dtype(from_batch, to_space):
"""
Returns the expected dtype of the batch returned from a call to
from_space.format_as(from_batch, to_space). If the returned batch is a
nested tuple, the expected dtype will also be a nested tuple.
"""
def get_single_dtype(batch):
"""
Returns the dtype shared by all leaf nodes of the nested batch.
If the nested batch contains differing dtypes, this returns False.
None counts as a different dtype than non-None.
"""
if isinstance(batch, tuple):
assert len(batch) > 0
child_dtypes = tuple(get_single_dtype(b) for b in batch)
if any(c != child_dtypes[0] for c in child_dtypes[1:]):
return False
return child_dtypes[0] # may be False, but that's correct.
else:
return batch.dtype
# composite -> composite
if isinstance(from_batch, tuple) and \
isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(b, s)
for b, s in safe_zip(from_batch,
to_space.components))
# composite -> simple
elif isinstance(from_batch, tuple):
if to_space.dtype is not None:
return to_space.dtype
else:
result = get_batch_dtype(from_batch)
if result is False:
raise TypeError("From_batch doesn't have a single "
"dtype: %s" %
str(get_batch_dtype(from_batch)))
return result
# simple -> composite
elif isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(from_batch, s)
for s in to_space.components)
# simple -> simple with no dtype
elif to_space.dtype is None:
assert from_batch.dtype is not None
return str(from_batch.dtype)
# simple -> simple with a dtype
else:
return to_space.dtype
from_batch = get_batch(from_space, using_numeric_batch)
expected_error, expected_error_msg = get_expected_error(from_space,
from_batch,
to_space)
# For some reason, the "with assert_raises(expected_error) as context:"
    # idiom isn't catching all the expected_errors. Use this instead:
if expected_error is not None:
try:
# temporarily upgrades warnings to exceptions within this block
with warnings.catch_warnings():
warnings.simplefilter("error")
from_space._format_as(using_numeric_batch,
from_batch,
to_space)
except expected_error, ex:
assert str(ex).find(expected_error_msg) >= 0
except Exception, unknown_ex:
print "Expected exception of type %s, got %s." % \
(expected_error.__name__, type(unknown_ex))
raise unknown_ex
finally:
return
to_batch = from_space._format_as(using_numeric_batch,
from_batch,
to_space)
expected_dtypes = get_expected_formatted_dtype(from_batch, to_space)
actual_dtypes = get_batch_dtype(to_batch)
assert expected_dtypes == actual_dtypes, \
("\nexpected_dtypes: %s,\n"
"actual_dtypes: %s \n"
"from_space: %s\n"
"from_batch's dtype: %s\n"
"from_batch is theano?: %s\n"
"to_space: %s" % (expected_dtypes,
actual_dtypes,
from_space,
get_batch_dtype(from_batch),
is_symbolic_batch(from_batch),
to_space))
#
#
# End of test_format() function.
def test_dtype_getter(space):
"""
Tests the getter method of space's dtype property.
"""
def assert_composite_dtype_eq(space, dtype):
"""
Asserts that dtype is a nested tuple with exactly the same tree
structure as space, and that the dtypes of space's components and
their corresponding elements in <dtype> are equal.
"""
assert (isinstance(space, CompositeSpace) ==
isinstance(dtype, tuple))
if isinstance(space, CompositeSpace):
for s, d in safe_zip(space.components, dtype):
assert_composite_dtype_eq(s, d)
else:
assert space.dtype == dtype
if isinstance(space, SimplyTypedSpace):
assert space.dtype == space._dtype
elif isinstance(space, NullSpace):
assert space.dtype == "NullSpace's dtype"
elif isinstance(space, CompositeSpace):
assert_composite_dtype_eq(space, space.dtype)
def test_dtype_setter(space, dtype):
"""
Tests the setter method of space's dtype property.
"""
def get_expected_error(space, dtype):
"""
If calling space.dtype = dtype is expected to throw an exception,
this returns (exception_class, exception_message).
If no exception is to be expected, this returns (None, None).
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
if len(space.components) != len(dtype):
return ValueError, "Argument 0 has length "
for s, d in safe_zip(space.components, dtype):
error, message = get_expected_error(s, d)
if error is not None:
return error, message
else:
for s in space.components:
error, message = get_expected_error(s, dtype)
if error is not None:
return error, message
return None, None
if isinstance(space, SimplyTypedSpace):
if not any((dtype is None,
dtype == 'floatX',
dtype in all_scalar_dtypes)):
return (TypeError,
'Unrecognized value "%s" (type %s) for dtype arg' %
(dtype, type(dtype)))
return None, None
if isinstance(space, NullSpace):
nullspace_dtype = NullSpace().dtype
if dtype != nullspace_dtype:
return (TypeError,
'NullSpace can only take the bogus dtype "%s"' %
nullspace_dtype)
return None, None
raise NotImplementedError("%s not yet supported by this test" %
type(space))
def assert_dtype_equiv(space, dtype):
"""
Asserts that space.dtype and dtype are equivalent.
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
for s, d in safe_zip(space.components, dtype):
assert_dtype_equiv(s, d)
else:
for s in space.components:
assert_dtype_equiv(s, dtype)
else:
assert not isinstance(dtype, tuple)
if dtype == 'floatX':
dtype = theano.config.floatX
assert space.dtype == dtype, ("%s not equal to %s" %
(space.dtype, dtype))
expected_error, expected_message = get_expected_error(space, dtype)
if expected_error is not None:
try:
space.dtype = dtype
except expected_error, ex:
assert expected_message in str(ex)
            except Exception, unknown_ex:
                print "Expected exception of type %s, got %s instead." % \
                    (expected_error.__name__, type(unknown_ex))
                raise unknown_ex
return
else:
space.dtype = dtype
assert_dtype_equiv(space, dtype)
#
#
# End of test_dtype_setter() function
shape = np.array([2, 3, 4], dtype='int')
assert len(shape) == 3 # This test depends on this being true
dtypes = ('floatX', None) + all_scalar_dtypes
#
# spaces with the same number of elements
#
vector_spaces = tuple(VectorSpace(dim=shape.prod(), dtype=dt, sparse=s)
for dt in dtypes for s in (True, False))
conv2d_spaces = tuple(Conv2DSpace(shape=shape[:2],
dtype=dt,
num_channels=shape[2])
for dt in dtypes)
# no need to make CompositeSpaces with components spanning all possible
# dtypes. Just try 2 dtype combos. No need to try different sparsities
# either. That will be tested by the non-composite space conversions.
n_dtypes = 2
old_nchannels = shape[2]
shape[2] = old_nchannels / 2
assert shape[2] * 2 == old_nchannels, \
("test code is broken: # of channels should start as an even "
"number, not %d." % old_nchannels)
def make_composite_space(dtype0, dtype1, use_conv2d):
if use_conv2d:
second_space = Conv2DSpace(shape=shape[:2],
dtype=dtype1,
num_channels=shape[2])
else:
second_space = VectorSpace(dim=np.prod(shape),
dtype=dtype1)
return CompositeSpace((VectorSpace(dim=shape.prod(), dtype=dtype0),
second_space))
composite_spaces = tuple(make_composite_space(dtype0, dtype1, use_conv2d)
for dtype0, dtype1 in zip(dtypes[:n_dtypes],
dtypes[-n_dtypes:])
for use_conv2d in [True, False])
del n_dtypes
# A few composite dtypes to try throwing at CompositeSpace's batch-making
# methods.
composite_dtypes = ((None, 'int8'),
('complex128', theano.config.floatX))
# Tests CompositeSpace's batch-making methods and dtype setter
# with composite dtypes
for from_space in composite_spaces:
for to_dtype in composite_dtypes:
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
all_spaces = vector_spaces + conv2d_spaces + composite_spaces
for from_space in all_spaces:
test_dtype_getter(from_space)
# Tests batch-making and dtype setting methods with non-composite
# dtypes.
for to_dtype in dtypes:
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
# Tests _format_as
for to_space in all_spaces:
for is_numeric in (True, False):
test_format(from_space, to_space, is_numeric)
|
|
import operator
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, DataFrame, Series, date_range
from pandas.tests.arrays.categorical.common import TestCategorical
import pandas.util.testing as tm
class TestCategoricalOpsWithFactor(TestCategorical):
def test_categories_none_comparisons(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
tm.assert_categorical_equal(result, expected)
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
tm.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
tm.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = Categorical(
["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
cat_rev_base = Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = Categorical(["a", "b", "c"], ordered=True)
cat_base = Categorical(
["b", "b", "b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
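        # (with ordered categories ["c", "b", "a"] the ordering is c < b < a,
        # so e.g. "a" compares greater than "b" under cat_rev's dtype)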
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
tm.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
with pytest.raises(TypeError):
cat > cat_rev
cat_rev_base2 = Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
with pytest.raises(TypeError):
cat_rev > cat_rev_base2
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
assert not (cat > cat).any()
with pytest.raises(TypeError):
            cat > cat_unordered
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
msg = ("Cannot compare a Categorical for op __gt__ with type"
r" <class 'numpy\.ndarray'>")
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
tm.assert_numpy_array_equal(res, exp)
# check that zero-dim array gets unboxed
res = cat_rev > np.array("b")
tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps:
def test_compare_frame(self):
# GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
data = ["a", "b", 2, "a"]
cat = Categorical(data)
df = DataFrame(cat)
for op in [operator.eq, operator.ne, operator.ge,
operator.gt, operator.le, operator.lt]:
with pytest.raises(ValueError):
# alignment raises unless we transpose
op(cat, df)
result = cat == df.T
expected = DataFrame([[True, True, True, True]])
tm.assert_frame_equal(result, expected)
result = cat[::-1] != df.T
expected = DataFrame([[False, True, True, False]])
tm.assert_frame_equal(result, expected)
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat,
np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0],
np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat,
np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Categorical([1, 2, 3], ordered=True)
msg = ("Cannot compare a Categorical for op __{}__ with a scalar,"
" which is not a category")
with pytest.raises(TypeError, match=msg.format('lt')):
cat < 4
with pytest.raises(TypeError, match=msg.format('gt')):
cat > 4
with pytest.raises(TypeError, match=msg.format('gt')):
4 < cat
with pytest.raises(TypeError, match=msg.format('lt')):
4 > cat
tm.assert_numpy_array_equal(cat == 4,
np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4,
np.array([True, True, True]))
def test_comparison_of_ordered_categorical_with_nan_to_scalar(
self, compare_operators_no_eq_ne):
# https://github.com/pandas-dev/pandas/issues/26504
        # BUG: fix ordered categorical comparison with missing values (#26504)
# and following comparisons with scalars in categories with missing
# values should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
scalar = 2
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat),
compare_operators_no_eq_ne)(scalar)
actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
tm.assert_numpy_array_equal(actual, expected)
def test_comparison_of_ordered_categorical_with_nan_to_listlike(
self, compare_operators_no_eq_ne):
# https://github.com/pandas-dev/pandas/issues/26504
# and following comparisons of missing values in ordered Categorical
# with listlike should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
actual = getattr(cat, compare_operators_no_eq_ne)(other)
tm.assert_numpy_array_equal(actual, expected)
@pytest.mark.parametrize('data,reverse,base', [
(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
)
def test_comparisons(self, data, reverse, base):
cat_rev = Series(
Categorical(data, categories=reverse, ordered=True))
cat_rev_base = Series(
Categorical(base, categories=reverse, ordered=True))
cat = Series(Categorical(data, ordered=True))
cat_base = Series(
Categorical(base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
with pytest.raises(TypeError):
cat > cat_rev
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
msg = ("Cannot compare a Categorical for op __gt__ with type"
r" <class 'numpy\.ndarray'>")
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
with pytest.raises(TypeError, match=msg):
a < cat
with pytest.raises(TypeError, match=msg):
a < cat_rev
@pytest.mark.parametrize('ctor', [
lambda *args, **kwargs: Categorical(*args, **kwargs),
lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
])
def test_unordered_different_order_equal(self, ctor):
# https://github.com/pandas-dev/pandas/issues/16014
c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
assert (c1 == c2).all()
c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = ctor(['b', 'a'], categories=['b', 'a'], ordered=False)
assert (c1 != c2).all()
c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
c2 = ctor(['b', 'b'], categories=['b', 'a'], ordered=False)
assert (c1 != c2).all()
c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
result = c1 == c2
tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(['a', 'b'], categories=['a', 'b'], ordered=False)
c2 = Categorical(['a', 'c'], categories=['c', 'a'], ordered=False)
with pytest.raises(TypeError, match=("Categoricals can "
"only be compared")):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=['a', 'b'])
c2 = Categorical([], categories=['a'])
msg = "Categories are different lengths"
with pytest.raises(TypeError, match=msg):
c1 == c2
def test_compare_unordered_different_order(self):
        # https://github.com/pandas-dev/pandas/issues/16603#issuecomment-349290078
a = pd.Categorical(['a'], categories=['a', 'b'])
b = pd.Categorical(['b'], categories=['b', 'a'])
assert not a.equals(b)
def test_numeric_like_ops(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
# numeric ops should not succeed
for op, str_rep in [('__add__', r'\+'),
('__sub__', '-'),
('__mul__', r'\*'),
('__truediv__', '/')]:
msg = r"Series cannot perform the operation {}".format(str_rep)
with pytest.raises(TypeError, match=msg):
getattr(df, op)(df)
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = df['value_group']
for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
msg = "Categorical cannot perform the operation {}".format(op)
with pytest.raises(TypeError, match=msg):
getattr(s, op)(numeric_only=False)
        # mad technically works because it always takes the numeric data
# numpy ops
s = Series(Categorical([1, 2, 3, 4]))
with pytest.raises(TypeError):
np.sum(s)
# numeric ops on a Series
for op, str_rep in [('__add__', r'\+'),
('__sub__', '-'),
('__mul__', r'\*'),
('__truediv__', '/')]:
msg = r"Series cannot perform the operation {}".format(str_rep)
with pytest.raises(TypeError, match=msg):
getattr(s, op)(2)
# invalid ufunc
with pytest.raises(TypeError):
np.log(s)
def test_contains(self):
# GH21508
c = pd.Categorical(list('aabbca'), categories=list('cab'))
assert 'b' in c
assert 'z' not in c
assert np.nan not in c
with pytest.raises(TypeError):
assert [1] in c
# assert codes NOT in index
assert 0 not in c
assert 1 not in c
c = pd.Categorical(list('aabbca') + [np.nan], categories=list('cab'))
assert np.nan in c
|
|
#!/usr/bin/env python
"""Configuration parameters for the client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import config_lib
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
# General Client options.
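# Note: values of the form %(Option.name) below are config interpolations,
# which config_lib expands against other options when a value is read.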
config_lib.DEFINE_string(
"Client.name", "GRR", "The name of the client. This will be used as a base "
"name to generate many other default parameters such "
"as binary names and service names. Note that on "
"Linux we lowercase the name to confirm with most "
"linux naming conventions.")
config_lib.DEFINE_string("Client.binary_name", "%(Client.name)",
"The name of the client binary.")
config_lib.DEFINE_list("Client.labels", [], "Labels for this client.")
config_lib.DEFINE_string("Client.company_name", "GRR Project",
"The name of the company which made the client.")
config_lib.DEFINE_string("Client.description", "%(name) %(platform) %(arch)",
"A description of this specific client build.")
config_lib.DEFINE_string("Client.platform", "windows",
"The platform we are running on.")
config_lib.DEFINE_string("Client.arch", "amd64",
"The architecture we are running on.")
config_lib.DEFINE_string("Client.build_time", "Unknown",
"The time the client was built.")
config_lib.DEFINE_string("Client.deploy_time", "Unknown",
"The time the client was deployed.")
config_lib.DEFINE_string(
"Client.build_environment", None,
"The output of Uname.FromCurrentSystem.signature() "
"on the system the client was built on.")
config_lib.DEFINE_integer("Client.rsa_key_length", 2048,
"The key length of the client keys in bits.")
config_lib.DEFINE_string(
name="Client.install_path",
default=r"%(SystemRoot|env)\\System32\\%(name)\\%(Template.version_string)",
help="Where the client binaries are installed.")
config_lib.DEFINE_list(
name="Client.server_urls", default=[], help="Base URL for client control.")
config_lib.DEFINE_list(
"Client.control_urls", [],
"DEPRECATED List of URLs of the controlling server. "
"Use server_urls instead.")
config_lib.DEFINE_integer("Client.http_timeout", 100,
"Timeout for HTTP requests.")
config_lib.DEFINE_string("Client.plist_path",
"/Library/LaunchDaemons/%(Client.plist_filename)",
"Location of our launchctl plist.")
config_lib.DEFINE_string("Client.plist_filename", "%(Client.plist_label).plist",
"Filename of launchctl plist.")
config_lib.DEFINE_string(
"Client.plist_label",
"%(Client.plist_label_prefix).google.code.%(Client.name)",
"Identifier label for launchd")
config_lib.DEFINE_string("Client.plist_label_prefix", "com",
"Domain for launchd label.")
config_lib.DEFINE_float("Client.poll_min", 0.2,
"Minimum time between polls in seconds.")
config_lib.DEFINE_float("Client.poll_max", 600,
"Maximum time between polls in seconds.")
config_lib.DEFINE_float(
"Client.error_poll_min", 60,
"Minimum time between polls in seconds if the server "
"reported an error.")
config_lib.DEFINE_list(
name="Client.proxy_servers",
help="List of valid proxy servers the client should try.",
default=[])
config_lib.DEFINE_integer("Client.max_post_size", 40000000,
"Maximum size of the post.")
config_lib.DEFINE_integer("Client.max_out_queue", 51200000,
"Maximum size of the output queue.")
config_lib.DEFINE_integer(
"Client.foreman_check_frequency", 1800,
"The minimum number of seconds before checking with "
"the foreman for new work.")
config_lib.DEFINE_float(
"Client.rss_max", 1000, "Maximum memory footprint in MB (soft limit). "
"Exceeding this will result in an orderly shutdown.")
config_lib.DEFINE_float(
"Client.rss_max_hard", 2000, "Maximum memory footprint in MB (hard limit). "
"Exceeding this will result in aborting the current "
"client action and restarting.")
config_lib.DEFINE_string(
name="Client.tempfile_prefix",
help="Prefix to use for temp files created by the GRR client.",
default="tmp%(Client.name)")
config_lib.DEFINE_list(
name="Client.tempdir_roots",
help="List of temporary directories to use on the client.",
default=["/var/tmp/"])
config_lib.DEFINE_string(
name="Client.grr_tempdir",
help="Default subdirectory in the temp directory to use for GRR.",
default="%(Client.name)")
config_lib.DEFINE_list(
name="Client.vfs_virtualroots",
help=("If this is set for a VFS type, client VFS operations will always be"
" relative to the given root. Format is os:/mount/disk."),
default=[])
# Windows client specific options.
config_lib.DEFINE_string(
"Client.config_hive",
"HKEY_LOCAL_MACHINE",
help="The registry hive where the client "
"configuration will be stored.")
config_lib.DEFINE_string(
"Client.config_key",
r"Software\\%(Client.name)",
help="The registry key where client configuration "
"will be stored.")
# Client Cryptographic options. Here we define defaults for key values.
config_lib.DEFINE_semantic_value(
rdf_crypto.RSAPrivateKey,
"Client.private_key",
help="Client private key in pem format. If not provided this "
"will be generated by the enrollment process.",
)
config_lib.DEFINE_semantic_value(
rdf_crypto.RDFX509Cert,
"CA.certificate",
help="Trusted CA certificate in X509 pem format",
)
config_lib.DEFINE_semantic_value(
rdf_crypto.RSAPublicKey,
"Client.executable_signing_public_key",
help="public key for verifying executable signing.")
config_lib.DEFINE_semantic_value(
rdf_crypto.RSAPrivateKey,
"PrivateKeys.executable_signing_private_key",
help="Private keys for signing executables. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_integer("Client.server_serial_number", 0,
"Minimal serial number we accept for server cert.")
config_lib.DEFINE_integer(
"Client.gc_frequency", 10,
"Defines how often the client calls garbage collection (seconds).")
# The following configuration options are defined here but are used in
# the windows nanny code (grr/client/nanny/windows_nanny.h).
config_lib.DEFINE_string(
"Nanny.child_binary",
default=r"%(Client.install_path)\\%(Client.binary_name)",
help="The location to the client binary.")
config_lib.DEFINE_string(
"Nanny.child_command_line",
default=(r"%(child_binary) "
r'--config "%(Client.install_path)\\%(Client.binary_name).yaml"'),
help="The command line to launch the client binary.")
config_lib.DEFINE_string("Client.transaction_log_file",
"%(Logging.path)/transaction.log",
"The file where we write the nanny transaction log.")
config_lib.DEFINE_string(
"Nanny.service_name",
"%(Client.name) Monitor",
help="The name of the nanny.")
config_lib.DEFINE_string(
"Nanny.service_description",
"%(Client.name) Monitor Service",
help="The description of the nanny service.")
config_lib.DEFINE_string("Nanny.statusfile", "/var/run/nanny.status",
"The file where we write the nanny status.")
config_lib.DEFINE_string(
"Nanny.binary",
r"%(Client.install_path)\\%(service_binary_name)",
help="The full location to the nanny binary.")
config_lib.DEFINE_string(
"Nanny.service_binary_name",
"%(Client.name)service.exe",
help="The executable name of the nanny binary.")
config_lib.DEFINE_integer(
"Nanny.unresponsive_kill_period", 60,
"The time in seconds after which the nanny kills us.")
config_lib.DEFINE_integer(
"Network.api", 3, "The version of the network protocol the client "
"uses.")
# Installer options.
config_lib.DEFINE_string(
name="Installer.logfile",
default="%(Logging.path)/%(Client.name)_installer.txt",
help=("A specific log file which is used for logging the "
"installation process."))
config_lib.DEFINE_string(
"Client.fleetspeak_unsigned_services_regkey",
"HKEY_LOCAL_MACHINE\\Software\\FleetspeakClient\\textservices",
"Registry key (on Windows) where Fleetspeak expects services "
"to write their unsigned configs to.")
config_lib.DEFINE_string(
"Client.fleetspeak_unsigned_config_fname",
"%(Client.name)_fleetspeak_service_config.txt",
"File-name for the Fleetspeak service config generated "
"when repacking templates.")
# osquery options.
config_lib.DEFINE_string(
"Osquery.path", default="", help="A path to the osquery executable.")
config_lib.DEFINE_integer(
"Osquery.max_chunk_size",
default=1024 * 1024, # 1 MiB.
help=("A size (in bytes) of maximum response size. Queries for which the "
"output exceedes the specified limit are going to be divided into "
"multiple responses."))
|
|
#!/usr/bin/env python
import glob
import logging
import os
import re
import simplejson as json
import struct
import threading
import couchstore
import couchbaseConstants
import pump
from cbcollections import defaultdict
from cbqueue import PumpQueue
SFD_SCHEME = "couchstore-files://"
SFD_VBUCKETS = 1024
SFD_REV_META = ">QIIBB" # cas, exp, flg
SFD_REV_SEQ = ">Q"
SFD_DB_SEQ = ">Q"
SFD_RE = "^([0-9]+)\\.couch\\.([0-9]+)$"
# TODO: (1) SFDSource - total_msgs.
# TODO: (1) SFDSink - ensure right user for bucket_dir.
# TODO: (1) SFDSink - ensure right user for couchstore file.
class SFDSource(pump.Source):
"""Reads couchstore files from a couchbase server data directory."""
def __init__(self, opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur):
super(SFDSource, self).__init__(opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur)
self.done = False
self.queue = None
@staticmethod
def can_handle(opts, spec):
return spec.startswith(SFD_SCHEME)
@staticmethod
def check(opts, spec):
rv, d = data_dir(spec)
if rv != 0:
return rv
buckets = []
for bucket_dir in sorted(glob.glob(d + "/*/")):
if not glob.glob(bucket_dir + "/*.couch.*"):
continue
bucket_name = os.path.basename(os.path.dirname(bucket_dir))
if not bucket_name:
return "error: bucket_name too short: " + bucket_dir, None
rv, v = SFDSource.vbucket_states(opts, spec, bucket_dir)
if rv != 0:
return rv, None
buckets.append({'name': bucket_name,
'nodes': [{'hostname': 'N/A',
'vbucket_states': v}]})
if not buckets:
return "error: no bucket subdirectories at: " + d, None
return 0, {'spec': spec, 'buckets': buckets}
@staticmethod
def vbucket_states(opts, spec, bucket_dir):
"""Reads all the latest couchstore files in a directory, and returns
map of state string (e.g., 'active') to map of vbucket_id to doc."""
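        # The returned mapping looks roughly like:
        #   {'active': {0: {'state': 'active', ...}, ...}, 'replica': {...}}
        # where each doc is the parsed '_local/vbstate' JSON for that vbucket.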
vbucket_states = defaultdict(dict)
for f in latest_couch_files(bucket_dir):
vbucket_id = int(re.match(SFD_RE, os.path.basename(f)).group(1))
try:
store = couchstore.CouchStore(f, 'r')
try:
doc_str = store.localDocs['_local/vbstate']
if doc_str:
doc = json.loads(doc_str)
state = doc.get('state', None)
if state:
vbucket_states[state][vbucket_id] = doc
else:
return "error: missing vbucket_state from: %s" \
% (f), None
except Exception, e:
return ("error: could not read _local/vbstate from: %s" +
"; exception: %s") % (f, e), None
store.close()
except Exception, e:
return ("error: could not read couchstore file: %s" +
"; exception: %s") % (f, e), None
if vbucket_states:
return 0, vbucket_states
return "error: no vbucket_states in files: %s" % (bucket_dir), None
@staticmethod
def provide_design(opts, source_spec, source_bucket, source_map):
rv, d = data_dir(source_spec)
if rv != 0:
return rv, None
bucket_dir = d + '/' + source_bucket['name']
if not os.path.isdir(bucket_dir):
return 0, None
rv, store, store_path = \
open_latest_store(bucket_dir,
"master.couch.*",
"^(master)\\.couch\\.([0-9]+)$",
"master.couch.0",
mode='r')
if rv != 0 or not store:
return rv, None
rows = []
for doc_info in store.changesSince(0):
if not doc_info.deleted:
try:
doc_contents = doc_info.getContents(options=couchstore.CouchStore.DECOMPRESS)
except Exception, e:
return ("error: could not read design doc: %s" +
"; source_spec: %s; exception: %s") % \
(doc_info.id, source_spec, e), None
try:
doc = json.loads(doc_contents)
except ValueError, e:
return ("error: could not parse design doc: %s" +
"; source_spec: %s; exception: %s") % \
(doc_info.id, source_spec, e), None
doc['id'] = doc.get('id', doc_info.id)
doc['_rev'] = doc.get('_rev', doc_info.revSequence)
rows.append({'id': doc_info.id, 'doc': doc})
store.close()
return 0, json.dumps(rows)
def provide_batch(self):
if self.done:
return 0, None
if not self.queue:
name = "c" + threading.currentThread().getName()[1:]
self.queue = PumpQueue(2)
self.thread = threading.Thread(target=self.loader, name=name)
self.thread.daemon = True
self.thread.start()
rv, batch = self.queue.get()
self.queue.task_done()
if rv != 0 or batch is None:
self.done = True
return rv, batch
def loader(self):
rv, d = data_dir(self.spec)
if rv != 0:
self.queue.put((rv, None))
return
source_vbucket_state = \
getattr(self.opts, 'source_vbucket_state', 'active')
source_nodes = self.source_bucket['nodes']
if len(source_nodes) != 1:
self.queue.put(("error: expected 1 node in source_bucket: %s"
% (self.source_bucket['name']), None))
return
vbucket_states = source_nodes[0].get('vbucket_states', None)
if not vbucket_states:
self.queue.put(("error: missing vbucket_states in source_bucket: %s"
% (self.source_bucket['name']), None))
return
vbuckets = vbucket_states.get(source_vbucket_state, None)
if vbuckets is None: # Empty dict is valid.
self.queue.put(("error: missing vbuckets in source_bucket: %s"
% (self.source_bucket['name']), None))
return
batch_max_size = self.opts.extra['batch_max_size']
batch_max_bytes = self.opts.extra['batch_max_bytes']
store = None
vbucket_id = None
# Level of indirection since we can't use python 3 nonlocal statement.
abatch = [pump.Batch(self)]
def change_callback(doc_info):
if doc_info:
key = doc_info.id
if self.skip(key, vbucket_id):
return
if doc_info.deleted:
cmd = couchbaseConstants.CMD_TAP_DELETE
val = ''
else:
cmd = couchbaseConstants.CMD_TAP_MUTATION
val = doc_info.getContents(options=couchstore.CouchStore.DECOMPRESS)
cas, exp, flg, flex_meta, dtype = struct.unpack(SFD_REV_META, doc_info.revMeta)
meta = struct.pack(SFD_REV_SEQ, doc_info.revSequence)
seqno = struct.pack(SFD_DB_SEQ, doc_info.sequence)
nmeta = 0
msg = (cmd, vbucket_id, key, flg, exp, cas, meta, val, seqno, dtype, nmeta)
abatch[0].append(msg, len(val))
if (abatch[0].size() >= batch_max_size or
abatch[0].bytes >= batch_max_bytes):
self.queue.put((0, abatch[0]))
abatch[0] = pump.Batch(self)
for f in latest_couch_files(d + '/' + self.source_bucket['name']):
vbucket_id = int(re.match(SFD_RE, os.path.basename(f)).group(1))
            if vbucket_id not in vbuckets:
continue
try:
store = couchstore.CouchStore(f, 'r')
except Exception, e:
self.queue.put(("error: could not open couchstore file: %s"
"; exception: %s" % (f, e), None))
return
store.forEachChange(0, change_callback)
store.close()
if abatch[0].size():
self.queue.put((0, abatch[0]))
self.queue.put((0, None))
class SFDSink(pump.Sink):
"""Sink for couchstore in couchbase server/file/directory layout."""
def __init__(self, opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur):
super(SFDSink, self).__init__(opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur)
self.rehash = opts.extra.get("rehash", 0)
self.init_worker(SFDSink.run)
@staticmethod
def run(self):
destination_vbucket_state = \
getattr(self.opts, 'destination_vbucket_state', 'active')
vbucket_states = self.source_node.get('vbucket_states', {})
while not self.ctl['stop']:
batch, future = self.pull_next_batch()
if not batch:
return self.future_done(future, 0)
vbuckets = batch.group_by_vbucket_id(SFD_VBUCKETS, self.rehash)
for vbucket_id, msgs in vbuckets.iteritems():
checkpoint_id = 0
max_deleted_seqno = 0
rv, store, store_path = self.open_store(vbucket_id)
if rv != 0:
return self.future_done(future, rv)
bulk_keys = []
bulk_vals = []
for i, msg in enumerate(msgs):
cmd, _vbucket_id, key, flg, exp, cas, meta, val, seqno, dtype, nmeta = msg
if self.skip(key, vbucket_id):
continue
d = couchstore.DocumentInfo(str(key))
flex_meta = 1
d.revMeta = str(struct.pack(SFD_REV_META, cas, exp, flg, flex_meta, dtype))
if meta:
if len(meta) > 8:
meta = meta[0:8]
if len(meta) < 8:
meta = ('\x00\x00\x00\x00\x00\x00\x00\x00' + meta)[-8:]
d.revSequence, = struct.unpack(SFD_REV_SEQ, meta)
else:
d.revSequence = 1
if seqno:
d.sequence = int(seqno)
if cmd == couchbaseConstants.CMD_TAP_MUTATION:
v = str(val)
try:
if (re.match('^\\s*{', v) and
json.loads(v) is not None):
d.contentType = couchstore.DocumentInfo.IS_JSON
except ValueError:
pass # NON_JSON is already the default contentType.
elif cmd == couchbaseConstants.CMD_TAP_DELETE:
v = None
else:
self.future_done(future,
"error: SFDSink bad cmd: " + str(cmd))
store.close()
return
bulk_keys.append(d)
bulk_vals.append(v)
try:
if bulk_keys and bulk_vals:
vm = vbucket_states.get(destination_vbucket_state, None)
if vm:
vi = vm.get(vbucket_id, None)
if vi:
c = int(vi.get("checkpoint_id", checkpoint_id))
checkpoint_id = max(checkpoint_id, c)
m = int(vi.get("max_deleted_seqno", max_deleted_seqno))
max_deleted_seqno = max(max_deleted_seqno, m)
rv = self.save_vbucket_state(store, vbucket_id,
destination_vbucket_state,
checkpoint_id,
max_deleted_seqno)
if rv != 0:
self.future_done(future, rv)
store.close()
return
store.saveMultiple(bulk_keys, bulk_vals,
options=couchstore.CouchStore.COMPRESS)
store.commit()
store.close()
except Exception, e:
self.future_done(future,
"error: could not save couchstore data"
"; vbucket_id: %s; store_path: %s"
"; exception: %s"
% (vbucket_id, store_path, e))
return
self.future_done(future, 0) # No return to keep looping.
def save_vbucket_state(self, store, vbucket_id,
state, checkpoint_id, max_deleted_seqno):
doc = json.dumps({'state': state,
'checkpoint_id': str(checkpoint_id),
'max_deleted_seqno': str(max_deleted_seqno)})
try:
store.localDocs['_local/vbstate'] = doc
except Exception, e:
return "error: save_vbucket_state() failed: " + str(e)
return 0
@staticmethod
def can_handle(opts, spec):
return spec.startswith(SFD_SCHEME)
@staticmethod
def check_base(opts, spec):
if getattr(opts, "destination_operation", None) != None:
return ("error: --destination-operation" +
" is not supported by this destination: %s") % (spec)
# Skip immediate superclass Sink.check_base(),
# since SFDSink can handle different vbucket states.
return pump.EndPoint.check_base(opts, spec)
@staticmethod
def check(opts, spec, source_map):
# TODO: (2) SFDSink - check disk space.
rv, dir = data_dir(spec)
if rv != 0:
return rv
if not os.path.isdir(dir):
return "error: not a directory: " + dir, None
if not os.access(dir, os.W_OK):
return "error: directory is not writable: " + dir, None
return 0, None
@staticmethod
def consume_design(opts, sink_spec, sink_map,
source_bucket, source_map, source_design):
if not source_design:
return 0
try:
sd = json.loads(source_design)
except ValueError, e:
return "error: could not parse source_design: " + source_design
rv, d = data_dir(sink_spec)
if rv != 0:
return rv
bucket_dir = d + '/' + source_bucket['name']
if not os.path.isdir(bucket_dir):
os.mkdir(bucket_dir)
rv, store, store_path = \
open_latest_store(bucket_dir,
"master.couch.*",
"^(master)\\.couch\\.([0-9]+)$",
"master.couch.1")
if rv != 0:
return rv
bulk_keys = []
bulk_vals = []
if sd:
for row in sd['rows']:
logging.debug("design_doc row: " + str(row))
d = couchstore.DocumentInfo(str(row['id']))
if '_rev' in row['doc']:
d.revMeta = str(row['doc']['_rev'])
del row['doc']['_rev']
d.contentType = couchstore.DocumentInfo.IS_JSON
bulk_keys.append(d)
bulk_vals.append(json.dumps(row['doc']))
if bulk_keys and bulk_vals:
store.saveMultiple(bulk_keys, bulk_vals) # TODO: Compress ddocs?
store.commit()
store.close()
return 0
def consume_batch_async(self, batch):
return self.push_next_batch(batch, pump.SinkBatchFuture(self, batch))
def open_store(self, vbucket_id):
# data_dir => /opt/couchbase/var/lib/couchbase/data/
# bucket_dir => default/
# store_path => VBUCKET_ID.couch.COMPACTION_NUM
if vbucket_id >= SFD_VBUCKETS:
return "error: vbucket_id too large: %s" % (vbucket_id), None, None
rv, bucket_dir = self.find_bucket_dir()
if rv != 0:
return rv, None, None
return open_latest_store(bucket_dir, "%s.couch.*" % (vbucket_id), SFD_RE,
str(vbucket_id) + ".couch.1", mode='c')
def find_bucket_dir(self):
rv, d = data_dir(self.spec)
if rv != 0:
return rv, None
bucket_dir = d + '/' + self.source_bucket['name']
if not os.path.isdir(bucket_dir):
try:
os.mkdir(bucket_dir)
except OSError, e:
return ("error: could not create bucket_dir: %s; exception: %s"
% (bucket_dir, e)), None
return 0, bucket_dir
def open_latest_store(bucket_dir, glob_pattern, filter_re, default_name, mode='c'):
store_paths = latest_couch_files(bucket_dir,
glob_pattern=glob_pattern,
filter_re=filter_re)
if not store_paths:
if mode == 'r':
return 0, None, None
store_paths = [bucket_dir + '/' + default_name]
if len(store_paths) != 1:
return ("error: no single, latest couchstore file: %s" +
"; found: %s") % (glob_pattern, store_paths), None, None
try:
return 0, couchstore.CouchStore(str(store_paths[0]), mode), store_paths[0]
except Exception, e:
return ("error: could not open couchstore file: %s" +
"; exception: %s") % (store_paths[0], e), None, None
def latest_couch_files(bucket_dir, glob_pattern='*.couch.*', filter_re=SFD_RE):
"""Given directory of *.couch.VER files, returns files with largest VER suffixes."""
files = glob.glob(bucket_dir + '/' + glob_pattern)
files = [f for f in files if re.match(filter_re, os.path.basename(f))]
matches = [(re.match(filter_re, os.path.basename(f)), f) for f in files]
latest = {}
for match, file in matches:
top, _ = latest.get(match.group(1), (-1, None))
cur = int(match.group(2))
if cur > top:
latest[match.group(1)] = (cur, file)
return sorted([file for top, file in latest.values()])
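# data_dir() splits a couchstore-files:// spec into (rv, directory); e.g.
# "couchstore-files:///opt/couchbase/var/lib/couchbase/data/" yields
# (0, "/opt/couchbase/var/lib/couchbase/data").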
def data_dir(spec):
if not spec.startswith(SFD_SCHEME):
return "error: wrong scheme in spec: " + spec, None
dir = spec[len(SFD_SCHEME):]
if dir:
return 0, os.path.normpath(dir)
else:
return "error: missing dir in spec: " + spec, None
|
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements an Ephemeral Value Adjustment Agent.
See https://arxiv.org/abs/1810.08163.
The algorithm queries trajectories from a replay buffer based on similarities
to embedding representations and uses a parametric model to compute values for
counterfactual state-action pairs when integrating across those trajectories.
Finally, a weighted average between the parametric (DQN in this case) and the
non-parametric model is used to compute the policy.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_agent
from open_spiel.python import simple_nets
from open_spiel.python.algorithms import dqn
# Temporarily disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
MEM_KEY_NAME = "embedding"
ValueBufferElement = collections.namedtuple("ValueElement", "embedding value")
ReplayBufferElement = collections.namedtuple(
"ReplayElement", "embedding info_state action reward next_info_state "
"is_final_step legal_actions_mask")
# TODO(author3) Refactor into data structures lib.
class QueryableFixedSizeRingBuffer(dqn.ReplayBuffer):
"""ReplayBuffer of fixed size with a FIFO replacement policy.
Stored transitions can be sampled uniformly. This extends the DQN replay
buffer by allowing the contents to be fetched by L2 proximity to a query
value.
  The underlying data structure is a ring buffer, allowing O(1) adding and
sampling.
"""
def knn(self, key, key_name, k, trajectory_len=1):
"""Computes top-k neighbours based on L2 distance.
Args:
key: (np.array) key value to query memory.
key_name: (str) attribute name of key in memory elements.
k: (int) number of neighbours to fetch.
trajectory_len: (int) length of trajectory to fetch from replay buffer.
Returns:
List of tuples (L2 negative distance, BufferElement) sorted in increasing
      order by the negative L2 distances from the key.
"""
distances = [(np.linalg.norm(getattr(sample, key_name) - key, 2,
axis=0), sample) for sample in self._data]
return sorted(distances, key=lambda v: -v[0])[:k]
class EVAAgent(object):
"""Implements a solver for Ephemeral VAlue Adjustment.
See https://arxiv.org/abs/1810.08163.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
"""
def __init__(self,
session,
game,
player_id,
state_size,
num_actions,
embedding_network_layers=(128,),
embedding_size=16,
dqn_hidden_layers=(128, 128),
batch_size=16,
trajectory_len=10,
num_neighbours=5,
learning_rate=1e-4,
mixing_parameter=0.9,
memory_capacity=int(1e6),
discount_factor=1.0,
update_target_network_every=1000,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e4),
embedding_as_parametric_input=False):
"""Initialize the Ephemeral VAlue Adjustment algorithm.
Args:
session: (tf.Session) TensorFlow session.
game: (rl_environment.Environment) Open Spiel game.
player_id: (int) Player id for this player.
state_size: (int) Size of info state vector.
num_actions: (int) number of actions.
embedding_network_layers: (list[int]) Layer sizes of strategy net MLP.
embedding_size: (int) Size of memory embeddings.
dqn_hidden_layers: (list(int)) MLP layer sizes of DQN network.
batch_size: (int) Size of batches for DQN learning steps.
trajectory_len: (int) Length of trajectories from replay buffer.
num_neighbours: (int) Number of neighbours to fetch from replay buffer.
learning_rate: (float) Learning rate.
mixing_parameter: (float) Value mixing parameter between 0 and 1.
      memory_capacity: Number of samples that can be stored in memory.
discount_factor: (float) Discount factor for Q-Learning.
update_target_network_every: How often to update DQN target network.
epsilon_start: (float) Starting epsilon-greedy value.
epsilon_end: (float) Final epsilon-greedy value.
epsilon_decay_duration: (float) Number of steps over which epsilon decays.
embedding_as_parametric_input: (bool) Whether we use embeddings as input
to the parametric model.
"""
assert (mixing_parameter >= 0 and mixing_parameter <= 1)
self._game = game
self._session = session
self.player_id = player_id
self._env = game
self._num_actions = num_actions
self._info_state_size = state_size
self._embedding_size = embedding_size
self._lambda = mixing_parameter
self._trajectory_len = trajectory_len
self._num_neighbours = num_neighbours
self._discount = discount_factor
self._epsilon_start = epsilon_start
self._epsilon_end = epsilon_end
self._epsilon_decay_duration = epsilon_decay_duration
self._last_time_step = None
self._last_action = None
self._embedding_as_parametric_input = embedding_as_parametric_input
# Create required TensorFlow placeholders to perform the Q-network updates.
self._info_state_ph = tf.placeholder(
shape=[None, self._info_state_size],
dtype=tf.float32,
name="info_state_ph")
self._embedding_network = simple_nets.MLP(self._info_state_size,
list(embedding_network_layers),
embedding_size)
self._embedding = self._embedding_network(self._info_state_ph)
# The DQN agent requires this be an integer.
if not isinstance(memory_capacity, int):
raise ValueError("Memory capacity not an integer.")
# Initialize the parametric & non-parametric Q-networks.
self._agent = dqn.DQN(
session,
player_id,
state_representation_size=self._info_state_size,
num_actions=self._num_actions,
hidden_layers_sizes=list(dqn_hidden_layers),
replay_buffer_capacity=memory_capacity,
replay_buffer_class=QueryableFixedSizeRingBuffer,
batch_size=batch_size,
learning_rate=learning_rate,
update_target_network_every=update_target_network_every,
learn_every=batch_size,
discount_factor=1.0,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6))
# Initialize Value Buffers - Fetch Replay buffers from agents.
self._value_buffer = QueryableFixedSizeRingBuffer(memory_capacity)
self._replay_buffer = self._agent.replay_buffer
# Initialize non-parametric & EVA Q-values.
self._v_np = collections.defaultdict(float)
self._q_np = collections.defaultdict(lambda: [0] * self._num_actions)
self._q_eva = collections.defaultdict(lambda: [0] * self._num_actions)
@property
def env(self):
return self._env
@property
def loss(self):
return self._agent.loss
def _add_transition_value(self, infostate_embedding, value):
"""Adds the embedding and value to the ValueBuffer.
Args:
      infostate_embedding: (np.array) embedding vector.
      value: (float) Value associated with state embedding.
"""
transition = ValueBufferElement(embedding=infostate_embedding, value=value)
self._value_buffer.add(transition)
def _add_transition_replay(self, infostate_embedding, time_step):
"""Adds the new transition using `time_step` to the replay buffer.
Adds the transition from `self._prev_timestep` to `time_step` by
`self._prev_action`.
Args:
      infostate_embedding: embedding vector.
time_step: an instance of rl_environment.TimeStep.
"""
prev_timestep = self._last_time_step
assert prev_timestep is not None
legal_actions = (
prev_timestep.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
reward = time_step.rewards[self.player_id] if time_step.rewards else 0.0
transition = ReplayBufferElement(
embedding=infostate_embedding,
info_state=(prev_timestep.observations["info_state"][self.player_id]),
action=self._last_action,
reward=reward,
next_info_state=time_step.observations["info_state"][self.player_id],
is_final_step=float(time_step.last()),
legal_actions_mask=legal_actions_mask)
self._replay_buffer.add(transition)
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the value functions.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
# Act step: don't act at terminal info states.
if not time_step.last():
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
epsilon = self._get_epsilon(self._agent.step_counter, is_evaluation)
# Sample an action from EVA via epsilon greedy policy.
action, probs = self._epsilon_greedy(self._q_eva[tuple(info_state)],
legal_actions, epsilon)
# Update Step: Only with transitions and not when evaluating.
if (not is_evaluation and self._last_time_step is not None):
info_state = self._last_time_step.observations["info_state"][
self.player_id]
legal_actions = self._last_time_step.observations["legal_actions"][
self.player_id]
epsilon = self._get_epsilon(self._agent.step_counter, is_evaluation)
# Get embedding.
infostate_embedding = self._session.run(
self._embedding,
feed_dict={self._info_state_ph: np.expand_dims(info_state,
axis=0)})[0]
neighbours_value = self._value_buffer.knn(infostate_embedding,
MEM_KEY_NAME,
self._num_neighbours, 1)
# collect trace values of knn from L (value buffer) .. Q_np(s_k)
neighbours_replay = self._replay_buffer.knn(infostate_embedding,
MEM_KEY_NAME,
self._num_neighbours,
self._trajectory_len)
# Take a step with the parametric model and get q-values. Use embedding as
      # input to the parametric model.
# TODO(author6) Recompute embeddings for buffers on learning steps.
if self._embedding_as_parametric_input:
last_time_step_copy = copy.deepcopy(self._last_time_step)
last_time_step_copy.observations["info_state"][
self.player_id] = infostate_embedding
self._agent.step(last_time_step_copy, add_transition_record=False)
else:
self._agent.step(self._last_time_step, add_transition_record=False)
q_values = self._session.run(
self._agent.q_values,
feed_dict={
self._agent.info_state_ph: np.expand_dims(info_state, axis=0)
})[0]
# Update EVA: Q_eva = lambda q_theta(s_t) + (1-lambda) sum(Q_np(s_k, .))/K
for a in legal_actions:
q_theta = q_values[a]
self._q_eva[tuple(info_state)][a] = (
self._lambda * q_theta + (1 - self._lambda) *
sum([elem[1].value
for elem in neighbours_value]) / self._num_neighbours)
# Append (e,s,a,r,s') to Replay Buffer
self._add_transition_replay(infostate_embedding, time_step)
      # Update Q_np with traces using trajectory-centric planning (TCP)
self._trajectory_centric_planning(neighbours_replay)
# Append Q_np(s, a) to Value Buffer
self._add_transition_value(
infostate_embedding, self._q_np[tuple(info_state)][self._last_action])
# Prepare for the next episode.
if time_step.last():
self._last_time_step = None
self._last_action = None
return
self._last_time_step = time_step
self._last_action = action
return rl_agent.StepOutput(action=action, probs=probs)
def _trajectory_centric_planning(self, trajectories):
"""Performs trajectory centric planning.
Uses trajectories from the replay buffer to update the non-parametric values
while supplying counter-factual values with the parametric model.
Args:
      trajectories: list of (distance, ReplayBufferElement) tuples fetched
        from the replay buffer.
"""
# Calculate non-parametric values over the trajectories.
# Iterate backward through trajectories
for t in range(len(trajectories) - 1, 0, -1):
elem = trajectories[t][1]
s_tp1 = tuple(elem.next_info_state)
s_t = tuple(elem.info_state)
a_t = elem.action
r_t = elem.reward
legal_actions = elem.legal_actions_mask
if t < len(trajectories) - 1:
for action in range(len(legal_actions)):
if not legal_actions[action]:
continue
if action == elem.action:
self._q_np[s_t][a_t] = (r_t + self._discount * self._v_np[s_tp1])
else:
q_values_parametric = self._session.run(
self._agent.q_values,
feed_dict={
self._agent.info_state_ph:
np.expand_dims(elem.info_state, axis=0)
})
self._q_np[s_t][a_t] = q_values_parametric[0][action]
# Set V(s_t)
if t == len(trajectories) - 1:
# Sample from the parametric model.
q_values_parametric = self._session.run(
self._agent.q_values,
feed_dict={
self._agent.info_state_ph:
np.expand_dims(elem.info_state, axis=0)
})
self._v_np[s_t] = np.max(q_values_parametric)
else:
self._v_np[s_t] = max(self._q_np[s_t])
def _epsilon_greedy(self, q_values, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and valid action probs.
Action probabilities are given by a softmax over legal q-values.
Args:
q_values: list of Q-values by action.
legal_actions: list of legal actions at `info_state`.
epsilon: float, probability of taking an exploratory action.
Returns:
A valid epsilon-greedy action and valid action probabilities.
"""
probs = np.zeros(self._num_actions)
q_values = np.array(q_values)
if np.random.rand() < epsilon:
action = np.random.choice(legal_actions)
probs[legal_actions] = 1.0 / len(legal_actions)
else:
legal_q_values = q_values[legal_actions]
action = legal_actions[np.argmax(legal_q_values)]
# Reduce max_q for numerical stability. Result is the same.
max_q = np.max(legal_q_values)
e_x = np.exp(legal_q_values - max_q)
probs[legal_actions] = e_x / e_x.sum(axis=0)
return action, probs
def _get_epsilon(self, step_counter, is_evaluation):
"""Returns the evaluation or decayed epsilon value."""
if is_evaluation:
return 0.0
decay_steps = min(step_counter, self._epsilon_decay_duration)
decayed_epsilon = (
self._epsilon_end + (self._epsilon_start - self._epsilon_end) *
(1 - decay_steps / self._epsilon_decay_duration))
return decayed_epsilon
def action_probabilities(self, state):
"""Returns action probabilites dict for a single batch."""
# TODO(author3, author6): Refactor this to expect pre-normalized form.
if hasattr(state, "information_state_tensor"):
state_rep = tuple(state.information_state_tensor(self.player_id))
elif hasattr(state, "observation_tensor"):
state_rep = tuple(state.observation_tensor(self.player_id))
else:
raise AttributeError("Unable to extract normalized state vector.")
legal_actions = state.legal_actions(self.player_id)
if legal_actions:
_, probs = self._epsilon_greedy(
self._q_eva[state_rep], legal_actions, epsilon=0.0)
return {a: probs[a] for a in range(self._num_actions)}
else:
raise ValueError("Node has no legal actions to take.")
|
|
import logging
import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.db import transaction
from django.urls import reverse as django_reverse
from django.utils.translation import activate
from django.utils.translation import ugettext as _
from mozilla_django_oidc.auth import OIDCAuthenticationBackend
from kitsune.customercare.tasks import update_zendesk_identity
from kitsune.products.models import Product
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.models import Profile
from kitsune.users.utils import add_to_contributors, get_oidc_fxa_setting
log = logging.getLogger("k.users")
class SumoOIDCAuthBackend(OIDCAuthenticationBackend):
def authenticate(self, request, **kwargs):
"""Authenticate a user based on the OIDC code flow."""
# If the request has the /fxa/callback/ path then probably there is a login
# with Firefox Accounts. In this case just return None and let
# the FxA backend handle this request.
if request and not request.path == django_reverse("oidc_authentication_callback"):
return None
return super(SumoOIDCAuthBackend, self).authenticate(request, **kwargs)
class FXAAuthBackend(OIDCAuthenticationBackend):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.refresh_token = None
@staticmethod
def get_settings(attr, *args):
"""Override settings for Firefox Accounts Provider."""
val = get_oidc_fxa_setting(attr)
if val is not None:
return val
return super(FXAAuthBackend, FXAAuthBackend).get_settings(attr, *args)
def get_token(self, payload):
token_info = super().get_token(payload)
self.refresh_token = token_info.get("refresh_token")
return token_info
@classmethod
def refresh_access_token(cls, refresh_token, ttl=None):
"""Gets a new access_token by using a refresh_token.
returns: the actual token or an empty dictionary
"""
if not refresh_token:
return {}
obj = cls()
payload = {
"client_id": obj.OIDC_RP_CLIENT_ID,
"client_secret": obj.OIDC_RP_CLIENT_SECRET,
"grant_type": "refresh_token",
"refresh_token": refresh_token,
}
if ttl:
payload.update({"ttl": ttl})
try:
return obj.get_token(payload=payload)
except requests.exceptions.HTTPError:
return {}
def create_user(self, claims):
"""Override create user method to mark the profile as migrated."""
user = super(FXAAuthBackend, self).create_user(claims)
# Create a user profile for the user and populate it with data from
# Firefox Accounts
profile, created = Profile.objects.get_or_create(user=user)
profile.is_fxa_migrated = True
profile.fxa_uid = claims.get("uid")
profile.fxa_avatar = claims.get("avatar", "")
profile.name = claims.get("displayName", "")
subscriptions = claims.get("subscriptions", [])
# Let's get the first element even if it's an empty string
# A few assertions return a locale of None so we need to default to empty string
fxa_locale = (claims.get("locale", "") or "").split(",")[0]
if fxa_locale in settings.SUMO_LANGUAGES:
profile.locale = fxa_locale
else:
profile.locale = self.request.session.get("login_locale", settings.LANGUAGE_CODE)
activate(profile.locale)
# If there is a refresh token, store it
if self.refresh_token:
profile.fxa_refresh_token = self.refresh_token
profile.save()
# User subscription information
products = Product.objects.filter(codename__in=subscriptions)
profile.products.clear()
profile.products.add(*products)
# This is a new sumo profile, show edit profile message
messages.success(
self.request,
_(
"<strong>Welcome!</strong> You are now logged in using Firefox Accounts. "
+ "{a_profile}Edit your profile.{a_close}<br>"
+ "Already have a different Mozilla Support Account? "
+ "{a_more}Read more.{a_close}"
).format(
a_profile='<a href="' + reverse("users.edit_my_profile") + '" target="_blank">',
a_more='<a href="'
+ reverse("wiki.document", args=["firefox-accounts-mozilla-support-faq"])
+ '" target="_blank">',
a_close="</a>",
),
extra_tags="safe",
)
if self.request.session.get("is_contributor", False):
add_to_contributors(user, profile.locale)
del self.request.session["is_contributor"]
return user
def filter_users_by_claims(self, claims):
"""Match users by FxA uid or email."""
fxa_uid = claims.get("uid")
user_model = get_user_model()
users = user_model.objects.none()
# Something went terribly wrong. Return the empty queryset.
if not fxa_uid:
log.warning("Failed to get Firefox Account UID.")
return users
# An existing user is attempting to connect a Firefox Account to the SUMO profile
# NOTE: this section will be dropped when the migration is complete
if self.request and self.request.user and self.request.user.is_authenticated:
return [self.request.user]
users = user_model.objects.filter(profile__fxa_uid=fxa_uid)
if not users:
# We did not match any users so far. Let's call the super method
# which will try to match users based on email
users = super(FXAAuthBackend, self).filter_users_by_claims(claims)
return users
def get_userinfo(self, access_token, id_token, payload):
"""Return user details and subscription information dictionary."""
user_info = super(FXAAuthBackend, self).get_userinfo(access_token, id_token, payload)
if not settings.FXA_OP_SUBSCRIPTION_ENDPOINT:
return user_info
# Fetch subscription information
try:
sub_response = requests.get(
settings.FXA_OP_SUBSCRIPTION_ENDPOINT,
headers={"Authorization": "Bearer {0}".format(access_token)},
verify=self.get_settings("OIDC_VERIFY_SSL", True),
)
sub_response.raise_for_status()
except requests.exceptions.RequestException:
log.error("Failed to fetch subscription status", exc_info=True)
# if something went wrong, just return whatever the profile endpoint holds
return user_info
# This will override whatever the profile endpoint returns
# until https://github.com/mozilla/fxa/issues/2463 is fixed
user_info["subscriptions"] = sub_response.json().get("subscriptions", [])
return user_info
def update_user(self, user, claims):
"""Update existing user with new claims, if necessary save, and return user"""
profile = user.profile
fxa_uid = claims.get("uid")
email = claims.get("email")
user_attr_changed = False
# Check if the user has active subscriptions
subscriptions = claims.get("subscriptions", [])
if not profile.is_fxa_migrated:
# Check if there is already a Firefox Account with this ID
if Profile.objects.filter(fxa_uid=fxa_uid).exists():
msg = _("This Firefox Account is already used in another profile.")
messages.error(self.request, msg)
return None
# If it's not migrated, we can assume that there isn't an FxA id either
profile.is_fxa_migrated = True
profile.fxa_uid = fxa_uid
# This is the first time an existing user is using FxA. Redirect to profile edit
# in case the user wants to update any settings.
self.request.session["oidc_login_next"] = reverse("users.edit_my_profile")
messages.info(self.request, "fxa_notification_updated")
# There is a change in the email in Firefox Accounts. Let's update user's email
# unless we have a superuser
if user.email != email and not user.is_staff:
if User.objects.exclude(id=user.id).filter(email=email).exists():
msg = _(
"The email used with this Firefox Account is already "
"linked in another profile."
)
messages.error(self.request, msg)
return None
user.email = email
user_attr_changed = True
# Follow avatars from FxA profiles
profile.fxa_avatar = claims.get("avatar", "")
# User subscription information
products = Product.objects.filter(codename__in=subscriptions)
profile.products.clear()
profile.products.add(*products)
# Users can select their own display name.
if not profile.name:
profile.name = claims.get("displayName", "")
# If there is a refresh token, store it
if self.refresh_token:
profile.fxa_refresh_token = self.refresh_token
with transaction.atomic():
if user_attr_changed:
user.save()
profile.save()
# If we have an updated email, let's update Zendesk too.
# The check is repeated on purpose: triggering the task only when we know
# the email changed saves a few API calls.
if user_attr_changed:
update_zendesk_identity.delay(user.id, email)
return user
def authenticate(self, request, **kwargs):
"""Authenticate a user based on the OIDC/oauth2 code flow."""
# If the request has the /oidc/callback/ path then probably there is a login
# attempt in the admin interface. In this case just return None and let
# the OIDC backend handle this request.
if request and request.path == django_reverse("oidc_authentication_callback"):
return None
return super(FXAAuthBackend, self).authenticate(request, **kwargs)
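# --- Illustrative sketch (not part of kitsune) ----------------------------------
# Rough outline of the OAuth2 refresh_token grant that refresh_access_token()
# above relies on, written directly against `requests`. The token endpoint URL
# and credentials are placeholders; this helper is never called here.
def _refresh_access_token_sketch(token_url, client_id, client_secret,
                                 refresh_token, ttl=None):
    payload = {
        "client_id": client_id,
        "client_secret": client_secret,
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
    }
    if ttl:
        payload["ttl"] = ttl
    try:
        resp = requests.post(token_url, data=payload, timeout=10)
        resp.raise_for_status()
        return resp.json()  # contains a fresh access_token on success
    except requests.exceptions.RequestException:
        return {}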
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module implementing the main loop and the telegram API."""
import enum
import datetime
from functools import partial
import logging
import logging.handlers
from queue import Queue
import random
import os
from signal import signal, SIGINT, SIGTERM, SIGABRT
import sys
import time
from telegram.ext import CommandHandler, Dispatcher, Filters, MessageHandler
from telegram.ext import Updater
import telegram
from src.messages import Messages
from src.model_interface import User, DbConnection
from src.model_interface import UserBadUseError, UserDoesNotExistError
from src import model_interface
from src.utils import Rut
from src import utils
from src import web
from src.web import ParsingException, Web, WebRetriever
logger = logging.getLogger('bot_main_logger') # pylint: disable=invalid-name
logger.setLevel(logging.DEBUG)
# Rotating file handler, rotates every 4 mondays.
try:
_LOG_HANDLER = logging.handlers.TimedRotatingFileHandler(
'log/bot.log', when='W0', interval=4,
utc=True) # type: logging.Handler
_LOG_HANDLER.setLevel(logging.DEBUG)
except FileNotFoundError:
print('log dir not found for file logging')
_LOG_HANDLER = logging.StreamHandler()
_LOG_HANDLER.setLevel(logging.DEBUG)
_LOG_FORMAT = (
"%(asctime)s - %(name)s - [%(filename)s:%(lineno)d] - %(levelname)s "
"- %(message)s")
_LOG_HANDLER.setFormatter(logging.Formatter(_LOG_FORMAT))
logger.addHandler(_LOG_HANDLER)
logger.info('Logging started')
# Gets the bot token from the environment.
TOKEN = os.getenv("BOT_TOKEN", None)
# Minimum hours before automatically updating a cached result for a user
HOURS_TO_UPDATE = 33
SUBSCRIBED = Queue() # type: Queue
class ValeVistaBot():
"""Class with all the telegram handlers for the bot."""
# Testing purposes.
username = "valevistabot"
# Arguments allow dependency injection for test purposes.
def __init__(self, db_connection: DbConnection,
web_retriever: WebRetriever = None,
cache: model_interface.Cache = None) -> None:
if web_retriever is None:
self._web_retriever = web.WebPageDownloader() # type: WebRetriever
else:
self._web_retriever = web_retriever
self._cache = cache or model_interface.Cache(db_connection)
self._running = True
self._db_connection = db_connection
# Command handlers.
@staticmethod
def start(unused_bot, update: telegram.Update):
"""Prints the start message."""
logger.debug('USR[%s]; START', update.message.from_user.id)
name = (update.message.from_user.first_name or
update.message.from_user.username)
update.message.reply_text(Messages.START_MSG % name)
# Sends a help message to the user.
@staticmethod
def help(unused_bot, update: telegram.Update):
"""Prints help message."""
logger.debug('USR[%s]; HELP', update.message.from_user.id)
update.message.reply_text(Messages.HELP_MSG)
# Query the service using the stored rut.
def get_rut(self, unused_bot, update: telegram.Update):
"""Query info for a previously set rut."""
telegram_id = update.message.from_user.id
rut = User(self._db_connection).get_rut(telegram_id)
if rut:
logger.debug('USR[%s]; GET_RUT[%s]', telegram_id, rut)
self.query_the_bank_and_reply(telegram_id, rut,
update.message.reply_text,
self.ReplyWhen.ALWAYS)
return
logger.debug('USR[%s]; GET_NO_RUT', telegram_id)
update.message.reply_text(Messages.NO_RUT_MSG)
def set_rut(self, unused_bot, update: telegram.Update):
"""Set a rut to easily query it in the future."""
spl = update.message.text.split(' ')
if len(spl) < 2:
logger.debug('USR[%s]; EMPTY_RUT', update.message.from_user.id)
update.message.reply_text(Messages.SET_EMPTY_RUT)
return
rut = Rut.build_rut(spl[1])
if rut is None:
logger.debug('USR[%s]; INVALID_RUT', update.message.from_user.id)
update.message.reply_text(Messages.SET_INVALID_RUT)
return
User(self._db_connection).set_rut(update.message.from_user.id, rut)
logger.debug("USR[%s]; SET_RUT[%s]", update.message.from_user.id, rut)
update.message.reply_text(Messages.SET_RUT % rut)
def subscribe(self, unused_bot, update: telegram.Update):
"""Subscribe and get updates on valevista changes for your rut."""
logger.debug("USR:[%s]; SUBSC", update.message.from_user.id)
chat_type = update.message.chat.type
if chat_type != 'private':
logger.debug('USR[%s]; FROM NON PRIVATE CHAT[%s]',
update.message.from_user.id, chat_type)
update.message.reply_text(Messages.FROM_NON_PRIVATE_CHAT)
return
try:
User(self._db_connection).subscribe(
update.message.from_user.id, update.message.chat.id)
except UserBadUseError as bad_user_exep:
logger.warning(bad_user_exep.public_message)
update.message.reply_text(bad_user_exep.public_message)
else:
update.message.reply_text(Messages.SUBSCRIBED)
def unsubscribe(self, unused_bot, update: telegram.Update):
"""Stop getting updates."""
logger.debug("USR:[%s]; UNSUBSC", update.message.from_user.id)
try:
User(self._db_connection).unsubscribe(update.message.from_user.id,
update.message.chat.id)
except UserBadUseError as bad_user_exep:
logger.warning(bad_user_exep.public_message)
update.message.reply_text(bad_user_exep.public_message)
except UserDoesNotExistError as user_exep:
logger.warning(user_exep.public_message)
update.message.reply_text(Messages.UNSUBSCRIBE_NON_SUBSCRIBED)
else:
logger.info("User %s unsubscribed", update.message.from_user.id)
update.message.reply_text(Messages.UNSUBSCRIBED)
@staticmethod
def debug(bot, update: telegram.Update):
"""Telegram framework debug handler."""
logger.info("Debug: %s, %s", bot, update)
@staticmethod
def error(unused_bot, update: telegram.Update, error):
"""Telegram framework error handler."""
logger.warning("Update %s caused error: %s", update, error)
# Non command messages
def msg(self, bot, update: telegram.Update):
"""Handler when a message arrives."""
# Log every msg received.
logger.debug("USR:[%s]; MSG:[%s]", update.message.from_user.id,
update.message.text)
rut = Rut.build_rut(update.message.text)
if rut:
self.query_the_bank_and_reply(update.message.from_user.id, rut,
update.message.reply_text,
self.ReplyWhen.ALWAYS)
elif Rut.looks_like_rut(update.message.text):
update.message.reply_text(Messages.LOOKS_LIKE_RUT)
else:
self.echo(bot, update)
# Non telegram handlers.
@staticmethod
def echo(unused_bot, update):
"""Replies with the message received."""
update.message.reply_text(update.message.text)
@staticmethod
def send_message_retry(send_fx, retries):
    """Calls send_fx, retrying on network errors up to `retries` times.

    A standalone sketch of this retry pattern appears after the class.
    """
    while True:
        try:
            send_fx()
            return
        except telegram.error.NetworkError:
            if retries == 0:
                logger.exception("Network error, giving up.")
                return
            retries = retries - 1
            logger.warning("Network error, retrying.")
class ReplyWhen(enum.Enum):
"""When to send a message to the user."""
ALWAYS = 1  # Send a message even if no useful data is found.
IS_USEFUL_FOR_USER = 2 # Only send a message if there is useful data.
def query_the_bank_and_reply(self, telegram_id: int, rut: Rut, reply_fn,
reply_when: ReplyWhen):
"""Query the bank for updates, and send a message to the user.
If reply_when is set to always, send a message to the user even if
there are no changes from the last time we queried. Otherwise will
send a message to the user only if new and useful information was
retrieved.
"""
def reply(msg):
"""Wrapper for retrying on network error."""
return self.send_message_retry(lambda: reply_fn(msg), 3)
try:
web_result = Web(self._db_connection, rut, telegram_id,
self._cache, self._web_retriever)
response = web_result.get_results()
# Expected exception.
except ParsingException as parsing_exep:
if reply_when == self.ReplyWhen.ALWAYS:
reply(parsing_exep.public_message)
return
except Exception: # pylint: disable=broad-except
logger.exception("Error:")
if reply_when == self.ReplyWhen.ALWAYS:
reply(Messages.INTERNAL_ERROR)
return
if reply_when == self.ReplyWhen.ALWAYS:
reply(response)
elif reply_when == self.ReplyWhen.IS_USEFUL_FOR_USER:
if web_result.is_useful_info_for_user():
logger.debug('USR[%s]; Useful[%s]', telegram_id, response)
reply(response)
else:
logger.error('Not handled enum: %s', reply_when)
# Bot helping functions.
def add_handlers(self, dispatcher: Dispatcher) -> None:
"""Adds all ValeVistaBot handlers to 'dispatcher'."""
dispatcher.add_handler(CommandHandler("start", self.start))
dispatcher.add_handler(CommandHandler("set", self.set_rut))
dispatcher.add_handler(CommandHandler("get", self.get_rut))
dispatcher.add_handler(CommandHandler("debug", self.debug))
dispatcher.add_handler(CommandHandler("help", self.help))
dispatcher.add_handler(CommandHandler("subscribe", self.subscribe))
dispatcher.add_handler(CommandHandler("unsubscribe", self.unsubscribe))
dispatcher.add_handler(MessageHandler(Filters.text, self.msg))
dispatcher.add_error_handler(self.error)
def signal_handler(self, unused_signum, unused_frame):
"""Gracefully stops the bot on a received signal."""
if self._running:
self._running = False
else:
logger.error("Exiting now!")
sys.exit(1)
def step(self, updater, hours=HOURS_TO_UPDATE):
"""Checks the bank for subscribed users.
If useful new data is available, send a message to the user.
"""
user_conn = User(self._db_connection)
users_to_update = user_conn.get_subscribers_to_update(hours)
if not users_to_update:
return
user_to_update = users_to_update[random.randint(
0, len(users_to_update) - 1)]
logger.debug("To update queue length: %s. Updating: user_id=%s",
len(users_to_update), user_to_update.id)
rut = Rut.build_rut_sin_digito(user_to_update.rut)
user_chat_id = user_conn.get_chat_id(user_to_update.id)
try:
self.query_the_bank_and_reply(
user_to_update.telegram_id, rut,
partial(updater.bot.sendMessage, user_chat_id),
ValeVistaBot.ReplyWhen.IS_USEFUL_FOR_USER)
except telegram.error.Unauthorized:
logger.debug(
'USR[%s]; CHAT_ID[%s] Unauthorized us, unsubscribing...',
user_to_update.telegram_id, user_chat_id)
user_conn.unsubscribe(user_to_update.telegram_id, user_chat_id)
def loop(self, updater):
"""Background loop to check for updates."""
while self._running:
try:
if utils.is_a_proper_time(datetime.datetime.utcnow()):
self.step(updater)
except Exception: # pylint: disable=broad-except
logger.exception("step failed")
# Between 5 and 25 minutes
time.sleep(random.randint(5 * 60, 25 * 60))
updater.stop()
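# --- Illustrative sketch (not part of the bot) -----------------------------------
# Standalone version of the retry pattern used by send_message_retry() above:
# call a function, retry on a configurable exception type a bounded number of
# times, and stop as soon as it succeeds. Names here are hypothetical.
def _retry_sketch(fn, retries=3, exc=Exception):
    """Returns True if fn() eventually succeeded, False if retries ran out."""
    while True:
        try:
            fn()
            return True
        except exc:
            if retries == 0:
                return False
            retries -= 1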
def main():
"""Entry point."""
bot = ValeVistaBot(DbConnection())
stop_signals = (SIGINT, SIGTERM, SIGABRT)
for sig in stop_signals:
signal(sig, bot.signal_handler)
updater = Updater(TOKEN)
dispatcher = updater.dispatcher
bot.add_handlers(dispatcher)
updater.start_webhook(listen="0.0.0.0", port=80,
url_path="/bot-valevista")
bot.loop(updater)
if __name__ == "__main__":
main()
|
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test compress plugin
'''
# This test case is very bare-bones. It only covers a few scenarios that have caused problems.
# Skip if plugins not present.
#
Test.SkipUnless(
Condition.PluginExists('compress.so'),
Condition.PluginExists('conf_remap.so'),
Condition.HasATSFeature('TS_HAS_BROTLI')
)
server = Test.MakeOriginServer("server", options={'--load': '{}/compress_observer.py'.format(Test.TestDirectory)})
def repeat(str, count):
result = ""
while count > 0:
result += str
count -= 1
return result
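# Illustrative note (not part of the original test): the repeat() helper above is
# equivalent to Python's built-in string repetition.
assert repeat("ab\n", 3) == "ab\n" * 3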
# Need a fairly big body, otherwise the plugin will refuse to compress
body = repeat("lets go surfin now everybodys learnin how\n", 24)
body = body + "lets go surfin now everybodys learnin how"
# expected response from the origin server
response_header = {
"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n" +
'Etag: "359670651"\r\n' +
"Cache-Control: public, max-age=31536000\r\n" +
"Accept-Ranges: bytes\r\n" +
"Content-Type: text/javascript\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body
}
for i in range(3):
# add request/response to the server dictionary
request_header = {
"headers": "GET /obj{} HTTP/1.1\r\nHost: just.any.thing\r\n\r\n".format(i), "timestamp": "1469733493.993", "body": ""
}
server.addResponse("sessionfile.log", request_header, response_header)
# post for the origin server
post_request_header = {
"headers": "POST /obj3 HTTP/1.1\r\nHost: just.any.thing\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 11\r\n\r\n",
"timestamp": "1469733493.993",
"body": "knock knock"}
server.addResponse("sessionfile.log", post_request_header, response_header)
def curl(ts, idx, encodingList):
return (
"curl --verbose --proxy http://127.0.0.1:{}".format(ts.Variables.port) +
" --header 'X-Ats-Compress-Test: {}/{}'".format(idx, encodingList) +
" --header 'Accept-Encoding: {0}' 'http://ae-{1}/obj{1}'".format(encodingList, idx) +
" 2>> compress_long.log ; printf '\n===\n' >> compress_long.log"
)
def curl_post(ts, idx, encodingList):
return (
"curl --verbose -d 'knock knock' --proxy http://127.0.0.1:{}".format(ts.Variables.port) +
" --header 'X-Ats-Compress-Test: {}/{}'".format(idx, encodingList) +
" --header 'Accept-Encoding: {0}' 'http://ae-{1}/obj{1}'".format(encodingList, idx) +
" 2>> compress_long.log ; printf '\n===\n' >> compress_long.log"
)
waitForServer = True
waitForTs = True
ts = Test.MakeATSProcess("ts", enable_cache=False)
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'compress',
'proxy.config.http.normalize_ae': 0,
})
ts.Disk.remap_config.AddLine(
'map http://ae-0/ http://127.0.0.1:{}/'.format(server.Variables.Port) +
' @plugin=compress.so @pparam={}/compress.config'.format(Test.TestDirectory)
)
ts.Disk.remap_config.AddLine(
'map http://ae-1/ http://127.0.0.1:{}/'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=1' +
' @plugin=compress.so @pparam={}/compress.config'.format(Test.TestDirectory)
)
ts.Disk.remap_config.AddLine(
'map http://ae-2/ http://127.0.0.1:{}/'.format(server.Variables.Port) +
' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=2' +
' @plugin=compress.so @pparam={}/compress2.config'.format(Test.TestDirectory)
)
ts.Disk.remap_config.AddLine(
'map http://ae-3/ http://127.0.0.1:{}/'.format(server.Variables.Port) +
' @plugin=compress.so @pparam={}/compress.config'.format(Test.TestDirectory)
)
for i in range(3):
tr = Test.AddTestRun()
if (waitForTs):
tr.Processes.Default.StartBefore(ts)
waitForTs = False
if (waitForServer):
tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
waitForServer = False
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, i, 'gzip, deflate, sdch, br')
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, i, "gzip")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, i, "br")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, i, "deflate")
# Test Accept-Encoding normalization.
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "gzip;q=0.666")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "gzip;q=0.666x")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "gzip;q=#0.666")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "gzip; Q = 0.666")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "gzip;q=0.0")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "gzip;q=-0.1")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "aaa, gzip;q=0.666, bbb")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, " br ; q=0.666, bbb")
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl(ts, 0, "aaa, gzip;q=0.666 , ")
# post
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = curl_post(ts, 3, "gzip")
# compress_long.log contains all the output from the curl commands. The tr command removes the carriage returns
# for easier readability. Curl seems to have a bug where it will neglect to output an end of line before
# outputting an HTTP message header line. The sed command is a work-around for this problem. greplog.sh uses grep
# to select the HTTP request/response lines that should be consistent every time the test runs.
#
tr = Test.AddTestRun()
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Command = (
r"tr -d '\r' < compress_long.log | sed 's/\(..*\)\([<>]\)/\1\n\2/' | {0}/greplog.sh > compress_short.log"
).format(Test.TestDirectory)
f = tr.Disk.File("compress_short.log")
f.Content = "compress.gold"
tr = Test.AddTestRun()
tr.Processes.Default.Command = "echo"
f = tr.Disk.File("compress_userver.log")
f.Content = "compress_userver.gold"
|
|
# -*- coding: utf-8 -*-
from functools import wraps
from io import BytesIO
import datetime as dt
import gzip
import json
from flask import Blueprint, current_app, jsonify, request
from doorman.database import db
from doorman.extensions import log_tee
from doorman.models import (
Node, Tag,
DistributedQueryTask, DistributedQueryResult,
StatusLog,
)
from doorman.tasks import analyze_result, notify_of_node_enrollment
from doorman.utils import process_result
blueprint = Blueprint('api', __name__)
def node_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
# in v1.7.4, the Content-Encoding header is set when
# --logger_tls_compress=true
if 'Content-Encoding' in request.headers and \
request.headers['Content-Encoding'] == 'gzip':
request._cached_data = gzip.GzipFile(
fileobj=BytesIO(request.get_data())).read()
request_json = request.get_json()
if not request_json or 'node_key' not in request_json:
current_app.logger.error(
"%s - Request did not contain valid JSON data. This could "
"be an attempt to gather information about this endpoint "
"or an automated scanner.",
request.remote_addr
)
# Return nothing
return ""
node_key = request_json.get('node_key')
node = Node.query.filter_by(node_key=node_key) \
.options(db.lazyload('*')).first()
if not node:
current_app.logger.error(
"%s - Could not find node with node_key %s",
request.remote_addr, node_key
)
return jsonify(node_invalid=True)
if not node.is_active:
current_app.logger.error(
"%s - Node %s came back from the dead!",
request.remote_addr, node_key
)
return jsonify(node_invalid=True)
node.update(
last_checkin=dt.datetime.utcnow(),
last_ip=request.remote_addr,
commit=False
)
return f(node=node, *args, **kwargs)
return decorated_function
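# --- Illustrative sketch (not part of doorman) ------------------------------------
# Standalone demonstration of the gzip handling performed in node_required()
# above: when osquery sends `Content-Encoding: gzip`, the raw body must be
# decompressed before it can be parsed as JSON.
def _gunzip_body_sketch(raw_bytes):
    return gzip.GzipFile(fileobj=BytesIO(raw_bytes)).read()

# Example round trip (Python 3's gzip.compress is assumed here):
#   _gunzip_body_sketch(gzip.compress(b'{"node_key": "abc"}')) == b'{"node_key": "abc"}'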
@blueprint.route('/')
def index():
return '', 204
@blueprint.route('/enroll', methods=['POST', 'PUT'])
@blueprint.route('/v1/enroll', methods=['POST', 'PUT'])
def enroll():
'''
Enroll an endpoint with osquery.
:returns: a `node_key` unique id. Additionally `node_invalid` will
be true if the node failed to enroll.
'''
request_json = request.get_json()
if not request_json:
current_app.logger.error(
"%s - Request did not contain valid JSON data. This could "
"be an attempt to gather information about this endpoint "
"or an automated scanner.",
request.remote_addr
)
# Return nothing
return ""
enroll_secret = request_json.get(
current_app.config.get('DOORMAN_ENROLL_OVERRIDE', 'enroll_secret'))
if not enroll_secret:
current_app.logger.error(
"%s - No enroll_secret provided by remote host",
request.remote_addr
)
return jsonify(node_invalid=True)
# If we pre-populate node table with a per-node enroll_secret,
# let's query it now.
if current_app.config.get('DOORMAN_ENROLL_SECRET_TAG_DELIMITER'):
delimiter = current_app.config.get('DOORMAN_ENROLL_SECRET_TAG_DELIMITER')
enroll_secret, _, enroll_tags = enroll_secret.partition(delimiter)
enroll_tags = set([tag.strip() for tag in enroll_tags.split(delimiter)[:10]])
else:
enroll_secret, enroll_tags = enroll_secret, set()
node = Node.query.filter(Node.enroll_secret == enroll_secret).first()
if not node and enroll_secret not in current_app.config['DOORMAN_ENROLL_SECRET']:
current_app.logger.error("%s - Invalid enroll_secret %s",
request.remote_addr, enroll_secret
)
return jsonify(node_invalid=True)
host_identifier = request_json.get('host_identifier')
if node and node.enrolled_on:
current_app.logger.warn(
"%s - %s already enrolled on %s, returning existing node_key",
request.remote_addr, node, node.enrolled_on
)
if node.host_identifier != host_identifier:
current_app.logger.info(
"%s - %s changed their host_identifier to %s",
request.remote_addr, node, host_identifier
)
node.host_identifier = host_identifier
node.update(
last_checkin=dt.datetime.utcnow(),
last_ip=request.remote_addr
)
return jsonify(node_key=node.node_key, node_invalid=False)
existing_node = None
if host_identifier:
existing_node = Node.query.filter(
Node.host_identifier == host_identifier
).first()
if existing_node and not existing_node.enroll_secret:
current_app.logger.warning(
"%s - Duplicate host_identifier %s, already enrolled %s",
request.remote_addr, host_identifier, existing_node.enrolled_on
)
if current_app.config['DOORMAN_EXPECTS_UNIQUE_HOST_ID'] is True:
current_app.logger.info(
"%s - Unique host identification is true, %s already enrolled "
"returning existing node key %s",
request.remote_addr, host_identifier, existing_node.node_key
)
existing_node.update(
last_checkin=dt.datetime.utcnow(),
last_ip=request.remote_addr
)
return jsonify(node_key=existing_node.node_key, node_invalid=False)
now = dt.datetime.utcnow()
if node:
node.update(host_identifier=host_identifier,
last_checkin=now,
enrolled_on=now,
last_ip=request.remote_addr)
else:
node = Node(host_identifier=host_identifier,
last_checkin=now,
enrolled_on=now,
last_ip=request.remote_addr)
enroll_tags.update(current_app.config.get('DOORMAN_ENROLL_DEFAULT_TAGS', []))
for value in sorted((t.strip() for t in enroll_tags if t)):
tag = Tag.query.filter_by(value=value).first()
if tag and tag not in node.tags:
node.tags.append(tag)
elif not tag:
node.tags.append(Tag(value=value))
node.save()
current_app.logger.info("%s - Enrolled new node %s",
request.remote_addr, node
)
notify_of_node_enrollment.delay(node.to_dict())
return jsonify(node_key=node.node_key, node_invalid=False)
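# --- Illustrative sketch (not part of doorman) ------------------------------------
# Standalone version of the DOORMAN_ENROLL_SECRET_TAG_DELIMITER parsing used in
# enroll() above: everything before the first delimiter is the secret, the rest
# becomes at most 10 tags (empty fragments are dropped here for clarity).
def _split_enroll_secret_sketch(raw_secret, delimiter=":"):
    secret, _, tag_part = raw_secret.partition(delimiter)
    tags = set(tag.strip() for tag in tag_part.split(delimiter)[:10] if tag.strip())
    return secret, tags

# Example:
#   _split_enroll_secret_sketch("s3cr3t:web:db") == ("s3cr3t", {"web", "db"})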
@blueprint.route('/config', methods=['POST', 'PUT'])
@blueprint.route('/v1/config', methods=['POST', 'PUT'])
@node_required
def configuration(node=None):
'''
Retrieve an osquery configuration for a given node.
:returns: an osquery configuration file
'''
current_app.logger.info(
"%s - %s checking in to retrieve a new configuration",
request.remote_addr, node
)
config = node.get_config()
# write last_checkin, last_ip
db.session.add(node)
db.session.commit()
return jsonify(node_invalid=False, **config)
@blueprint.route('/log', methods=['POST', 'PUT'])
@blueprint.route('/v1/log', methods=['POST', 'PUT'])
@node_required
def logger(node=None):
'''
'''
data = request.get_json()
log_type = data['log_type']
log_level = current_app.config['DOORMAN_MINIMUM_OSQUERY_LOG_LEVEL']
if current_app.debug:
current_app.logger.debug(json.dumps(data, indent=2))
if log_type == 'status':
log_tee.handle_status(data, host_identifier=node.host_identifier)
status_logs = []
for item in data.get('data', []):
if int(item['severity']) < log_level:
continue
status_logs.append(StatusLog(node_id=node.id, **item))
else:
db.session.add(node)
db.session.bulk_save_objects(status_logs)
db.session.commit()
elif log_type == 'result':
db.session.add(node)
db.session.bulk_save_objects(process_result(data, node))
db.session.commit()
log_tee.handle_result(data, host_identifier=node.host_identifier)
analyze_result.delay(data, node.to_dict())
else:
current_app.logger.error("%s - Unknown log_type %r",
request.remote_addr, log_type
)
current_app.logger.info(json.dumps(data))
# still need to write last_checkin, last_ip
db.session.add(node)
db.session.commit()
return jsonify(node_invalid=False)
@blueprint.route('/distributed/read', methods=['POST', 'PUT'])
@blueprint.route('/v1/distributed/read', methods=['POST', 'PUT'])
@node_required
def distributed_read(node=None):
'''
'''
data = request.get_json()
current_app.logger.info(
"%s - %s checking in to retrieve distributed queries",
request.remote_addr, node
)
queries = node.get_new_queries()
# need to write last_checkin, last_ip, and update distributed
# query state
db.session.add(node)
db.session.commit()
return jsonify(queries=queries, node_invalid=False)
@blueprint.route('/distributed/write', methods=['POST', 'PUT'])
@blueprint.route('/v1/distributed/write', methods=['POST', 'PUT'])
@node_required
def distributed_write(node=None):
'''
'''
data = request.get_json()
if current_app.debug:
current_app.logger.debug(json.dumps(data, indent=2))
queries = data.get('queries', {})
statuses = data.get('statuses', {})
for guid, results in queries.items():
task = DistributedQueryTask.query.filter(
DistributedQueryTask.guid == guid,
DistributedQueryTask.status == DistributedQueryTask.PENDING,
DistributedQueryTask.node == node,
).first()
if not task:
current_app.logger.error(
"%s - Got result for distributed query not in PENDING "
"state: %s: %s",
request.remote_addr, guid, json.dumps(data)
)
continue
# non-zero status indicates sqlite errors
if not statuses.get(guid, 0):
status = DistributedQueryTask.COMPLETE
else:
current_app.logger.error(
"%s - Got non-zero status code (%d) on distributed query %s",
request.remote_addr, statuses.get(guid), guid
)
status = DistributedQueryTask.FAILED
for columns in results:
result = DistributedQueryResult(
columns,
distributed_query=task.distributed_query,
distributed_query_task=task
)
db.session.add(result)
else:
task.status = status
db.session.add(task)
else:
# need to write last_checkin, last_ip on node
db.session.add(node)
db.session.commit()
return jsonify(node_invalid=False)
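# --- Illustrative note (not part of doorman) ---------------------------------------
# logger() and distributed_write() above use Python's `for ... else`: the `else`
# suite runs only when the loop finishes without `break`. Since those loops never
# break, the `else` behaves like code placed directly after the loop, but the
# construct is worth recognizing when reading them. A minimal standalone example:
def _for_else_sketch(items):
    for item in items:
        if item is None:
            break  # abandon the batch
    else:
        return "committed"  # reached only when no break occurred
    return "aborted"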
|
|
# tests
import logging
import sys
import csv
import StringIO
import operator
from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponseForbidden, Http404, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from django.utils import timezone
from dojo.filters import TemplateFindingFilter
from dojo.forms import NoteForm, TestForm, FindingForm, \
DeleteTestForm, AddFindingForm, \
ImportScanForm, ReImportScanForm, FindingBulkUpdateForm, JIRAFindingForm
from dojo.models import Finding, Test, Notes, \
BurpRawRequestResponse, Endpoint, Stub_Finding, Finding_Template, JIRA_PKey, Cred_User, Cred_Mapping, Dojo_User
from dojo.tools.factory import import_parser_factory
from dojo.utils import get_page_items, add_breadcrumb, get_cal_event, message, \
process_notifications, get_system_setting, create_notification
from dojo.tasks import add_issue_task
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s',
datefmt='%d/%b/%Y %H:%M:%S',
filename=settings.DOJO_ROOT + '/../django_app.log',
)
logger = logging.getLogger(__name__)
@user_passes_test(lambda u: u.is_staff)
def view_test(request, tid):
test = Test.objects.get(id=tid)
notes = test.notes.all()
person = request.user.username
findings = Finding.objects.filter(test=test)
stub_findings = Stub_Finding.objects.filter(test=test)
cred_test = Cred_Mapping.objects.filter(test=test).select_related('cred_id').order_by('cred_id')
creds = Cred_Mapping.objects.filter(engagement=test.engagement).select_related('cred_id').order_by('cred_id')
if request.method == 'POST':
form = NoteForm(request.POST)
if form.is_valid():
new_note = form.save(commit=False)
new_note.author = request.user
new_note.date = timezone.now()
new_note.save()
test.notes.add(new_note)
form = NoteForm()
url = request.build_absolute_uri(reverse("view_test", args=(test.id,)))
title="Test: %s on %s" % (test.test_type.name, test.engagement.product.name)
process_notifications(request, new_note, url, title)
messages.add_message(request,
messages.SUCCESS,
'Note added successfully.',
extra_tags='alert-success')
else:
form = NoteForm()
fpage = get_page_items(request, findings, 25)
sfpage = get_page_items(request, stub_findings, 25)
show_re_upload = any(test.test_type.name in code for code in ImportScanForm.SCAN_TYPE_CHOICES)
add_breadcrumb(parent=test, top_level=False, request=request)
return render(request, 'dojo/view_test.html',
{'test': test,
'findings': fpage,
'stub_findings': sfpage,
'form': form,
'notes': notes,
'person': person,
'request': request,
'show_re_upload': show_re_upload,
'creds': creds,
'cred_test': cred_test
})
@user_passes_test(lambda u: u.is_staff)
def edit_test(request, tid):
test = get_object_or_404(Test, pk=tid)
form = TestForm(instance=test)
if request.method == 'POST':
form = TestForm(request.POST, instance=test)
if form.is_valid():
new_test = form.save()
tags = request.POST.getlist('tags')
t = ", ".join(tags)
new_test.tags = t
messages.add_message(request,
messages.SUCCESS,
'Test saved.',
extra_tags='alert-success')
form.initial['target_start'] = test.target_start.date()
form.initial['target_end'] = test.target_end.date()
form.initial['tags'] = [tag.name for tag in test.tags]
add_breadcrumb(parent=test, title="Edit", top_level=False, request=request)
return render(request, 'dojo/edit_test.html',
{'test': test,
'form': form,
})
@user_passes_test(lambda u: u.is_staff)
def delete_test(request, tid):
test = get_object_or_404(Test, pk=tid)
eng = test.engagement
form = DeleteTestForm(instance=test)
from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([test])
rels = collector.nested()
if request.method == 'POST':
if 'id' in request.POST and str(test.id) == request.POST['id']:
form = DeleteTestForm(request.POST, instance=test)
if form.is_valid():
del test.tags
test.delete()
messages.add_message(request,
messages.SUCCESS,
'Test and relationships removed.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_engagement', args=(eng.id,)))
add_breadcrumb(parent=test, title="Delete", top_level=False, request=request)
return render(request, 'dojo/delete_test.html',
{'test': test,
'form': form,
'rels': rels,
'deletable_objects': rels,
})
@user_passes_test(lambda u: u.is_staff)
def delete_test_note(request, tid, nid):
note = Notes.objects.get(id=nid)
test = Test.objects.get(id=tid)
if note.author == request.user:
test.notes.remove(note)
note.delete()
messages.add_message(request,
messages.SUCCESS,
'Note removed.',
extra_tags='alert-success')
return view_test(request, tid)
return HttpResponseForbidden()
@user_passes_test(lambda u: u.is_staff)
@cache_page(60 * 5) # cache for 5 minutes
def test_calendar(request):
if 'lead' not in request.GET or '0' in request.GET.getlist('lead'):
tests = Test.objects.all()
else:
filters = []
leads = request.GET.getlist('lead','')
if '-1' in request.GET.getlist('lead'):
leads.remove('-1')
filters.append(Q(lead__isnull=True))
filters.append(Q(lead__in=leads))
tests = Test.objects.filter(reduce(operator.or_, filters))
add_breadcrumb(title="Test Calendar", top_level=True, request=request)
return render(request, 'dojo/calendar.html', {
'caltype': 'tests',
'leads': request.GET.getlist('lead', ''),
'tests': tests,
'users': Dojo_User.objects.all()})
@user_passes_test(lambda u: u.is_staff)
def test_ics(request, tid):
test = get_object_or_404(Test, id=tid)
start_date = datetime.combine(test.target_start, datetime.min.time())
end_date = datetime.combine(test.target_end, datetime.max.time())
uid = "dojo_test_%d_%d_%d" % (test.id, test.engagement.id, test.engagement.product.id)
cal = get_cal_event(start_date,
end_date,
"Test: %s (%s)" % (test.test_type.name, test.engagement.product.name),
"Set aside for test %s, on product %s. Additional detail can be found at %s" % (
test.test_type.name, test.engagement.product.name,
request.build_absolute_uri((reverse("view_test", args=(test.id,))))),
uid)
output = cal.serialize()
response = HttpResponse(content=output)
response['Content-Type'] = 'text/calendar'
response['Content-Disposition'] = 'attachment; filename=%s.ics' % test.test_type.name
return response
@user_passes_test(lambda u: u.is_staff)
def add_findings(request, tid):
test = Test.objects.get(id=tid)
form_error = False
enabled = False
jform = None
form = AddFindingForm(initial={'date': timezone.now().date()})
if get_system_setting('enable_jira') and JIRA_PKey.objects.filter(product=test.engagement.product).count() != 0:
enabled = JIRA_PKey.objects.get(product=test.engagement.product).push_all_issues
jform = JIRAFindingForm(enabled=enabled, prefix='jiraform')
else:
jform = None
if request.method == 'POST':
form = AddFindingForm(request.POST)
if form.is_valid():
new_finding = form.save(commit=False)
new_finding.test = test
new_finding.reporter = request.user
new_finding.numerical_severity = Finding.get_numerical_severity(
new_finding.severity)
if new_finding.false_p or new_finding.active is False:
new_finding.mitigated = timezone.now()
new_finding.mitigated_by = request.user
create_template = new_finding.is_template
# always false now since this will be deprecated soon in favor of new Finding_Template model
new_finding.is_template = False
new_finding.save()
new_finding.endpoints = form.cleaned_data['endpoints']
new_finding.save()
if 'jiraform-push_to_jira' in request.POST:
jform = JIRAFindingForm(request.POST, prefix='jiraform', enabled=enabled)
if jform.is_valid():
add_issue_task.delay(new_finding, jform.cleaned_data.get('push_to_jira'))
messages.add_message(request,
messages.SUCCESS,
'Finding added successfully.',
extra_tags='alert-success')
if create_template:
templates = Finding_Template.objects.filter(title=new_finding.title)
if len(templates) > 0:
messages.add_message(request,
messages.ERROR,
'A finding template was not created. A template with this title already '
'exists.',
extra_tags='alert-danger')
else:
template = Finding_Template(title=new_finding.title,
cwe=new_finding.cwe,
severity=new_finding.severity,
description=new_finding.description,
mitigation=new_finding.mitigation,
impact=new_finding.impact,
references=new_finding.references,
numerical_severity=new_finding.numerical_severity)
template.save()
messages.add_message(request,
messages.SUCCESS,
'A finding template was also created.',
extra_tags='alert-success')
if '_Finished' in request.POST:
return HttpResponseRedirect(reverse('view_test', args=(test.id,)))
else:
return HttpResponseRedirect(reverse('add_findings', args=(test.id,)))
else:
if 'endpoints' in form.cleaned_data:
form.fields['endpoints'].queryset = form.cleaned_data['endpoints']
else:
form.fields['endpoints'].queryset = Endpoint.objects.none()
form_error = True
messages.add_message(request,
messages.ERROR,
'The form has errors, please correct them below.',
extra_tags='alert-danger')
add_breadcrumb(parent=test, title="Add Finding", top_level=False, request=request)
return render(request, 'dojo/add_findings.html',
{'form': form,
'test': test,
'temp': False,
'tid': tid,
'form_error': form_error,
'jform': jform,
})
@user_passes_test(lambda u: u.is_staff)
def add_temp_finding(request, tid, fid):
jform = None
test = get_object_or_404(Test, id=tid)
finding = get_object_or_404(Finding_Template, id=fid)
findings = Finding_Template.objects.all()
if request.method == 'POST':
form = FindingForm(request.POST)
if form.is_valid():
new_finding = form.save(commit=False)
new_finding.test = test
new_finding.reporter = request.user
new_finding.numerical_severity = Finding.get_numerical_severity(
new_finding.severity)
new_finding.date = datetime.today()
if new_finding.false_p or new_finding.active is False:
new_finding.mitigated = timezone.now()
new_finding.mitigated_by = request.user
create_template = new_finding.is_template
# is template always False now in favor of new model Finding_Template
# no further action needed here since this is already adding from template.
new_finding.is_template = False
new_finding.save()
new_finding.endpoints = form.cleaned_data['endpoints']
new_finding.save()
if 'jiraform-push_to_jira' in request.POST:
    jform = JIRAFindingForm(request.POST, prefix='jiraform', enabled=True)
    # Validate before accessing cleaned_data, as in add_findings() above.
    if jform.is_valid():
        add_issue_task.delay(new_finding, jform.cleaned_data.get('push_to_jira'))
messages.add_message(request,
messages.SUCCESS,
'Finding from template added successfully.',
extra_tags='alert-success')
if create_template:
templates = Finding_Template.objects.filter(title=new_finding.title)
if len(templates) > 0:
messages.add_message(request,
messages.ERROR,
'A finding template was not created. A template with this title already '
'exists.',
extra_tags='alert-danger')
else:
template = Finding_Template(title=new_finding.title,
cwe=new_finding.cwe,
severity=new_finding.severity,
description=new_finding.description,
mitigation=new_finding.mitigation,
impact=new_finding.impact,
references=new_finding.references,
numerical_severity=new_finding.numerical_severity)
template.save()
messages.add_message(request,
messages.SUCCESS,
'A finding template was also created.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_test', args=(test.id,)))
else:
messages.add_message(request,
messages.ERROR,
'The form has errors, please correct them below.',
extra_tags='alert-danger')
else:
form = FindingForm(initial={'active': False,
'date': timezone.now().date(),
'verified': False,
'false_p': False,
'duplicate': False,
'out_of_scope': False,
'title': finding.title,
'description': finding.description,
'cwe': finding.cwe,
'severity': finding.severity,
'mitigation': finding.mitigation,
'impact': finding.impact,
'references': finding.references,
'numerical_severity': finding.numerical_severity})
if get_system_setting('enable_jira'):
enabled = JIRA_PKey.objects.get(product=test.engagement.product).push_all_issues
jform = JIRAFindingForm(enabled=enabled, prefix='jiraform')
else:
jform = None
add_breadcrumb(parent=test, title="Add Finding", top_level=False, request=request)
return render(request, 'dojo/add_findings.html',
{'form': form,
'jform': jform,
'findings': findings,
'temp': True,
'fid': finding.id,
'tid': test.id,
'test': test,
})
def search(request, tid):
test = get_object_or_404(Test, id=tid)
templates = Finding_Template.objects.all()
templates = TemplateFindingFilter(request.GET, queryset=templates).qs
paged_templates = get_page_items(request, templates, 25)
title_words = [word
for finding in templates
for word in finding.title.split() if len(word) > 2]
title_words = sorted(set(title_words))
add_breadcrumb(parent=test, title="Add From Template", top_level=False, request=request)
return render(request, 'dojo/templates.html',
{'templates': paged_templates,
'filtered': templates,
'title_words': title_words,
'tid': tid,
'add_from_template': True,
})
@user_passes_test(lambda u: u.is_staff)
def finding_bulk_update(request, tid):
test = get_object_or_404(Test, id=tid)
finding = test.finding_set.all()[0]
form = FindingBulkUpdateForm(request.POST)
if request.method == "POST":
if form.is_valid():
finding_to_update = request.POST.getlist('finding_to_update')
finds = Finding.objects.filter(test=test, id__in=finding_to_update)
finds.update(severity=form.cleaned_data['severity'],
active=form.cleaned_data['active'],
verified=form.cleaned_data['verified'],
false_p=form.cleaned_data['false_p'],
duplicate=form.cleaned_data['duplicate'],
out_of_scope=form.cleaned_data['out_of_scope'])
messages.add_message(request,
messages.SUCCESS,
'Bulk edit of findings was successful. Check to make sure it is what you intended.',
extra_tags='alert-success')
else:
messages.add_message(request,
messages.ERROR,
'Unable to process bulk update. Required fields are invalid, '
'please update individually.',
extra_tags='alert-danger')
return HttpResponseRedirect(reverse('view_test', args=(test.id,)))
@user_passes_test(lambda u: u.is_staff)
def re_import_scan_results(request, tid):
additional_message = "When re-uploading a scan, any findings not found in original scan will be updated as " \
"mitigated. The process attempts to identify the differences, however manual verification " \
"is highly recommended."
t = get_object_or_404(Test, id=tid)
scan_type = t.test_type.name
engagement = t.engagement
form = ReImportScanForm()
form.initial['tags'] = [tag.name for tag in t.tags]
if request.method == "POST":
form = ReImportScanForm(request.POST, request.FILES)
if form.is_valid():
scan_date = form.cleaned_data['scan_date']
min_sev = form.cleaned_data['minimum_severity']
file = request.FILES['file']
scan_type = t.test_type.name
active = form.cleaned_data['active']
verified = form.cleaned_data['verified']
tags = request.POST.getlist('tags')
ts = ", ".join(tags)
t.tags = ts
try:
parser = import_parser_factory(file, t)
except ValueError:
raise Http404()
try:
items = parser.items
original_items = t.finding_set.all().values_list("id", flat=True)
new_items = []
mitigated_count = 0
finding_count = 0
finding_added_count = 0
reactivated_count = 0
for item in items:
sev = item.severity
if sev == 'Information' or sev == 'Informational':
sev = 'Info'
if Finding.SEVERITIES[sev] > Finding.SEVERITIES[min_sev]:
continue
if scan_type == 'Veracode Scan' or scan_type == 'Arachni Scan':
find = Finding.objects.filter(title=item.title,
test__id=t.id,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
description=item.description
)
else:
find = Finding.objects.filter(title=item.title,
test__id=t.id,
severity=sev,
numerical_severity=Finding.get_numerical_severity(sev),
)
if len(find) == 1:
find = find[0]
if find.mitigated:
# it was once fixed, but now back
find.mitigated = None
find.mitigated_by = None
find.active = True
find.verified = verified
find.save()
note = Notes(entry="Re-activated by %s re-upload." % scan_type,
author=request.user)
note.save()
find.notes.add(note)
reactivated_count += 1
new_items.append(find.id)
else:
item.test = t
item.date = t.target_start
item.reporter = request.user
item.last_reviewed = timezone.now()
item.last_reviewed_by = request.user
item.verified = verified
item.active = active
item.save()
finding_added_count += 1
new_items.append(item.id)
find = item
if hasattr(item, 'unsaved_req_resp') and len(item.unsaved_req_resp) > 0:
for req_resp in item.unsaved_req_resp:
burp_rr = BurpRawRequestResponse(finding=find,
burpRequestBase64=req_resp["req"],
burpResponseBase64=req_resp["resp"],
)
burp_rr.clean()
burp_rr.save()
if item.unsaved_request is not None and item.unsaved_response is not None:
burp_rr = BurpRawRequestResponse(finding=find,
burpRequestBase64=item.unsaved_request,
burpResponseBase64=item.unsaved_response,
)
burp_rr.clean()
burp_rr.save()
if find:
finding_count += 1
for endpoint in item.unsaved_endpoints:
ep, created = Endpoint.objects.get_or_create(protocol=endpoint.protocol,
host=endpoint.host,
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
product=t.engagement.product)
find.endpoints.add(ep)
if item.unsaved_tags is not None:
find.tags = item.unsaved_tags
# calculate the difference
to_mitigate = set(original_items) - set(new_items)
for finding_id in to_mitigate:
finding = Finding.objects.get(id=finding_id)
finding.mitigated = datetime.combine(scan_date, timezone.now().time())
finding.mitigated_by = request.user
finding.active = False
finding.save()
note = Notes(entry="Mitigated by %s re-upload." % scan_type,
author=request.user)
note.save()
finding.notes.add(note)
mitigated_count += 1
messages.add_message(request,
messages.SUCCESS,
'%s processed, a total of ' % scan_type + message(finding_count, 'finding',
'processed'),
extra_tags='alert-success')
if finding_added_count > 0:
messages.add_message(request,
messages.SUCCESS,
'A total of ' + message(finding_added_count, 'finding',
'added') + ', which are new to this scan.',
extra_tags='alert-success')
if reactivated_count > 0:
messages.add_message(request,
messages.SUCCESS,
'A total of ' + message(reactivated_count, 'finding',
'reactivated') + ', which are back in the scan results.',
extra_tags='alert-success')
if mitigated_count > 0:
messages.add_message(request,
messages.SUCCESS,
'A total of ' + message(mitigated_count, 'finding',
'mitigated') + '. Please manually verify each one.',
extra_tags='alert-success')
create_notification(event='results_added', title='Results added', finding_count=finding_count, test=t, engagement=engagement, url=request.build_absolute_uri(reverse('view_test', args=(t.id,))))
return HttpResponseRedirect(reverse('view_test', args=(t.id,)))
except SyntaxError:
messages.add_message(request,
messages.ERROR,
'There appears to be an error in the XML report, please check and try again.',
extra_tags='alert-danger')
add_breadcrumb(parent=t, title="Re-upload a %s" % scan_type, top_level=False, request=request)
return render(request,
'dojo/import_scan_results.html',
{'form': form,
'eid': engagement.id,
'additional_message': additional_message,
})
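# --- Illustrative sketch (not part of DefectDojo) ----------------------------------
# The mitigation step of re_import_scan_results() above reduces to set arithmetic
# on finding ids: anything present before the re-upload but absent from the new
# scan gets marked as mitigated.
def _reimport_diff_sketch(original_ids, new_ids):
    return set(original_ids) - set(new_ids)

# Example:  _reimport_diff_sketch([1, 2, 3], [2, 3, 4]) == set([1])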
@user_passes_test(lambda u: u.is_staff)
def download_cvffv1_test(request, tid):
test = get_object_or_404(Test, pk=tid)
output = StringIO.StringIO()
wr = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
if str(test.test_type) == 'Veracode Scan':
headings = ["finding_id", "issue_id", "title", "cwe", "url", "severity", "description", "mitigation", "impact",
"line_number", "sourcefile", "sourcefilepath", "Function", "WSO2_resolution", "WSO2_offset", "WSO2_comment"]
wr.writerow(headings)
offsets = [0, 10, 20, 50, 100, 150]
for finding in Finding.objects.filter(test_id=tid):
data = [finding.id, finding.issue_id, finding.title, finding.cwe, finding.url, finding.severity,
finding.description, finding.mitigation, finding.impact, finding.line_number, finding.sourcefile,
finding.sourcefilepath, finding.function]
comment_found = 0
for offset in offsets:
similarFindingsWithNotes = []
if offset == 0:
similarFindingsWithNotes = Finding.objects.filter(title=finding.title, sourcefile=finding.sourcefile, function=finding.function, line_number=finding.line_number).exclude(notes=None).order_by('-id')
else:
similarFindingsWithNotes = Finding.objects.filter(title=finding.title, sourcefile=finding.sourcefile, function=finding.function).filter(line_number__gte=(int(finding.line_number) - offset), line_number__lte=(int(finding.line_number) + offset)).exclude(notes=None).order_by('-id')
if similarFindingsWithNotes:
note = similarFindingsWithNotes[0].notes.all()[0]
if note.entry.find("] ~ ") > -1:
if str(note.entry[1:note.entry.find("] ~ ")]).strip().replace(" ","") != "":
data.append(str(note.entry[1:note.entry.find("] ~ ")]).strip())
data.append(str(offset))
data.append(note.entry[note.entry.find("] ~ ") + 4:])
comment_found = 1
break
if comment_found == 0:
if int(finding.cwe) == 117:
data.append("Already Mitigated")
data.append("0");
data.append("CRLF prevention in HTTP headers is already handled in Tomcat level. "
"Therefore it is not required to do additional validation in applications. "
"Please refer to CARBON-15811 (https://wso2.org/jira/browse/CARBON-15811) for details.")
comment_found = 1
wr.writerow(data)
elif str(test.test_type) == 'Qualys Scan (Webapp)' or str(test.test_type) == 'Qualys Scan (Single Scan)':
headings = ["finding_id", "issue_id", "title", "severity", "impact", "url", "param", "payload", "description", "mitigation",
"WSO2_resolution", "WSO2_comment"]
wr.writerow(headings)
for finding in Finding.objects.filter(test_id=tid):
endpoint = ""
if finding.endpoints:
if finding.endpoints.all():
endpoint = finding.endpoints.all()[0]
data = [finding.id, finding.issue_id, finding.title, finding.severity, finding.impact, str(endpoint) + "\r\n(" + finding.url + ")", finding.param,
finding.payload, finding.description, finding.mitigation]
comment_found = 0
if finding.param:
similarFindingsWithNotes = Finding.objects.filter(title=finding.title,
param=finding.param,
url=finding.url).exclude(notes=None).order_by('-id')
if similarFindingsWithNotes:
note = similarFindingsWithNotes[0].notes.all()[0]
if note.entry.find("] ~ ") > -1:
data.append(str(note.entry[1:note.entry.find("] ~ ")]).strip())
data.append("N/A");
data.append(note.entry[note.entry.find("] ~ ") + 4:])
comment_found = 1
wr.writerow(data)
response = HttpResponse(output.getvalue(), content_type='plain/text')
response['Content-Disposition'] = 'attachment; filename=' + str(test.test_type).replace(' ','_') + "-" + tid + '.csv'
return response
|
|
#!/usr/bin/env python3
"""
s3-sign.srv.py
Originally by: Mark Feltner (https://github.com/FineUploader/server-examples/tree/master/python/flask-fine-uploader-s3)
Server-side S3 upload example for Fine Uploader
Features:
* Upload to S3
* Delete from S3
* Sign Policy documents (simple uploads) and REST requests (chunked/multipart) uploads
* non-CORS environment
Enhanced by: Keiran Raine
* Converted to python3
* Added HTTPS
* More configuration via environment
* Indicate clear points for server side hooks
* Standardised access to request data for server side hooks
"""
import base64, hmac, hashlib, os, sys, re
from flask import (Flask, json, jsonify, make_response, render_template,
request, abort)
AWS_CLIENT_SECRET_KEY = os.getenv('AWS_CLIENT_SECRET_KEY')
AWS_CLIENT_ACCESS_KEY = os.getenv('AWS_CLIENT_ACCESS_KEY')
AWS_ENDPOINT = os.getenv('AWS_ENDPOINT')
app = Flask(__name__)
app.config.from_object(__name__)
def sign_policy(policy):
""" Sign and return the policy document for a simple upload.
http://aws.amazon.com/articles/1434/#signyours3postform """
signed_policy = base64.b64encode(policy)
encoded_key = str(AWS_CLIENT_SECRET_KEY).encode()
hmac_v = hmac.new(encoded_key,
signed_policy,
hashlib.sha1)
signature = base64.b64encode(hmac_v.digest())
return {
'policy': signed_policy.decode("utf-8"),
'signature': signature.decode("utf-8")
}
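# Illustrative sketch (not part of the original server): shows how sign_policy()
# is fed, i.e. a policy document serialised to bytes as Fine Uploader POSTs it for
# simple (non-chunked) uploads. The field values below are assumptions for the
# sketch, not requirements of this server.
def _example_sign_policy():
    sample_policy = json.dumps({
        "expiration": "2030-01-01T00:00:00Z",
        "conditions": [
            {"bucket": "example-bucket"},
            {"key": "0000-uuid.bin"},
            {"x-amz-meta-qqfilename": "example.bin"},
        ],
    }).encode("utf-8")
    return sign_policy(sample_policy)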
def sign_headers(headers):
""" Sign and return the headers for a chunked upload. """
encoded_key = str(AWS_CLIENT_SECRET_KEY).encode()
hmac_v = hmac.new(encoded_key,
bytearray(headers, 'utf-8'), # hmac doesn't want unicode
hashlib.sha1)
signature = base64.b64encode(hmac_v.digest())
return {
'signature': signature.decode("utf-8")
}
def challenge_from_headers(headers):
print(">>>>" + headers)
patt = re.compile(r'(x-amz-meta-[^:]+):(.+)')
for_challenge = {}
for (key, value) in re.findall(patt, headers):
for_challenge[key] = value
# now figure out bucket key and uuid from request
url_data = headers.split('\n')[-1].strip()
(bucket, uuid, ext) = re.match(r'/([^/]+)/([^.]+)\.([^?]+)\?uploads', url_data).groups()
for_challenge['bucket'] = bucket
for_challenge['uuid'] = uuid
for_challenge['key'] = uuid + '.' + ext
return for_challenge
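# Illustrative sketch: the shape of 'headers' that challenge_from_headers() expects,
# inferred purely from the parsing above (x-amz-meta-* lines followed by a final
# "/bucket/uuid.ext?uploads" resource line). All values are made up.
def _example_challenge_from_headers():
    sample = ("POST\n\n\n\n"
              "x-amz-meta-dataset:demo\n"
              "x-amz-meta-qqfilename:readme.txt\n"
              "/example-bucket/1234-abcd.txt?uploads")
    # returns {'x-amz-meta-dataset': 'demo', 'x-amz-meta-qqfilename': 'readme.txt',
    #          'bucket': 'example-bucket', 'uuid': '1234-abcd', 'key': '1234-abcd.txt'}
    return challenge_from_headers(sample)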
def challenge_from_conditions(conditions):
for_challenge = {}
for item in conditions:
for key, value in item.items():
for_challenge[key] = value
return for_challenge
def challenge_is_good(to_challenge):
"""
    This is where you would run checks based on the 'x-amz-meta-' header elements
    set by Fine Uploader's js.
By default you get:
key - name in bucket after
uuid - uuid of key without file extension
name - original file name from client (no path)
bucket - destination bucket
Recommended you augment this with additional request.params fields in the js object.
"""
transfer_req_for = '%s/%s/%s' % (to_challenge['bucket'],
to_challenge['x-amz-meta-dataset'],
to_challenge['x-amz-meta-qqfilename'])
# this simulates signing rejection based on data being expected
# REMOVE/REPLACE BLOCK IN PRODUCTION CODE
if os.getenv('P3S3F_EXAMPLE_ALLOW_SMALL') is not None:
if (transfer_req_for == os.getenv('P3S3F_EXAMPLE_ALLOW_SMALL') or
transfer_req_for == os.getenv('P3S3F_EXAMPLE_ALLOW_LARGE')):
return True
return False
return True
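# Illustrative sketch: one way a production deployment might replace the
# environment-variable check above, e.g. consulting a whitelist loaded at startup.
# "P3S3F_ALLOWED_TRANSFERS" and ALLOWED_TRANSFERS are assumptions for the sketch,
# not part of the original example.
ALLOWED_TRANSFERS = set(filter(None, os.getenv('P3S3F_ALLOWED_TRANSFERS', '').split(',')))

def _example_challenge_against_whitelist(to_challenge):
    transfer_req_for = '%s/%s/%s' % (to_challenge['bucket'],
                                     to_challenge['x-amz-meta-dataset'],
                                     to_challenge['x-amz-meta-qqfilename'])
    return transfer_req_for in ALLOWED_TRANSFERS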
def challenge_request(request):
request_payload = request.get_json()
response_data = None
challenge_data = None
if request_payload.get('headers'):
        # this branch is where you'd check against the back end that the upload is allowed
# signifies first element of chunked data
if request_payload['headers'].startswith('POST') and 'uploadId' not in request_payload['headers']:
print("\t**** Chunked signing request ****", file=sys.stderr)
challenge_data = challenge_from_headers(request_payload['headers'])
response_data = sign_headers(request_payload['headers'])
else:
            # this branch is where you'd check against the back end that the upload is allowed
print("\t**** Un-Chunked signing request ****", file=sys.stderr)
challenge_data = challenge_from_conditions(request_payload['conditions'])
response_data = sign_policy(request.data)
# although we've already done the signing, now do the actual challenge
if challenge_data is not None:
print('\t' + str(challenge_data), file=sys.stderr)
if challenge_is_good(challenge_data) is False:
return None
return response_data
@app.route("/s3/sign", methods=['POST'])
def s3_signature():
""" Route for signing the policy document or REST headers. """
response_data = challenge_request(request)
if response_data is None:
response_data = {'error': 'This file has not been approved for transfer, check upload is to correct dataset.'}
return jsonify(response_data)
# Probably delete this completely for systems that shouldn't allow delete
@app.route("/s3/delete/<key>", methods=['POST', 'DELETE'])
def s3_delete(key=None):
""" Route for deleting files off S3. Uses the SDK. """
request_payload = request.values
print("\t**** THIS DATA USED TO NOTIFY BACKEND OF DELETED DATA ****", file=sys.stderr)
print("\tBucket: %s\n\tKey: %s" % (request_payload.get('bucket'), request_payload.get('key')), file=sys.stderr)
print("\t**********************************************************", file=sys.stderr)
try:
import boto3
from botocore.utils import fix_s3_host
s3 = boto3.resource("s3",
aws_access_key_id = AWS_CLIENT_ACCESS_KEY,
aws_secret_access_key = AWS_CLIENT_SECRET_KEY,
endpoint_url=AWS_ENDPOINT)
s3.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)
s3.meta.client.delete_object(Bucket=request_payload.get('bucket'),
Key=request_payload.get('key'))
return make_response('', 200)
except ImportError:
abort(500)
@app.route("/s3/success", methods=['GET', 'POST'])
def s3_success():
""" Success redirect endpoint for <=IE9. """
print("\t**** THIS DATA USED TO NOTIFY BACKEND OF COMPLETED DATA ****", file=sys.stderr)
for key, value in request.form.items():
        # these don't have the 'x-amz-meta-' prefix
print("\t%s : %s" % (key, value), file=sys.stderr)
print("\t************************************************************", file=sys.stderr)
return make_response()
@app.route("/")
def index():
data = None
with open('index.html', 'r') as myfile:
data = myfile.read()
return data
def main(argv=None):
print("\n#####\n!\tWARNING: This example is using app.run() please see:\n!\t\thttp://flask.pocoo.org/docs/latest/api/#flask.Flask.run\n#####\n", file=sys.stderr)
threaded = False
if os.getenv('P3S3F_THREADED') == '1' :
threaded = True
if os.getenv('P3S3F_USE_HTTPS') == '1' :
print(os.getenv('P3S3F_SRV_CRT'))
context = (os.getenv('P3S3F_SRV_CRT'),
os.getenv('P3S3F_SRV_KEY'))
app.run(host=os.getenv('P3S3F_HOST_NAME'),
port=os.getenv('P3S3F_HOST_PORT'),
ssl_context=context,
threaded=threaded) # debug=True
else:
app.run(host=os.getenv('P3S3F_HOST_NAME'),
port=os.getenv('P3S3F_HOST_PORT'),
threaded=threaded) # debug=True
return 0 # success
if __name__ == '__main__':
status = main()
sys.exit(status)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import boto3
import botocore
from botocore.exceptions import ClientError
from dependencies_resolver.config.configuration import \
REGEX_MULTIPART_UPLOAD_PATTERN
from dependencies_resolver.utils import md5_checksum
from dependencies_resolver.utils.exception_handler import handle_exception
# Credentials are being read from shared credentials file configured for
# the AWS command line. Usually this configured as: ~/.aws/credentials
# More details at:
# http://boto3.readthedocs.io/en/latest/guide/configuration.html#shared-credentials-file
s3_client = boto3.client('s3')
def get_object_md5_checksum(bucket, key):
"""This function returns the MD5 checksum for the remote file.
If the file was uploaded as a single-part file, the MD5 checksum will be
the checksum of the file content.
    However, if the file was uploaded as a multi-part file,
    AWS calculates the MD5 the following way (based on AWS documentation):
    1. Calculate the MD5 hash for each uploaded part of the file.
    2. Concatenate the hashes into a single binary string.
    3. Calculate the MD5 hash of that result.
    4. Concatenate the resulting MD5 hash with a dash
    and the number of file parts.
:param bucket: The name of the bucket.
:param key: The full path to the remote file.
:return: The MD5 checksum for the remote file.
"""
try:
md5_checksum = s3_client.head_object(
Bucket=bucket,
Key=key
)['ETag'][1:-1]
except botocore.exceptions.ClientError:
md5_checksum = ''
return md5_checksum
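# Illustrative sketch: telling a multipart ETag apart from a plain MD5. The real
# pattern used elsewhere in this package is REGEX_MULTIPART_UPLOAD_PATTERN (not
# shown here); the expression below is an assumed stand-in for demonstration only.
def _example_multipart_part_count(etag):
    # "d41d8cd98f00b204e9800998ecf8427e-2" -> 2; a plain MD5 -> None
    match = re.search(r'-(\d+)$', etag)
    return int(match.group(1)) if match else None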
def get_latest_version(bucket, name):
"""This function gets a bucket and a name of resource, and returns the
latest version stored for this resource.
:param bucket: The name of the bucket.
:param name: The name of the resource.
:return: The latest version for this resource stored in the bucket.
"""
keys_list = []
bucket_keys = s3_client.list_objects(Bucket=bucket, Prefix=name)
for key in bucket_keys['Contents']:
keys_list.append(key['Key'])
most_recent_key = sorted(keys_list, reverse=True)[0]
# The key is in the format of: BINARY-NAME/YYYY-mm-dd-HH-mm-ss/BINARY-FILE
# and we want only the date (which refers as the version) to be extracted
# hence the regex
extracted_version = re.search(r'/([^/]+)/', most_recent_key).group(1)
return extracted_version
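# Illustrative sketch: with keys shaped as described in the comment above, the most
# recent key "my-binary/2017-01-31-12-00-00/my-binary" yields the version string
# "2017-01-31-12-00-00" through the same regex used in get_latest_version().
def _example_extract_version():
    return re.search(r'/([^/]+)/', 'my-binary/2017-01-31-12-00-00/my-binary').group(1)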
def version_exists(bucket, name, version):
"""This function gets a bucket, resource name and version and checks if
the version exists for that resource. Returns True if exists,
and False otherwise.
:param bucket: The name of the bucket.
:param name: The name of the resource.
:param version: The version of the resource.
:return: Returns True if this version exists, False otherwise.
"""
if version == 'latest':
version = get_latest_version(bucket, name)
return object_exists(bucket, name + '/' + version), version
def get_key(bucket, name, version):
"""This function gets a bucket name, resource name and version and
returns the constructed key if the version exists for that resource,
or ValueError exception is being raised otherwise.
:param bucket: The name of the bucket.
:param name: The name of the resource.
:param version: The version of the resource.
:return: The S3 key for downloading the resource if the version exists,
ValueError exception is being raised otherwise.
"""
_version_exists, version = version_exists(bucket, name, version)
if not _version_exists:
raise ValueError(
'Version ({0}) not found for this binary'.format(version))
return name + '/' + version + '/' + name
def object_exists(bucket, prefix):
"""This function gets a bucket name and a prefix
and returns True if the path exists and contains any content in it,
or False otherwise.
:param bucket: The name of the bucket.
:param prefix: The prefix to search inside the bucket.
:return: True if the path exists and contains anything, and False
otherwise.
"""
response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
    return 'Contents' in response
def file_already_exists(bucket, key, download_path):
"""This function checks if there's already a local copy of the remote
file to be downloaded, and if so checks whether the local copy of the
remote file is identical (same MD5 checksum) to the remote file.
The MD5 hash check flow:
We check the MD5 checksum for the remote copy of that file.
The problem in the result of that checksum is that AWS has a unique
way of generating a checksum to a file on S3 that was uploaded using
multi-part operation.
What multi-part operation you're asking? Well, when a file is being
uploaded to S3 and is above some threshold, boto3's S3 upload library is
automatically breaking the file into parts and upload each part separately
to make the upload process easier. In that way, the file is then stored
in S3 as single-part file but actually composed of all the parts together.
Now, when asking the MD5 checksum of the file, if the file uploaded as a
single-part, that checksum will be traditional MD5 hash as we expect.
But, if the file was uploaded as multi-parts file, then AWS has a
different algorithm for calculating the checksum of the file:
1. Calculate the MD5 hash for each uploaded part of the file.
2. Concatenate (hex concatenation) the hashes into a single binary string.
3. Calculate the MD5 hash of that result.
4. Concatenate the resulted MD5 hash with a dash and number of file parts.
So an example for that MD5 hash could be something like:
d41d8cd98f00b204e9800998ecf8427e-2
Which mean the file in S3 is composed of 2 files.
In order to get that exact md5 hash for the local file to check the
identity of both files, we need to break the local file into the same number
of parts and make sure we have the same size for each part, and that's
been done in the code as well. Then, we can check for the md5 hashes and
determine whether the files are identical or not.
:param bucket: The name of the bucket.
:param key: The key to the remote file.
:param download_path: The path to the local copy of the remote file.
:return: True if the remote and local files are identical, and False
otherwise.
"""
identical_files = False
if os.path.isfile(download_path):
remote_file_checksum = get_object_md5_checksum(bucket, key)
multipart_regex = re.search(REGEX_MULTIPART_UPLOAD_PATTERN,
remote_file_checksum)
local_file_checksum = md5_checksum.get_aws_like_md5_checksum(
download_path,
multipart_regex)
identical_files = remote_file_checksum == local_file_checksum
return identical_files
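# Illustrative sketch (not the package's real implementation, which lives in
# md5_checksum.get_aws_like_md5_checksum): a self-contained approximation of the
# checksum scheme described above. The 8 MB part size is an assumption; the digests
# only agree with S3 if it matches the part size used at upload time.
import hashlib

def _example_aws_like_md5(path, multipart=False, part_size=8 * 1024 * 1024):
    with open(path, 'rb') as handle:
        if not multipart:
            # single-part upload: the ETag is just the MD5 of the content
            return hashlib.md5(handle.read()).hexdigest()
        # multipart upload: MD5 each part, MD5 the concatenated digests,
        # then append "-<number of parts>"
        part_digests = []
        while True:
            chunk = handle.read(part_size)
            if not chunk:
                break
            part_digests.append(hashlib.md5(chunk).digest())
    combined = hashlib.md5(b''.join(part_digests)).hexdigest()
    return '{0}-{1}'.format(combined, len(part_digests))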
def download(bucket, name, version, location, s3path=None):
"""This function is a wrapper function for boto3's S3 download function
which gets a bucket name, a resource name, a version and a location to
download to.
The function first checks if the specified resource exists locally and
has the same checksum as the remote file.
    If so, the resource won't be downloaded, as the local copy is already
    at the latest version.
Otherwise, the resource will be downloaded if it exists remotely with
the specified version.
Any failure will cause the exception to be printed to stdout, and the
program to exit.
:param bucket: The name of the bucket.
:param name: The name of the resource.
:param version: The version of the resource.
:param location: The location in which the resource would be downloaded to.
:return: Nothing, unless an exception is being raised.
"""
try:
path = s3path if s3path else name
if not object_exists(bucket, path):
raise ValueError(
'Binary ({0}) not found in the repository'.format(name))
try:
if not os.path.exists(location):
os.makedirs(location)
except OSError:
handle_exception(
'Directory could not be created. Check permissions '
'to ({0})'.format(location))
download_path = location + name
try:
key = s3path if s3path \
else get_key(bucket, name, version)
if not file_already_exists(bucket, key, download_path):
print('Downloading: [resource: {0}], [version: {1}]'.format(
name, version))
with open(download_path, 'wb') as obj:
s3_client.download_fileobj(bucket, key, obj)
else:
print('Skipping: [resource: {0}], [status: local copy is '
'up to date!]'.format(name))
except IOError:
handle_exception(
'File could not be saved. Check permissions to ({0})'.format(
location))
    except ClientError as ex:
        # str(ex) rather than ex.message, which does not exist on Python 3
        handle_exception(str(ex))
    except ValueError as ex:
        handle_exception(str(ex))
|
|
"""
Created on 22 Jul 2016
@author: Bruno Beloff ([email protected])
"""
import struct
import time
from scs_host.bus.i2c import I2C
from scs_host.lock.lock import Lock
# --------------------------------------------------------------------------------------------------------------------
class ADS1115(object):
"""
Texas Instruments ADS1115 ADC
"""
ADDR_AUX = 0x48
ADDR_WRK = 0x49
MUX_A0_A1 = 0x0000 # -000 ---- ---- ----
MUX_A0_A3 = 0x1000 # -001 ---- ---- ----
MUX_A1_A3 = 0x2000 # -010 ---- ---- ----
MUX_A2_A3 = 0x3000 # -011 ---- ---- ----
MUX_A0_GND = 0x4000 # -100 ---- ---- ----
MUX_A1_GND = 0x5000 # -101 ---- ---- ----
MUX_A2_GND = 0x6000 # -110 ---- ---- ----
MUX_A3_GND = 0x7000 # -111 ---- ---- ----
GAIN_6p144 = 0x0000 # ---- 000- ---- ---- 1 bit = 0.1875000 mV 0.2
GAIN_4p096 = 0x0200 # ---- 001- ---- ---- (default) 1 bit = 0.1250000 mV 0.1
GAIN_2p048 = 0x0400 # ---- 010- ---- ---- 1 bit = 0.0625000 mV 0.05
GAIN_1p024 = 0x0600 # ---- 011- ---- ---- 1 bit = 0.0312500 mV 0.025
GAIN_0p512 = 0x0800 # ---- 100- ---- ---- 1 bit = 0.0156250 mV 0.0125
GAIN_0p256 = 0x0a00 # ---- 101- ---- ---- 1 bit = 0.0078125 mV 0.00625
RATE_8 = 0x0000 # ---- ---- 000- ----
RATE_16 = 0x0020 # ---- ---- 001- ----
RATE_32 = 0x0040 # ---- ---- 010- ----
RATE_64 = 0x0060 # ---- ---- 011- ----
RATE_128 = 0x0080 # ---- ---- 100- ---- (default)
RATE_250 = 0x00a0 # ---- ---- 101- ----
RATE_475 = 0x00c0 # ---- ---- 110- ----
RATE_860 = 0x00e0 # ---- ---- 111- ----
# ----------------------------------------------------------------------------------------------------------------
__REG_CONV = 0x00
__REG_CONFIG = 0x01
__REG_LO_THRESH = 0x02
__REG_HI_THRESH = 0x03
__OS_START = 0x8000 # 1--- ---- ---- ----
__MODE_CONT = 0x0000 # ---- ---0 ---- ----
__MODE_SINGLE = 0x0100 # ---- ---1 ---- ---- (default)
__COMP_TRAD = 0x0000 # ---- ---- ---0 ---- (default)
__COMP_WINDOW = 0x0010 # ---- ---- ---1 ----
__COMP_POL_LOW = 0x0000 # ---- ---- ---- 0--- (default)
__COMP_POL_HIGH = 0x0008 # ---- ---- ---- 1---
__COMP_LATCH_OFF = 0x0000 # ---- ---- ---- -0-- (default)
__COMP_LATCH_ON = 0x0004 # ---- ---- ---- -1--
__COMP_QUEUE_1 = 0x0000 # ---- ---- ---- --00
__COMP_QUEUE_2 = 0x0001 # ---- ---- ---- --01
__COMP_QUEUE_4 = 0x0002 # ---- ---- ---- --10
__COMP_QUEUE_0 = 0x0003 # ---- ---- ---- --11 (default)
__GAIN = None
__FULL_SCALE = None
__TCONV = None
# ----------------------------------------------------------------------------------------------------------------
__LOCK_TIMEOUT = 10.0
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def init(cls):
cls.__GAIN = (
ADS1115.GAIN_0p256, # 0
ADS1115.GAIN_0p512, # 1
ADS1115.GAIN_1p024, # 2
ADS1115.GAIN_2p048, # 3
ADS1115.GAIN_4p096, # 4
ADS1115.GAIN_6p144 # 5
)
cls.__FULL_SCALE = {
ADS1115.GAIN_0p256: 0.256,
ADS1115.GAIN_0p512: 0.512,
ADS1115.GAIN_1p024: 1.024,
ADS1115.GAIN_2p048: 2.048,
ADS1115.GAIN_4p096: 4.096,
ADS1115.GAIN_6p144: 6.144
}
cls.__TCONV = {
ADS1115.RATE_8: 0.145,
ADS1115.RATE_16: 0.082,
ADS1115.RATE_32: 0.051,
ADS1115.RATE_64: 0.036,
ADS1115.RATE_128: 0.028,
ADS1115.RATE_250: 0.024,
ADS1115.RATE_475: 0.022,
ADS1115.RATE_860: 0.021
}
@classmethod
def gain(cls, index):
return cls.__GAIN[index]
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, addr, rate):
"""
        initialise ADC with given I2C address and sample rate
"""
# fields...
self.__addr = addr
self.__rate = rate
self.__gain = None
self.__config = ADS1115.__MODE_SINGLE | self.__rate | ADS1115.__COMP_QUEUE_0
# write config...
try:
self.obtain_lock()
self.__write_config(self.__config)
finally:
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
def start_conversion(self, mux, gain):
"""
start single-shot conversion
wait for conv_time before reading
"""
self.__gain = gain
start = ADS1115.__OS_START | mux | gain | self.__config
self.obtain_lock()
self.__write_config(start)
def read_conversion(self):
"""
read most recent conversion
returned value is voltage
"""
try:
config = self.__read_config()
if not (config & ADS1115.__OS_START):
raise ValueError("ADS1115:read_conversion: conversion not ready.")
v = self.__read_conv()
finally:
self.release_lock()
return v
def convert(self, mux, gain):
"""
start single-shot conversion, waits for ready, then reads
warning: creates a high level of I2C traffic
returned value is voltage
"""
try:
self.start_conversion(mux, gain)
while True:
time.sleep(ADS1115.__TCONV[ADS1115.RATE_860])
config = self.__read_config()
if config & ADS1115.__OS_START:
break
finally:
self.release_lock()
v = self.__read_conv()
return v
# ----------------------------------------------------------------------------------------------------------------
def __read_config(self):
try:
I2C.Sensors.start_tx(self.__addr)
msb, lsb = I2C.Sensors.read_cmd(ADS1115.__REG_CONFIG, 2)
finally:
I2C.Sensors.end_tx()
config = (msb << 8) | lsb
return config
def __write_config(self, config):
try:
I2C.Sensors.start_tx(self.__addr)
I2C.Sensors.write(ADS1115.__REG_CONFIG, config >> 8, config & 0xff)
finally:
I2C.Sensors.end_tx()
def __read_conv(self):
try:
I2C.Sensors.start_tx(self.__addr)
msb, lsb = I2C.Sensors.read_cmd(ADS1115.__REG_CONV, 2)
finally:
I2C.Sensors.end_tx()
# render voltage...
unsigned = (msb << 8) | lsb
# print("unsigned: 0x%04x" % unsigned)
signed = struct.unpack('h', struct.pack('H', unsigned))
v = (signed[0] / 32767.5) * ADS1115.__FULL_SCALE[self.__gain]
return v
# ----------------------------------------------------------------------------------------------------------------
def obtain_lock(self):
Lock.acquire(self.__lock_name, ADS1115.__LOCK_TIMEOUT)
def release_lock(self):
Lock.release(self.__lock_name)
@property
def __lock_name(self):
return "%s-0x%02x" % (self.__class__.__name__, self.__addr)
# ----------------------------------------------------------------------------------------------------------------
@property
def addr(self):
return self.__addr
@property
def rate(self):
return self.__rate
@property
def tconv(self):
return ADS1115.__TCONV[self.__rate]
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "ADS1115:{addr:0x%0.2x, rate:0x%0.4x, config:0x%0.4x}" % (self.addr, self.rate, self.__config)
|
|
import unittest
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from ParamSklearn.implementations.Imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
class ImputationTest(unittest.TestCase):
def _check_statistics(self, X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape(self):
"""Verify the shapes of the imputed matrix for different strategies."""
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero(self):
"""Test imputation using the mean and median strategies, when
missing_values == 0."""
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
self._check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
self._check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median(self):
"""Test imputation using the mean and median strategies, when
missing_values != 0."""
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
self._check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases(self):
"""Test median imputation with sparse boundary cases
"""
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
self._check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent(self):
"""Test imputation using the most-frequent strategy."""
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
        # scipy.stats.mode, used in Imputer, doesn't return the first most
        # frequent value as promised in the doc, but the lowest most frequent one.
        # If this test fails after a scipy update, Imputer will need to be updated
        # to be consistent with the new (correct) behaviour.
self._check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3],
-1)
def test_imputation_pipeline_grid_search(self):
"""Test imputation within a pipeline + gridsearch."""
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle(self):
"""Test for pickling imputers."""
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy(self):
"""Test imputation with copy"""
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
# TODO: Rewrite third_party imports.
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2
slim = tf.contrib.slim
class HyperparamsBuilderTest(tf.test.TestCase):
# TODO: Make this a public api in slim arg_scope.py.
def _get_scope_key(self, op):
return getattr(op, '_key_op', str(op))
def test_default_arg_scope_has_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
self.assertTrue(self._get_scope_key(slim.conv2d) in scope)
def test_default_arg_scope_has_separable_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
self.assertTrue(self._get_scope_key(slim.separable_conv2d) in scope)
def test_default_arg_scope_has_conv2d_transpose_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
self.assertTrue(self._get_scope_key(slim.conv2d_transpose) in scope)
def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
conv_hyperparams_text_proto = """
op: FC
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
self.assertTrue(self._get_scope_key(slim.fully_connected) in scope)
def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
kwargs_1, kwargs_2, kwargs_3 = scope.values()
self.assertDictEqual(kwargs_1, kwargs_2)
self.assertDictEqual(kwargs_1, kwargs_3)
def test_return_l1_regularized_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularizer_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_non_default_batch_norm_params_with_train_during_train(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = conv_scope_arguments['normalizer_params']
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertTrue(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_during_eval(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=False)
conv_scope_arguments = scope.values()[0]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = conv_scope_arguments['normalizer_params']
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: false
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = conv_scope_arguments['normalizer_params']
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_do_not_use_batch_norm_if_default(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
self.assertEqual(conv_scope_arguments['normalizer_fn'], None)
self.assertEqual(conv_scope_arguments['normalizer_params'], None)
def test_use_none_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
self.assertEqual(conv_scope_arguments['activation_fn'], None)
def test_use_relu_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)
def test_use_relu_6_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = tf.get_variable(
name='test',
shape=shape,
dtype=tf.float32,
initializer=initializer)
sess.run(tf.global_variables_initializer())
values = sess.run(var)
self.assertAllClose(np.var(values), variance, tol, tol)
def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
conv_scope_arguments = scope.values()[0]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
if __name__ == '__main__':
tf.test.main()
|
|
import copy
import gettext
import re
import django
from django.conf import settings
from django.utils.functional import lazy
from django.utils.importlib import import_module
from django.utils.translation import (trans_real as django_trans,
ugettext as django_ugettext,
ungettext as django_nugettext)
from babel.messages.extract import extract_python
from jinja2 import ext
INSTALL_JINJA_TRANSLATIONS = getattr(settings,
'TOWER_INSTALL_JINJA_TRANSLATIONS',
True)
def ugettext(message, context=None):
"""Always return a stripped string, localized if possible"""
stripped = strip_whitespace(message)
message = add_context(context, stripped) if context else stripped
ret = django_ugettext(message)
# If the context isn't found, we need to return the string without it
return stripped if ret == message else ret
def ungettext(singular, plural, number, context=None):
"""Always return a stripped string, localized if possible"""
singular_stripped = strip_whitespace(singular)
plural_stripped = strip_whitespace(plural)
if context:
singular = add_context(context, singular_stripped)
plural = add_context(context, plural_stripped)
else:
singular = singular_stripped
plural = plural_stripped
ret = django_nugettext(singular, plural, number)
# If the context isn't found, the string is returned as it came
if ret == singular:
return singular_stripped
elif ret == plural:
return plural_stripped
return ret
ugettext_lazy = lazy(ugettext, unicode)
ungettext_lazy = lazy(ungettext, unicode)
def add_context(context, message):
# \x04 is a magic gettext number.
return u"%s\x04%s" % (context, message)
def split_context(message):
# \x04 is a magic gettext number.
ret = message.split(u"\x04")
if len(ret) == 1:
ret.insert(0, "")
return ret
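def _example_context_roundtrip():
    """Illustrative only: how the \\x04 separator carries a msgctxt through
    gettext. add_context joins, split_context recovers (with an empty context
    when none was attached)."""
    tagged = add_context(u"month name", u"May")      # u"month name\x04May"
    assert split_context(tagged) == [u"month name", u"May"]
    assert split_context(u"May") == [u"", u"May"]
    return tagged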
def strip_whitespace(message):
return re.compile(r'\s+', re.UNICODE).sub(' ', message).strip()
def install_jinja_translations():
"""
Install our gettext and ngettext functions into Jinja2's environment.
"""
class Translation(object):
"""
We pass this object to jinja so it can find our gettext implementation.
If we pass the GNUTranslation object directly, it won't have our
context and whitespace stripping action.
"""
ugettext = staticmethod(ugettext)
ungettext = staticmethod(ungettext)
import jingo
jingo.env.install_gettext_translations(Translation)
def activate(locale):
"""
Override django's utils.translation.activate(). Django forces files
to be named django.mo (http://code.djangoproject.com/ticket/6376). Since
that's dumb and we want to be able to load different files depending on
what part of the site the user is in, we'll make our own function here.
"""
if INSTALL_JINJA_TRANSLATIONS:
install_jinja_translations()
if django.VERSION >= (1, 3):
django_trans._active.value = _activate(locale)
else:
from django.utils.thread_support import currentThread
django_trans._active[currentThread()] = _activate(locale)
def _activate(locale):
# XXX TODO: When it comes time to load .mo files on the fly and merge
# them, this is the place to do it. We'll also need to implement our own
# caching since the _translations stuff is built on a per locale basis,
# not per locale + some key
locale = django_trans.to_locale(locale)
# Django caches the translation objects here
t = django_trans._translations.get(locale, None)
if t is not None:
return t
# Django's activate() simply calls translation() and adds it to a global.
# We'll do the same here, first calling django's translation() so it can
# do everything it needs to do, and then calling gettext directly to
# load the rest. We make a deepcopy because Django will return the en-US
# catalog if it doesn't have a locale (but we do). We don't want to merge
# our foreign catalog into en-US. Since Django stuck the en-US catalog
# into its cache for this locale, we have to update that too.
t = copy.deepcopy(django_trans.translation(locale))
t.set_language(locale)
try:
# When trying to load css, js, and images through the Django server
# gettext() throws an exception saying it can't find the .mo files. I
# suspect this has something to do with Django trying not to load
# extra stuff for requests that won't need it. I do know that I don't
# want to try to debug it. This is what Django does in their function
# also.
#
# We check for SETTINGS_MODULE here because if it's not here, then
# it's possible we're in a test using override_settings and we don't
# want to flip out.
settings_module = getattr(settings, 'SETTINGS_MODULE', None)
if settings_module:
# If you've got extra .mo files to load, this is the place.
path = import_module(settings_module).path
domain = getattr(settings, 'TEXT_DOMAIN', 'messages')
bonus = gettext.translation(domain, path('locale'), [locale],
django_trans.DjangoTranslation)
t.merge(bonus)
# Overwrite t (defaults to en-US) with our real locale's plural form
t.plural = bonus.plural
except IOError:
pass
django_trans._translations[locale] = t
return t
def deactivate_all():
""" Override django's utils.translation.deactivate_all(). Django continues
to cache a catalog even if you call their deactivate_all().
"""
django_trans.deactivate_all()
django_trans._translations = {}
def tweak_message(message):
"""We piggyback on jinja2's babel_extract() (really, Babel's extract_*
functions) but they don't support some things we need so this function will
tweak the message. Specifically:
1) We strip whitespace from the msgid. Jinja2 will only strip
whitespace from the ends of a string so linebreaks show up in
your .po files still.
2) Babel doesn't support context (msgctxt). We hack that in ourselves
here.
"""
if isinstance(message, basestring):
message = strip_whitespace(message)
elif isinstance(message, tuple):
# A tuple of 2 has context, 3 is plural, 4 is plural with context
if len(message) == 2:
message = add_context(message[1], message[0])
elif len(message) == 3:
if all(isinstance(x, basestring) for x in message[:2]):
singular, plural, num = message
message = (strip_whitespace(singular),
strip_whitespace(plural),
num)
elif len(message) == 4:
singular, plural, num, ctxt = message
message = (add_context(ctxt, strip_whitespace(singular)),
add_context(ctxt, strip_whitespace(plural)),
num)
return message
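def _example_tweak_message_shapes():
    """Illustrative only: the message shapes Babel hands us and what
    tweak_message() returns for each (whitespace collapsed, context joined
    with the \\x04 separator)."""
    simple = tweak_message(u"  hello\n  world ")                  # u"hello world"
    with_ctx = tweak_message((u"May", u"month name"))             # u"month name\x04May"
    plural = tweak_message((u" %d apple ", u" %d apples ", 3))    # stripped pair + count
    plural_ctx = tweak_message((u"%d apple", u"%d apples", 3, u"fruit"))
    return simple, with_ctx, plural, plural_ctx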
def extract_tower_python(fileobj, keywords, comment_tags, options):
for lineno, funcname, message, comments in \
list(extract_python(fileobj, keywords, comment_tags, options)):
message = tweak_message(message)
yield lineno, funcname, message, comments
def extract_tower_template(fileobj, keywords, comment_tags, options):
for lineno, funcname, message, comments in \
list(ext.babel_extract(fileobj, keywords, comment_tags, options)):
message = tweak_message(message)
yield lineno, funcname, message, comments
|
|
import pytest
import sys
import time
from tlz import first
import threading
from distributed.compatibility import WINDOWS
from distributed import metrics
from distributed.profile import (
process,
merge,
create,
call_stack,
identifier,
watch,
llprocess,
ll_get_stack,
plot_data,
)
def test_basic():
def test_g():
time.sleep(0.01)
def test_h():
time.sleep(0.02)
def test_f():
for i in range(100):
test_g()
test_h()
thread = threading.Thread(target=test_f)
thread.daemon = True
thread.start()
state = create()
for i in range(100):
time.sleep(0.02)
frame = sys._current_frames()[thread.ident]
process(frame, None, state)
assert state["count"] == 100
d = state
while len(d["children"]) == 1:
d = first(d["children"].values())
assert d["count"] == 100
assert "test_f" in str(d["description"])
g = [c for c in d["children"].values() if "test_g" in str(c["description"])][0]
h = [c for c in d["children"].values() if "test_h" in str(c["description"])][0]
assert g["count"] < h["count"]
assert 95 < g["count"] + h["count"] <= 100
pd = plot_data(state)
assert len(set(map(len, pd.values()))) == 1 # all same length
assert len(set(pd["color"])) > 1 # different colors
@pytest.mark.skipif(
WINDOWS, reason="no low-level profiler support for Windows available"
)
def test_basic_low_level():
pytest.importorskip("stacktrace")
state = create()
for i in range(100):
time.sleep(0.02)
frame = sys._current_frames()[threading.get_ident()]
llframes = {threading.get_ident(): ll_get_stack(threading.get_ident())}
for f in llframes.values():
if f is not None:
llprocess(f, None, state)
assert state["count"] == 100
children = state.get("children")
assert children
expected = "<low-level>"
for k, v in zip(children.keys(), children.values()):
desc = v.get("description")
assert desc
filename = desc.get("filename")
assert expected in k and filename == expected
def test_merge():
a1 = {
"count": 5,
"identifier": "root",
"description": "a",
"children": {
"b": {
"count": 3,
"description": "b-func",
"identifier": "b",
"children": {},
},
"c": {
"count": 2,
"description": "c-func",
"identifier": "c",
"children": {},
},
},
}
a2 = {
"count": 4,
"description": "a",
"identifier": "root",
"children": {
"d": {
"count": 2,
"description": "d-func",
"children": {},
"identifier": "d",
},
"c": {
"count": 2,
"description": "c-func",
"children": {},
"identifier": "c",
},
},
}
expected = {
"count": 9,
"identifier": "root",
"description": "a",
"children": {
"b": {
"count": 3,
"description": "b-func",
"identifier": "b",
"children": {},
},
"d": {
"count": 2,
"description": "d-func",
"identifier": "d",
"children": {},
},
"c": {
"count": 4,
"description": "c-func",
"identifier": "c",
"children": {},
},
},
}
assert merge(a1, a2) == expected
def test_merge_empty():
assert merge() == create()
assert merge(create()) == create()
assert merge(create(), create()) == create()
def test_call_stack():
frame = sys._current_frames()[threading.get_ident()]
L = call_stack(frame)
assert isinstance(L, list)
assert all(isinstance(s, str) for s in L)
assert "test_call_stack" in str(L[-1])
def test_identifier():
frame = sys._current_frames()[threading.get_ident()]
assert identifier(frame) == identifier(frame)
assert identifier(None) == identifier(None)
def test_watch():
start = metrics.time()
def stop():
return metrics.time() > start + 0.500
start_threads = threading.active_count()
log = watch(interval="10ms", cycle="50ms", stop=stop)
start = metrics.time() # wait until thread starts up
while threading.active_count() <= start_threads:
assert metrics.time() < start + 2
time.sleep(0.01)
time.sleep(0.5)
assert 1 < len(log) < 10
start = metrics.time()
while threading.active_count() > start_threads:
assert metrics.time() < start + 2
time.sleep(0.01)
|
|
import datetime
import time
import query
from connect import *
class MTModel(object):
def __init__(self, id=None, *args, **kwargs):
self.className = self.__class__.__name__.lower()
def save(self):
"""
Commit the model's data to the database. The query will be constructed
as an insert or update, depending on whether or not an ID exists on the
current model.
An MTConnection is opened and closed as needed to prevent too many
connections from being established at any given time (i.e. we don't
open the connection in self.__init__ because a connection would exist
        for each model that gets instantiated, causing an error in some cases).
"""
self.conn = MTConnection()
query = self.build_save_query()
rows, results = self.conn.execute(query)
if not self.id:
setattr(self, "%s_id" % self.className,
self.conn.last_inserted_id())
"""
Run through the items in this model's dict, checking to see if any
are subclasses of MTModel. If any are found, as would be the case
with entries that have associated categories, placements, and authors,
call the save() method for each to make sure any changes are
committed. It'd be nice to only save 'dirty' objects here.
"""
for key, obj in self.__dict__.items():
if issubclass(type(obj), MTModel):
obj.save()
self.conn.close()
def build_save_query(self):
"""
        This nasty piece of code constructs a query to either INSERT or UPDATE
a model depending on whether or not the current instance has an id.
The big hole here is that we trust the user not to arbitrarily set
an id on the model. Since I more or less trust myself, this has been
ok so far but it's not ideal - maybe raise an exception when the user
attempts to set the id?
        Only items in self.__dict__ whose keys are prefixed with
the name of the model (e.g. 'entry_') are included. Depending on the
type of the item, it is either included inside or outside quotes so
we can pop it directly into a query string.
Obviously, no escaping is done here as would be necessary to prevent
SQL injection, because it wasn't needed at the time this was created.
"""
values = []
columns = [x for x in self.__dict__.keys()\
if x.find('%s_' % self.className) >= 0]
query = ''
for column in columns:
value = self.__dict__[column]
if isinstance(value, int):
values.append("%d" % value)
elif not value:
values.append("NULL")
else:
if isinstance(value, tuple):
values.append("\"%s\"" % (', '.join(value)))
else:
value = "%s" % value
"""
We have to encode the value here as latin-1 (iso-8859-1)
because our MT installation uses it.
"""
value = value.encode('latin-1', 'replace')
value = "\"%s\"" % value.replace('"', '\\"')
values.append(value)
if self.id:
# Object already exists, so we'll update rather than insert
pairs = []
for i in range(len(columns)):
pairs.append('%s = %s' % (columns[i], values[i]))
pairs = ', '.join(pairs)
query =\
"UPDATE mt_%s SET %s WHERE %s_id = %s" % (self.className,
pairs,
self.className,
self.id)
else:
columns = ', '.join(columns)
values = ', '.join(values)
query = "INSERT INTO mt_%s (%s) VALUES (%s)" % (self.className,
columns,
values)
return query
def check_keys(self, expected_keys, got_keys):
"""
MTModel subclasses must provide a list of expected
attributes when instantiating a new instance of the model. This
is done as a naive kind of validation to make sure any data you
know you need to have is included when creating new objects.
"""
expected_keys = set(expected_keys)
got_keys = set(got_keys)
if bool(expected_keys - got_keys):
raise Exception("Class %s did not get expected keys: %s" %
(self.className.capitalize(),
', '.join(expected_keys - got_keys)))
def reformat_keys(self, kwargs):
"""
To save typing, keys exclude the name of the model. In a Movable Type
database, fields are preceded by the name of the object,
e.g. entry_title for an mt_entry. To simplify usage, the MTModel class
handles conversion of the field name from and to the correct usage,
so new instances can be created using the shortened form and
attributes on objects can be accessed like:
>>> e = Entry.get(55216)
>>> e.title # rather than requiring e.entry_title
'This is the title'
"""
for key in kwargs.keys():
# Reformat keys to match appropriate table
setattr(self, '%s_%s' % (self.className, key), kwargs[key])
def __getattr__(self, key):
table_key = "%s_%s" % (self.className, key)
return self.__dict__.get(table_key, self.__dict__.get(key))
def __setattr__(self, name, value):
table_key = "%s_%s" % (self.__class__.__name__.lower(), name)
if table_key in self.__dict__:
name = table_key
self.__dict__[name] = value
def __unicode__(self):
"""
Needs to work for entries, authors, and categories/folders.
"""
title_str = 'title'
if not hasattr(self, title_str):
title_str = 'label'
if not hasattr(self, title_str):
title_str = 'name'
return u"%s" % getattr(self, title_str)
def get_week_number(self, created_date):
"""
Used by Movable Type for Entries and Pages.
"""
model_date =\
datetime.datetime(*time.strptime("%s" % created_date,
"%Y-%m-%d %H:%M:%S")[0:5])
year, week, weekday = model_date.isocalendar()
return int("%d%02d" % (year, week))
@classmethod
    def get(cls, obj_id=None, *args, **kwargs):
"""
Defines a class method to query an MTModel, for example:
e = Entry.get(55216)
c = Category.get(3)
"""
obj_id = int(obj_id)
mtquery = query.MTQuery()
        dispatch = getattr(mtquery,
                           "get_%s" % cls.__name__.lower())
return dispatch(obj_id)
class Entry(MTModel):
def __init__(self, *args, **kwargs):
super(Entry, self).__init__()
expected = ['blog_id', 'status', 'author_id',
'title', 'excerpt', 'text',
'created_on', 'basename']
kwargs['week_number'] = self.get_week_number(kwargs['created_on'])
        # Default authored_on and modified_on to created_on when not supplied.
        if 'authored_on' not in kwargs:
kwargs['authored_on'] = kwargs['created_on']
kwargs['modified_on'] = kwargs['created_on']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class Page(Entry):
def __init__(self, *args, **kwargs):
kwargs['class'] = "page"
super(Page, self).__init__(**kwargs)
self.className = 'entry'
expected = ['blog_id', 'status', 'author_id',
'title', 'excerpt', 'text',
'created_on', 'basename']
kwargs['week_number'] = self.get_week_number(kwargs['created_on'])
        # Default authored_on and modified_on to created_on when not supplied.
        if 'authored_on' not in kwargs:
kwargs['authored_on'] = kwargs['created_on']
kwargs['modified_on'] = kwargs['created_on']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class Asset(MTModel):
def __init__(self, *args, **kwargs):
super(Asset, self).__init__()
expected = ['blog_id', 'class', 'created_by', 'created_on',
'description', 'file_ext', 'file_name', 'file_path',
'label', 'mime_type', 'modified_by', 'modified_on',
'parent', 'url']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class ObjectAsset(MTModel):
def __init__(self, *args, **kwargs):
super(ObjectAsset, self).__init__()
expected = ['asset_id', 'blog_id', 'embedded',
'object_ds', 'object_id']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class Tag(MTModel):
def __init__(self, *args, **kwargs):
super(Tag, self).__init__()
expected = ['name']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class ObjectTag(MTModel):
def __init__(self, *args, **kwargs):
super(ObjectTag, self).__init__()
expected = ['blog_id', 'object_datasource', 'object_id', 'tag_id']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class Entry_Meta(MTModel):
def __init__(self, *args, **kwargs):
super(Entry_Meta, self).__init__()
expected = ['entry_id', 'type', 'data', 'blog_id']
self.check_keys(expected, kwargs.keys())
mtquery = query.MTQuery()
        if 'field.' not in kwargs['type']:
kwargs['type'] = "field.%s" % kwargs['type']
kwargs['type'] = kwargs['type'].lower()
meta_column = mtquery.get_field_type(kwargs['blog_id'],
kwargs['type'].replace('field.',
''))
kwargs[meta_column] = kwargs['data']
del kwargs['data']
del kwargs['blog_id']
self.reformat_keys(kwargs)
class Category_Meta(MTModel):
def __init__(self, *args, **kwargs):
super(Category_Meta, self).__init__()
expected = ['category_id', 'type', 'data', 'blog_id']
self.check_keys(expected, kwargs.keys())
mtquery = query.MTQuery()
        if 'field.' not in kwargs['type']:
kwargs['type'] = "field.%s" % kwargs['type']
kwargs['type'] = kwargs['type'].lower()
meta_column = mtquery.get_field_type(kwargs['blog_id'],
kwargs['type'].replace('field.',
''))
kwargs[meta_column] = kwargs['data']
del kwargs['data']
del kwargs['blog_id']
self.reformat_keys(kwargs)
class Placement(MTModel):
def __init__(self, *args, **kwargs):
super(Placement, self).__init__()
expected = ['blog_id', 'entry_id',
'category_id', 'is_primary']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class Category(MTModel):
def __init__(self, *args, **kwargs):
super(Category, self).__init__()
expected = ['blog_id', 'label', 'description',
'author_id', 'parent', 'basename',
'created_on']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class Folder(Category):
def __init__(self, *args, **kwargs):
kwargs['class'] = "folder"
super(Folder, self).__init__(**kwargs)
self.className = 'category'
expected = ['blog_id', 'label', 'description',
'author_id', 'parent', 'basename',
'created_on']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
class Author(MTModel):
def __init__(self, *args, **kwargs):
super(Author, self).__init__()
expected = ['name', 'nickname', 'email']
self.check_keys(expected, kwargs.keys())
self.reformat_keys(kwargs)
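# Illustrative usage sketch (added; not part of the original module). The
# field values below are hypothetical, and a working connect.MTConnection is
# assumed to be importable, as above.
def _example_create_and_save_entry():
    entry = Entry(blog_id=1, status=2, author_id=3,
                  title="Hello world", excerpt="", text="Body text",
                  created_on="2011-01-01 12:00:00", basename="hello_world")
    # save() builds an INSERT (there is no id yet), runs it, records the new
    # entry_id on the model, and cascades save() to any nested MTModel
    # attributes before closing the connection.
    entry.save()
    return entry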
|
|
from __future__ import division, absolute_import, print_function
import numpy as np
try:
from scipy.spatial import (cKDTree, KDTree, SphericalVoronoi, distance,
ConvexHull, Voronoi)
except ImportError:
pass
from .common import Benchmark
class Build(Benchmark):
params = [
[(3,10000,1000), (8,10000,1000), (16,10000,1000)],
['KDTree', 'cKDTree'],
]
param_names = ['(m, n, r)', 'class']
def setup(self, mnr, cls_name):
self.cls = KDTree if cls_name == 'KDTree' else cKDTree
m, n, r = mnr
np.random.seed(1234)
self.data = np.concatenate((np.random.randn(n//2,m),
np.random.randn(n-n//2,m)+np.ones(m)))
self.queries = np.concatenate((np.random.randn(r//2,m),
np.random.randn(r-r//2,m)+np.ones(m)))
def time_build(self, mnr, cls_name):
"""
Constructing kd-tree
=======================
dim | # points | time
"""
m, n, r = mnr
if cls_name == 'cKDTree_flat':
self.T = self.cls(self.data, leafsize=n)
else:
self.cls(self.data)
LEAF_SIZES = [8, 128]
BOX_SIZES = [None, 1.0]
class Query(Benchmark):
params = [
[(3,10000,1000), (8,10000,1000), (16,10000,1000)],
[1, 2, np.inf],
BOX_SIZES, LEAF_SIZES,
]
param_names = ['(m, n, r)', 'p', 'boxsize', 'leafsize']
@staticmethod
def do_setup(self, mnr, p, boxsize, leafsize):
m, n, r = mnr
np.random.seed(1234)
self.data = np.random.uniform(size=(n, m))
self.queries = np.random.uniform(size=(r, m))
self.T = cKDTree(self.data, leafsize=leafsize, boxsize=boxsize)
def setup(self, mnr, p, boxsize, leafsize):
Query.do_setup(self, mnr, p, boxsize, leafsize)
def time_query(self, mnr, p, boxsize, leafsize):
"""
Querying kd-tree
dim | # points | # queries | KDTree | cKDTree | flat cKDTree
"""
self.T.query(self.queries, p=p)
class Radius(Benchmark):
params = [
[(3,10000,1000)],
[1, 2, np.inf],
[0.2, 0.5],
BOX_SIZES, LEAF_SIZES,
]
param_names = ['(m, n, r)', 'p', 'probe radius', 'boxsize', 'leafsize']
def __init__(self):
self.time_query_pairs.__func__.params = list(self.params)
self.time_query_pairs.__func__.params[0] = [(3,1000,30),
(8,1000,30),
(16,1000,30)]
def setup(self, mnr, p, probe_radius, boxsize, leafsize):
Query.do_setup(self, mnr, p, boxsize, leafsize)
def time_query_ball_point(self, mnr, p, probe_radius, boxsize, leafsize):
self.T.query_ball_point(self.queries, probe_radius, p=p)
def time_query_pairs(self, mnr, p, probe_radius, boxsize, leafsize):
self.T.query_pairs(probe_radius, p=p)
class Neighbors(Benchmark):
params = [
[(3,1000,1000),
(8,1000,1000),
(16,1000,1000)],
[1, 2, np.inf],
[0.2, 0.5],
BOX_SIZES, LEAF_SIZES,
['cKDTree', 'cKDTree_weighted'],
]
param_names = ['(m, n1, n2)', 'p', 'probe radius', 'boxsize', 'leafsize', 'cls']
def setup(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):
m, n1, n2 = mn1n2
self.data1 = np.random.uniform(size=(n1, m))
self.data2 = np.random.uniform(size=(n2, m))
self.w1 = np.ones(n1)
self.w2 = np.ones(n2)
self.T1 = cKDTree(self.data1, boxsize=boxsize, leafsize=leafsize)
self.T2 = cKDTree(self.data2, boxsize=boxsize, leafsize=leafsize)
def time_sparse_distance_matrix(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):
self.T1.sparse_distance_matrix(self.T2, probe_radius, p=p)
def time_count_neighbors(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):
"""
Count neighbors kd-tree
dim | # points T1 | # points T2 | p | probe radius | BoxSize | LeafSize | cls
"""
if cls != 'cKDTree_weighted':
self.T1.count_neighbors(self.T2, probe_radius, p=p)
else:
self.T1.count_neighbors(self.T2, probe_radius, self_weights=self.w1, other_weights=self.w2, p=p)
class CNeighbors(Benchmark):
params = [
[
(2,1000,1000),
(8,1000,1000),
(16,1000,1000)
],
[2, 10, 100, 400, 1000],
]
param_names = ['(m, n1, n2)', 'Nr']
def setup(self, mn1n2, Nr):
m, n1, n2 = mn1n2
data1 = np.random.uniform(size=(n1, m))
data2 = np.random.uniform(size=(n2, m))
self.w1 = np.ones(len(data1))
self.w2 = np.ones(len(data2))
self.T1d = cKDTree(data1, leafsize=1)
self.T2d = cKDTree(data2, leafsize=1)
self.T1s = cKDTree(data1, leafsize=8)
self.T2s = cKDTree(data2, leafsize=8)
self.r = np.linspace(0, 0.5, Nr)
def time_count_neighbors_deep(self, mn1n2, Nr):
"""
Count neighbors for a very deep kd-tree
dim | # points T1 | # points T2 | Nr
"""
self.T1d.count_neighbors(self.T2d, self.r)
def time_count_neighbors_shallow(self, mn1n2, Nr):
"""
Count neighbors for a shallow kd-tree
dim | # points T1 | # points T2 | Nr
"""
self.T1s.count_neighbors(self.T2s, self.r)
def generate_spherical_points(num_points):
# generate uniform points on sphere (see:
# http://stackoverflow.com/a/23785326/2942522)
np.random.seed(123)
points = np.random.normal(size=(num_points, 3))
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
return points
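def _spherical_points_sanity_sketch(num_points=100):
    """Added sanity sketch (not part of the benchmark suite): normalizing
    i.i.d. Gaussian vectors yields points uniformly distributed on the unit
    sphere, so every generated point should have norm ~1.
    """
    points = generate_spherical_points(num_points)
    assert np.allclose(np.linalg.norm(points, axis=1), 1.0)
    return points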
class SphericalVor(Benchmark):
params = [10, 100, 1000, 5000, 10000]
param_names = ['num_points']
def setup(self, num_points):
self.points = generate_spherical_points(num_points)
def time_spherical_voronoi_calculation(self, num_points):
"""Perform spherical Voronoi calculation, but not the sorting of
vertices in the Voronoi polygons.
"""
SphericalVoronoi(self.points, radius=1, center=np.zeros(3))
class SphericalVorSort(Benchmark):
params = [10, 100, 1000, 5000, 10000]
param_names = ['num_points']
def setup(self, num_points):
self.points = generate_spherical_points(num_points)
self.sv = SphericalVoronoi(self.points, radius=1,
center=np.zeros(3))
def time_spherical_polygon_vertex_sorting(self, num_points):
"""Time the vertex sorting operation in the Spherical Voronoi
code.
"""
self.sv.sort_vertices_of_regions()
class Cdist(Benchmark):
params = ([10, 100, 1000], ['euclidean', 'minkowski', 'cityblock',
'seuclidean', 'sqeuclidean', 'cosine', 'correlation', 'hamming', 'jaccard',
'chebyshev', 'canberra', 'braycurtis', 'mahalanobis', 'yule', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath', 'wminkowski'])
param_names = ['num_points', 'metric']
def setup(self, num_points, metric):
np.random.seed(123)
self.points = np.random.random_sample((num_points, 3))
def time_cdist(self, num_points, metric):
"""Time scipy.spatial.distance.cdist over a range of input data
sizes and metrics.
"""
distance.cdist(self.points, self.points, metric)
class ConvexHullBench(Benchmark):
params = ([10, 100, 1000, 5000], [True, False])
param_names = ['num_points', 'incremental']
def setup(self, num_points, incremental):
np.random.seed(123)
self.points = np.random.random_sample((num_points, 3))
def time_convex_hull(self, num_points, incremental):
"""Time scipy.spatial.ConvexHull over a range of input data sizes
and settings.
"""
ConvexHull(self.points, incremental)
class VoronoiBench(Benchmark):
params = ([10, 100, 1000, 5000, 10000], [False, True])
param_names = ['num_points', 'furthest_site']
def setup(self, num_points, furthest_site):
np.random.seed(123)
self.points = np.random.random_sample((num_points, 3))
def time_voronoi_calculation(self, num_points, furthest_site):
"""Time conventional Voronoi diagram calculation."""
Voronoi(self.points, furthest_site=furthest_site)
class Hausdorff(Benchmark):
params = [10, 100, 1000]
param_names = ['num_points']
def setup(self, num_points):
np.random.seed(123)
self.points1 = np.random.random_sample((num_points, 3))
np.random.seed(71890)
self.points2 = np.random.random_sample((num_points, 3))
def time_directed_hausdorff(self, num_points):
# time directed_hausdorff code in 3 D
distance.directed_hausdorff(self.points1, self.points2)
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
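# Added illustration (not part of the original miner; the sample buffer is
# hypothetical). bufreverse swaps the byte order inside each 32-bit word and
# wordreverse reverses the order of the 32-bit words, so applying either one
# twice returns the original buffer. Miner.work below uses a related trick:
# it hashes the constant first 76 bytes of the header once and copies that
# SHA-256 state for every nonce, so the expensive prefix is never re-hashed.
def _reverse_helpers_example():
    sample = struct.pack('@I', 0x01020304) + struct.pack('@I', 0x0a0b0c0d)
    assert bufreverse(bufreverse(sample)) == sample
    assert wordreverse(wordreverse(sample)) == sample
    return bufreverse(sample), wordreverse(sample)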
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10232
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
#!/usr/bin/env python
import unittest
import string
import commands
import re
import os
import time
import cPickle
import testbase
import browser_model, fc
g_comp = fc.Compiler()
g_comp.add_func_path("../formulas")
g_comp.add_path("../maps", fc.FormulaTypes.GRADIENT)
g_comp.load_formula_file("gf4d.frm")
g_comp.load_formula_file("test.frm")
g_comp.load_formula_file("gf4d.cfrm")
g_comp.load_formula_file("gf4d.uxf")
class Wrapper(browser_model.T):
def __init__(self):
self.type_changelist = []
self.file_changelist = []
self.formula_changelist = []
browser_model.T.__init__(self,g_comp)
self.type_changed += self._type_changed
self.file_changed += self._file_changed
self.formula_changed += self._formula_changed
def _type_changed(self):
self.type_changelist.append(self.current_type)
def _file_changed(self):
self.file_changelist.append(self.current.fname)
def _formula_changed(self):
self.formula_changelist.append(self.current.formula)
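# Added note: Wrapper records every type/file/formula change notification it
# receives from browser_model.T, so the tests below can assert on the exact
# sequence of callbacks.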
class Test(testbase.TestBase):
def setUp(self):
pass
def testCreation(self):
bm = browser_model.T(g_comp)
def testFuncMapping(self):
bm = browser_model.T(g_comp)
ti = bm.get_type_info(browser_model.FRACTAL)
self.assertEqual(
fc.FormulaTypes.FRACTAL, ti.formula_type)
self.assertEqual( None, ti.fname)
self.assertEqual( None, ti.formula)
self.assertEqual( [], ti.formulas)
ti2 = bm.get_type_info(browser_model.GRADIENT)
self.assertEqual(
fc.FormulaTypes.GRADIENT, ti2.formula_type)
def testSetType(self):
bm = Wrapper()
self.assertEqual([], bm.type_changelist)
self.assertEqual(browser_model.FRACTAL, bm.current_type)
self.assertEqual(
bm.typeinfo[bm.current_type], bm.current)
bm.set_type(browser_model.INNER)
self.assertEqual(browser_model.INNER, bm.current_type)
self.assertEqual(
[browser_model.INNER],
bm.type_changelist)
def testFileList(self):
bm = browser_model.T(g_comp)
self.assertNotEqual(bm.current.files, [])
self.assertListSorted(bm.current.files)
def testSetTypeTwice(self):
bm = Wrapper()
bm.set_type(browser_model.INNER)
bm.set_type(browser_model.INNER)
self.assertEqual(
[browser_model.INNER],
bm.type_changelist)
def testSetTypeUpdatesFnames(self):
bm = browser_model.T(g_comp)
bm.current.fname = "fish"
bm.current.formula = "haddock"
bm.set_type(browser_model.GRADIENT)
self.assertEqual( None, bm.current.fname)
self.assertEqual( None, bm.current.formula)
bm.set_type(browser_model.FRACTAL)
self.assertEqual( "fish", bm.current.fname)
self.assertEqual( "haddock", bm.current.formula)
def testSetFile(self):
bm = Wrapper()
bm.set_file("gf4d.frm")
self.assertEqual("gf4d.frm",bm.current.fname)
self.assertEqual(
["gf4d.frm"], bm.file_changelist)
self.assertNotEqual(0, bm.current.formulas.count("Mandelbrot"))
def testSetBadFile(self):
bm = browser_model.T(g_comp)
self.assertRaises(IOError,bm.set_file,"nonexistent.frm")
def assertListSorted(self,l):
last = ""
for f in l:
self.failUnless(last < f.lower(),"list not sorted: %s" % l)
last = f.lower()
def testFormulasSorted(self):
bm = browser_model.T(g_comp)
bm.set_file("gf4d.frm")
self.assertListSorted(bm.current.formulas)
def testExcludeList(self):
bm = browser_model.T(g_comp)
bm.set_type(browser_model.INNER)
bm.set_file("gf4d.cfrm")
self.assertEqual(0, bm.current.formulas.count("default"))
bm.set_type(browser_model.OUTER)
bm.set_file("gf4d.cfrm")
self.assertEqual(1, bm.current.formulas.count("default"))
def testSetFormula(self):
bm = Wrapper()
bm.set_file("gf4d.frm")
bm.set_formula("Mandelbrot")
self.assertEqual("Mandelbrot",bm.current.formula)
self.assertEqual(
["Mandelbrot"], bm.formula_changelist)
def testSetFileResetsFormula(self):
bm = Wrapper()
bm.set_file("gf4d.frm")
bm.set_formula("Mandelbrot")
bm.set_file("fractint-g4.frm")
self.assertEqual(None, bm.current.formula)
self.assertEqual(
["Mandelbrot", None], bm.formula_changelist)
def testUpdate(self):
bm = Wrapper()
bm.update("gf4d.frm","Mandelbrot")
self.assertEqual("gf4d.frm",bm.current.fname)
self.assertEqual("Mandelbrot", bm.current.formula)
bm.update("fractint-g4.frm", None)
self.assertEqual("fractint-g4.frm",bm.current.fname)
self.assertEqual(None, bm.current.formula)
bm.update(None, None)
self.assertEqual(None, bm.current.fname)
self.assertEqual(None, bm.current.formula)
def testApplyStatus(self):
bm = browser_model.T(g_comp)
self.assertEqual(False, bm.current.can_apply)
bm.set_file("gf4d.frm")
self.assertEqual(False, bm.current.can_apply)
bm.set_formula("Mandelbrot")
self.assertEqual(True, bm.current.can_apply)
bm.set_type(browser_model.GRADIENT)
self.assertEqual(False, bm.current.can_apply)
bm.set_file("Gallet01.map")
self.assertEqual(True, bm.current.can_apply)
bm.set_file("blatte1.ugr")
self.assertEqual(False, bm.current.can_apply)
bm.update("test.frm","test_error")
self.assertEqual(False, bm.current.can_apply)
def testUgrPresent(self):
bm = browser_model.T(g_comp)
bm.set_type(browser_model.GRADIENT)
files = bm.current.files
self.assertEqual(1,files.count("blatte1.ugr"))
def testInstance(self):
x = browser_model.instance
def suite():
return unittest.makeSuite(Test,'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the Kronecker product of one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorKronecker"]
def _vec(x):
"""Stacks column of matrix to form a single column."""
return array_ops.reshape(
array_ops.matrix_transpose(x),
array_ops.concat(
[array_ops.shape(x)[:-2], [-1]], axis=0))
def _unvec_by(y, num_col):
"""Unstack vector to form a matrix, with a specified amount of columns."""
return array_ops.matrix_transpose(
array_ops.reshape(
y,
array_ops.concat(
[array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))
def _rotate_last_dim(x, rotate_right=False):
"""Rotate the last dimension either left or right."""
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
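# Added note (illustrative, hypothetical values): for the 2 x 3 matrix
#   x = [[1, 2, 3],
#        [4, 5, 6]]
# _vec(x) stacks the columns into [1, 4, 2, 5, 3, 6], and
# _unvec_by(_vec(x), num_col=3) recovers the original 2 x 3 matrix.
# _rotate_last_dim moves the last axis to the front when rotate_right=True
# and the first axis to the back when rotate_right=False.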
@tf_export("linalg.LinearOperatorKronecker")
class LinearOperatorKronecker(linear_operator.LinearOperator):
"""Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
`op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is
associative).
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
==> [[1., 0., 2., 0.],
[2., 1., 4., 2.],
[3., 0., 4., 0.],
[6., 3., 8., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random.normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random.normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
  # Create a shape [2, 3, 30, 2] vector.
  x = tf.random.normal(shape=[2, 3, 30, 2])
  operator_large.matmul(x)
  ==> Shape [2, 3, 20, 2] Tensor
```
#### Performance
The performance of `LinearOperatorKronecker` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
parameters = dict(
operators=operators,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
# A Kronecker product is invertible, if and only if all factors are
# invertible.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
    # The eigenvalues of a Kronecker product are the products of the
    # eigenvalues of the corresponding factors.
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = operators[0].name
for operator in operators[1:]:
name += "_x_" + operator.name
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents(graph_parents)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension = domain_dimension * operator.domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
range_dimension = range_dimension * operator.range_dimension
matrix_shape = tensor_shape.TensorShape([
range_dimension, domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
domain_dimension = self.operators[0].domain_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension = domain_dimension * operator.domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
range_dimension = range_dimension * operator.range_dimension_tensor()
matrix_shape = [range_dimension, domain_dimension]
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape_tensor()
for operator in self.operators[1:]:
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape, operator.batch_shape_tensor())
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Here we heavily rely on Roth's column Lemma [1]:
# (A x B) * vec X = vec BXA^T,
# where vec stacks all the columns of the matrix under each other. In our
# case, x represents a batch of vec X (i.e. we think of x as a batch of
# column vectors, rather than a matrix). Each member of the batch can be
# reshaped to a matrix (hence we get a batch of matrices).
# We can iteratively apply this lemma by noting that if B is a Kronecker
# product, then we can apply the lemma again.
# [1] W. E. Roth, "On direct product matrices,"
# Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,
# 1934
# Efficiency
# Naively doing the Kronecker product, by calculating the dense matrix and
    # applying it, can take cubic time in the size of domain_dimension
# (assuming a square matrix). The other issue is that calculating the dense
# matrix can be prohibitively expensive, in that it can take a large amount
# of memory.
#
# This implementation avoids this memory blow up by only computing matmuls
# with the factors. In this way, we don't have to realize the dense matrix.
# In terms of complexity, if we have Kronecker Factors of size:
# (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \prod n_i, and we
# have as input a [N, M] matrix, the naive approach would take O(N^2 M).
# With this approach (ignoring reshaping of tensors and transposes for now),
# the time complexity can be O(M * (\sum n_i) * N). There is also the
# benefit of batched multiplication (In this example, the batch size is
# roughly M * N) so this can be much faster. However, not factored in are
    # the costs of the several tensor transposes, which can affect cache
# behavior.
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
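    # Added concrete illustration (hypothetical sizes): for A of shape [2, 2]
    # and B of shape [3, 3], (A x B) is [6, 6]. Rather than forming it, a
    # length-6 column x is reshaped into a 3 x 2 matrix X and
    # (A x B) vec(X) = vec(B X A^T) is computed with two small matmuls.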
if adjoint_arg:
x = linalg.adjoint(x)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
# x has shape [B, R, C], where B represent some number of batch dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(x, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^T) = (AX^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].matvec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if x.shape.is_fully_defined():
column_dim = x.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
x.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _determinant(self):
# Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m
# matrix, and X2 is an n x n matrix. We can iteratively apply this property
# to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the
# domain dimension of all operators, then we have:
# |X1 x X2 x X3 ...| =
# |X1| ** (T / m) * |X2 x X3 ... | ** m =
# |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =
# |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n)
# And by doing induction we have product(|X_i| ** (T / dim(X_i))).
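    # Added worked example (hypothetical sizes): for a 2 x 2 factor X1 and a
    # 3 x 3 factor X2, T = 6, so |X1 x X2| = |X1| ** (6 / 2) * |X2| ** (6 / 3)
    # = |X1| ** 3 * |X2| ** 2.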
total = self.domain_dimension_tensor()
determinant = 1.
for operator in self.operators:
determinant = determinant * operator.determinant() ** math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return determinant
def _log_abs_determinant(self):
# This will be sum((total / dim(x_i)) * log |X_i|)
total = self.domain_dimension_tensor()
log_abs_det = 0.
for operator in self.operators:
log_abs_det += operator.log_abs_determinant() * math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return log_abs_det
def _trace(self):
# tr(A x B) = tr(A) * tr(B)
trace = 1.
for operator in self.operators:
trace = trace * operator.trace()
return trace
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# Here we follow the same use of Roth's column lemma as in `matmul`, with
# the key difference that we replace all `matmul` instances with `solve`.
# This follows from the property that inv(A x B) = inv(A) x inv(B).
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
rhs = linalg.adjoint(rhs)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)
# rhs has shape [B, R, C], where B represent some number of batch
# dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(rhs, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^-1^T) = (A^-1 X^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].solvevec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if rhs.shape.is_fully_defined():
column_dim = rhs.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
rhs.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _diag_part(self):
diag_part = self.operators[0].diag_part()
for operator in self.operators[1:]:
diag_part = diag_part[..., :, array_ops.newaxis]
op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]
diag_part = diag_part * op_diag_part
diag_part = array_ops.reshape(
diag_part,
shape=array_ops.concat(
[array_ops.shape(diag_part)[:-2], [-1]], axis=0))
if self.range_dimension > self.domain_dimension:
diag_dimension = self.domain_dimension
else:
diag_dimension = self.range_dimension
diag_part.set_shape(
self.batch_shape.concatenate(diag_dimension))
return diag_part
def _to_dense(self):
product = self.operators[0].to_dense()
for operator in self.operators[1:]:
# Product has shape [B, R1, 1, C1, 1].
product = product[
..., :, array_ops.newaxis, :, array_ops.newaxis]
# Operator has shape [B, 1, R2, 1, C2].
op_to_mul = operator.to_dense()[
..., array_ops.newaxis, :, array_ops.newaxis, :]
# This is now [B, R1, R2, C1, C2].
product = product * op_to_mul
# Now merge together dimensions to get [B, R1 * R2, C1 * C2].
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
product.set_shape(self.shape)
return product
def _eigvals(self):
    # This will be the Kronecker product of all the eigenvalues.
    # Note: it doesn't matter in which order the factors are combined, since
    # all Kronecker products of the same matrices are similar and therefore
    # share the same eigenvalues.
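    # Added note (illustrative): if A has eigenvalues {a_i} and B has
    # eigenvalues {b_j}, then A x B has eigenvalues {a_i * b_j}; the loop
    # below forms exactly these pairwise products.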
eigvals = [operator.eigvals() for operator in self.operators]
# Now compute the kronecker product
product = eigvals[0]
for eigval in eigvals[1:]:
# Product has shape [B, R1, 1].
product = product[..., array_ops.newaxis]
# Eigval has shape [B, 1, R2]. Produces shape [B, R1, R2].
product = product * eigval[..., array_ops.newaxis, :]
# Reshape to [B, R1 * R2]
product = array_ops.reshape(
product,
shape=array_ops.concat([array_ops.shape(product)[:-2], [-1]], axis=0))
product.set_shape(self.shape[:-1])
return product
def _assert_non_singular(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_non_singular() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be invertible.")
def _assert_self_adjoint(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_self_adjoint() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be self adjoint.")
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import itertools
import math
import os
import pytest
from cryptography import exceptions, utils
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import hashes, interfaces
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from .utils import generate_rsa_verification_test
from ...utils import (
load_pkcs1_vectors, load_rsa_nist_vectors, load_vectors_from_file,
raises_unsupported_algorithm
)
@utils.register_interface(interfaces.AsymmetricPadding)
class DummyPadding(object):
name = "UNSUPPORTED-PADDING"
class DummyMGF(object):
pass
def _modinv(e, m):
"""
Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1
"""
x1, y1, x2, y2 = 1, 0, 0, 1
a, b = e, m
while b > 0:
q, r = divmod(a, b)
xn, yn = x1 - q * x2, y1 - q * y2
a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn
return x1 % m
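def _modinv_example():
    # Added sketch (not part of the original test suite): a small,
    # hand-checkable case of the helper above: 3 * 4 == 12 == 1 (mod 11).
    assert _modinv(3, 11) == 4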
def _check_rsa_private_key(skey):
assert skey
assert skey.modulus
assert skey.public_exponent
assert skey.private_exponent
assert skey.p * skey.q == skey.modulus
assert skey.key_size
assert skey.dmp1 == skey.d % (skey.p - 1)
assert skey.dmq1 == skey.d % (skey.q - 1)
assert skey.iqmp == _modinv(skey.q, skey.p)
pkey = skey.public_key()
assert pkey
assert skey.modulus == pkey.modulus
assert skey.public_exponent == pkey.public_exponent
assert skey.key_size == pkey.key_size
def _flatten_pkcs1_examples(vectors):
flattened_vectors = []
for vector in vectors:
examples = vector[0].pop("examples")
for example in examples:
merged_vector = (vector[0], vector[1], example)
flattened_vectors.append(merged_vector)
return flattened_vectors
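# Added note: each flattened vector is a (private key values, public key
# values, example) tuple, which is how the parametrized tests below unpack
# it.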
def test_modular_inverse():
p = int(
"d1f9f6c09fd3d38987f7970247b85a6da84907753d42ec52bc23b745093f4fff5cff3"
"617ce43d00121a9accc0051f519c76e08cf02fc18acfe4c9e6aea18da470a2b611d2e"
"56a7b35caa2c0239bc041a53cc5875ca0b668ae6377d4b23e932d8c995fd1e58ecfd8"
"c4b73259c0d8a54d691cca3f6fb85c8a5c1baf588e898d481", 16
)
q = int(
"d1519255eb8f678c86cfd06802d1fbef8b664441ac46b73d33d13a8404580a33a8e74"
"cb2ea2e2963125b3d454d7a922cef24dd13e55f989cbabf64255a736671f4629a47b5"
"b2347cfcd669133088d1c159518531025297c2d67c9da856a12e80222cd03b4c6ec0f"
"86c957cb7bb8de7a127b645ec9e820aa94581e4762e209f01", 16
)
assert _modinv(q, p) == int(
"0275e06afa722999315f8f322275483e15e2fb46d827b17800f99110b269a6732748f"
"624a382fa2ed1ec68c99f7fc56fb60e76eea51614881f497ba7034c17dde955f92f15"
"772f8b2b41f3e56d88b1e096cdd293eba4eae1e82db815e0fadea0c4ec971bc6fd875"
"c20e67e48c31a611e98d32c6213ae4c4d7b53023b2f80c538", 16
)
@pytest.mark.rsa
class TestRSA(object):
@pytest.mark.parametrize(
"public_exponent,key_size",
itertools.product(
(3, 5, 65537),
(1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1536, 2048)
)
)
def test_generate_rsa_keys(self, backend, public_exponent, key_size):
skey = rsa.RSAPrivateKey.generate(public_exponent, key_size, backend)
_check_rsa_private_key(skey)
assert skey.key_size == key_size
assert skey.public_exponent == public_exponent
def test_generate_bad_rsa_key(self, backend):
with pytest.raises(ValueError):
rsa.RSAPrivateKey.generate(public_exponent=1,
key_size=2048,
backend=backend)
with pytest.raises(ValueError):
rsa.RSAPrivateKey.generate(public_exponent=4,
key_size=2048,
backend=backend)
def test_cant_generate_insecure_tiny_key(self, backend):
with pytest.raises(ValueError):
rsa.RSAPrivateKey.generate(public_exponent=65537,
key_size=511,
backend=backend)
with pytest.raises(ValueError):
rsa.RSAPrivateKey.generate(public_exponent=65537,
key_size=256,
backend=backend)
@pytest.mark.parametrize(
"pkcs1_example",
load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"),
load_pkcs1_vectors
)
)
def test_load_pss_vect_example_keys(self, pkcs1_example):
secret, public = pkcs1_example
skey = rsa.RSAPrivateKey(
p=secret["p"],
q=secret["q"],
private_exponent=secret["private_exponent"],
dmp1=secret["dmp1"],
dmq1=secret["dmq1"],
iqmp=secret["iqmp"],
public_exponent=secret["public_exponent"],
modulus=secret["modulus"]
)
assert skey
_check_rsa_private_key(skey)
pkey = rsa.RSAPublicKey(
public_exponent=public["public_exponent"],
modulus=public["modulus"]
)
assert pkey
pkey2 = skey.public_key()
assert pkey2
assert skey.modulus == pkey.modulus
assert skey.modulus == skey.n
assert skey.public_exponent == pkey.public_exponent
assert skey.public_exponent == skey.e
assert skey.private_exponent == skey.d
assert pkey.modulus
assert pkey.modulus == pkey2.modulus
assert pkey.modulus == pkey.n
assert pkey.public_exponent == pkey2.public_exponent
assert pkey.public_exponent == pkey.e
assert skey.key_size
assert skey.key_size == pkey.key_size
assert skey.key_size == pkey2.key_size
def test_invalid_private_key_argument_types(self):
with pytest.raises(TypeError):
rsa.RSAPrivateKey(None, None, None, None, None, None, None, None)
def test_invalid_public_key_argument_types(self):
with pytest.raises(TypeError):
rsa.RSAPublicKey(None, None)
def test_invalid_private_key_argument_values(self):
# Start with p=3, q=11, private_exponent=3, public_exponent=7,
# modulus=33, dmp1=1, dmq1=3, iqmp=2. Then change one value at
# a time to test the bounds.
# Test a modulus < 3.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=7,
modulus=2
)
# Test a modulus != p * q.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=7,
modulus=35
)
# Test a p > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=37,
q=11,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=7,
modulus=33
)
# Test a q > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=37,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=7,
modulus=33
)
# Test a dmp1 > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=35,
dmq1=3,
iqmp=2,
public_exponent=7,
modulus=33
)
# Test a dmq1 > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=35,
iqmp=2,
public_exponent=7,
modulus=33
)
# Test an iqmp > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=35,
public_exponent=7,
modulus=33
)
# Test a private_exponent > modulus
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=37,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=7,
modulus=33
)
# Test a public_exponent < 3
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=1,
modulus=33
)
# Test a public_exponent > modulus
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=65537,
modulus=33
)
# Test a public_exponent that is not odd.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_exponent=6,
modulus=33
)
# Test a dmp1 that is not odd.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=2,
dmq1=3,
iqmp=2,
public_exponent=7,
modulus=33
)
# Test a dmq1 that is not odd.
with pytest.raises(ValueError):
rsa.RSAPrivateKey(
p=3,
q=11,
private_exponent=3,
dmp1=1,
dmq1=4,
iqmp=2,
public_exponent=7,
modulus=33
)
def test_invalid_public_key_argument_values(self):
# Start with public_exponent=7, modulus=15. Then change one value at a
# time to test the bounds.
# Test a modulus < 3.
with pytest.raises(ValueError):
rsa.RSAPublicKey(public_exponent=7, modulus=2)
# Test a public_exponent < 3
with pytest.raises(ValueError):
rsa.RSAPublicKey(public_exponent=1, modulus=15)
# Test a public_exponent > modulus
with pytest.raises(ValueError):
rsa.RSAPublicKey(public_exponent=17, modulus=15)
# Test a public_exponent that is not odd.
with pytest.raises(ValueError):
rsa.RSAPublicKey(public_exponent=6, modulus=15)
def test_rsa_generate_invalid_backend():
pretend_backend = object()
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
rsa.RSAPrivateKey.generate(65537, 2048, pretend_backend)
@pytest.mark.rsa
class TestRSASignature(object):
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs1v15sign-vectors.txt"),
load_pkcs1_vectors
))
)
def test_pkcs1v15_signing(self, pkcs1_example, backend):
private, public, example = pkcs1_example
private_key = rsa.RSAPrivateKey(
p=private["p"],
q=private["q"],
private_exponent=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_exponent=private["public_exponent"],
modulus=private["modulus"]
)
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1(), backend)
signer.update(binascii.unhexlify(example["message"]))
signature = signer.finalize()
assert binascii.hexlify(signature) == example["signature"]
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"),
load_pkcs1_vectors
))
)
def test_pss_signing(self, pkcs1_example, backend):
private, public, example = pkcs1_example
private_key = rsa.RSAPrivateKey(
p=private["p"],
q=private["q"],
private_exponent=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_exponent=private["public_exponent"],
modulus=private["modulus"]
)
public_key = rsa.RSAPublicKey(
public_exponent=public["public_exponent"],
modulus=public["modulus"]
)
signer = private_key.signer(
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA1(),
backend
)
signer.update(binascii.unhexlify(example["message"]))
signature = signer.finalize()
assert len(signature) == math.ceil(private_key.key_size / 8.0)
# PSS signatures contain randomness so we can't do an exact
# signature check. Instead we'll verify that the signature created
# successfully verifies.
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA1(),
backend
)
verifier.update(binascii.unhexlify(example["message"]))
verifier.verify()
@pytest.mark.parametrize(
"hash_alg",
[hashes.SHA224(), hashes.SHA256(), hashes.SHA384(), hashes.SHA512()]
)
def test_pss_signing_sha2(self, hash_alg, backend):
if not backend.mgf1_hash_supported(hash_alg):
pytest.skip(
"Does not support {0} with MGF1.".format(hash_alg.name)
)
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=768,
backend=backend
)
public_key = private_key.public_key()
pss = padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
salt_length=padding.MGF1.MAX_LENGTH
)
)
signer = private_key.signer(
pss,
hash_alg,
backend
)
signer.update(b"testing signature")
signature = signer.finalize()
verifier = public_key.verifier(
signature,
pss,
hash_alg,
backend
)
verifier.update(b"testing signature")
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512."
)
def test_pss_minimum_key_size_for_digest(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=522,
backend=backend
)
signer = private_key.signer(
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA512(),
backend
)
signer.update(b"no failure")
signer.finalize()
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512."
)
def test_pss_signing_digest_too_large_for_key_size(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
with pytest.raises(ValueError):
private_key.signer(
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA512(),
backend
)
def test_pss_signing_salt_length_too_long(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
signer = private_key.signer(
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=1000000
)
),
hashes.SHA1(),
backend
)
signer.update(b"failure coming")
with pytest.raises(ValueError):
signer.finalize()
def test_use_after_finalize(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1(), backend)
signer.update(b"sign me")
signer.finalize()
with pytest.raises(exceptions.AlreadyFinalized):
signer.finalize()
with pytest.raises(exceptions.AlreadyFinalized):
signer.update(b"more data")
def test_unsupported_padding(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.signer(DummyPadding(), hashes.SHA1(), backend)
def test_padding_incorrect_type(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
with pytest.raises(TypeError):
private_key.signer("notpadding", hashes.SHA1(), backend)
def test_rsa_signer_invalid_backend(self, backend):
pretend_backend = object()
private_key = rsa.RSAPrivateKey.generate(65537, 2048, backend)
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
private_key.signer(
                padding.PKCS1v15(), hashes.SHA256(), pretend_backend)
def test_unsupported_pss_mgf(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
with pytest.raises(UnsupportedAlgorithm):
private_key.signer(padding.PSS(mgf=DummyMGF()), hashes.SHA1(),
backend)
@pytest.mark.rsa
class TestRSAVerification(object):
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs1v15sign-vectors.txt"),
load_pkcs1_vectors
))
)
def test_pkcs1v15_verification(self, pkcs1_example, backend):
private, public, example = pkcs1_example
public_key = rsa.RSAPublicKey(
public_exponent=public["public_exponent"],
modulus=public["modulus"]
)
verifier = public_key.verifier(
binascii.unhexlify(example["signature"]),
padding.PKCS1v15(),
hashes.SHA1(),
backend
)
verifier.update(binascii.unhexlify(example["message"]))
verifier.verify()
def test_invalid_pkcs1v15_signature_wrong_data(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
public_key = private_key.public_key()
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1(), backend)
signer.update(b"sign me")
signature = signer.finalize()
verifier = public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA1(),
backend
)
verifier.update(b"incorrect data")
with pytest.raises(exceptions.InvalidSignature):
verifier.verify()
def test_invalid_pkcs1v15_signature_wrong_key(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
private_key2 = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
public_key = private_key2.public_key()
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1(), backend)
signer.update(b"sign me")
signature = signer.finalize()
verifier = public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA1(),
backend
)
verifier.update(b"sign me")
with pytest.raises(exceptions.InvalidSignature):
verifier.verify()
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"),
load_pkcs1_vectors
))
)
def test_pss_verification(self, pkcs1_example, backend):
private, public, example = pkcs1_example
public_key = rsa.RSAPublicKey(
public_exponent=public["public_exponent"],
modulus=public["modulus"]
)
verifier = public_key.verifier(
binascii.unhexlify(example["signature"]),
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=20
)
),
hashes.SHA1(),
backend
)
verifier.update(binascii.unhexlify(example["message"]))
verifier.verify()
def test_invalid_pss_signature_wrong_data(self, backend):
public_key = rsa.RSAPublicKey(
modulus=int(
b"dffc2137d5e810cde9e4b4612f5796447218bab913b3fa98bdf7982e4fa6"
b"ec4d6653ef2b29fb1642b095befcbea6decc178fb4bed243d3c3592c6854"
b"6af2d3f3", 16
),
public_exponent=65537
)
signature = binascii.unhexlify(
b"0e68c3649df91c5bc3665f96e157efa75b71934aaa514d91e94ca8418d100f45"
b"6f05288e58525f99666bab052adcffdf7186eb40f583bd38d98c97d3d524808b"
)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA1(),
backend
)
verifier.update(b"incorrect data")
with pytest.raises(exceptions.InvalidSignature):
verifier.verify()
def test_invalid_pss_signature_wrong_key(self, backend):
signature = binascii.unhexlify(
b"3a1880165014ba6eb53cc1449d13e5132ebcc0cfd9ade6d7a2494a0503bd0826"
b"f8a46c431e0d7be0ca3e453f8b2b009e2733764da7927cc6dbe7a021437a242e"
)
public_key = rsa.RSAPublicKey(
modulus=int(
b"381201f4905d67dfeb3dec131a0fbea773489227ec7a1448c3109189ac68"
b"5a95441be90866a14c4d2e139cd16db540ec6c7abab13ffff91443fd46a8"
b"960cbb7658ded26a5c95c86f6e40384e1c1239c63e541ba221191c4dd303"
b"231b42e33c6dbddf5ec9a746f09bf0c25d0f8d27f93ee0ae5c0d723348f4"
b"030d3581e13522e1", 16
),
public_exponent=65537
)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA1(),
backend
)
verifier.update(b"sign me")
with pytest.raises(exceptions.InvalidSignature):
verifier.verify()
def test_invalid_pss_signature_data_too_large_for_modulus(self, backend):
signature = binascii.unhexlify(
b"cb43bde4f7ab89eb4a79c6e8dd67e0d1af60715da64429d90c716a490b799c29"
b"194cf8046509c6ed851052367a74e2e92d9b38947ed74332acb115a03fcc0222"
)
public_key = rsa.RSAPublicKey(
modulus=int(
b"381201f4905d67dfeb3dec131a0fbea773489227ec7a1448c3109189ac68"
b"5a95441be90866a14c4d2e139cd16db540ec6c7abab13ffff91443fd46a8"
b"960cbb7658ded26a5c95c86f6e40384e1c1239c63e541ba221191c4dd303"
b"231b42e33c6dbddf5ec9a746f09bf0c25d0f8d27f93ee0ae5c0d723348f4"
b"030d3581e13522", 16
),
public_exponent=65537
)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA1(),
backend
)
verifier.update(b"sign me")
with pytest.raises(exceptions.InvalidSignature):
verifier.verify()
def test_use_after_finalize(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
public_key = private_key.public_key()
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1(), backend)
signer.update(b"sign me")
signature = signer.finalize()
verifier = public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA1(),
backend
)
verifier.update(b"sign me")
verifier.verify()
with pytest.raises(exceptions.AlreadyFinalized):
verifier.verify()
with pytest.raises(exceptions.AlreadyFinalized):
verifier.update(b"more data")
def test_unsupported_padding(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
public_key.verifier(b"sig", DummyPadding(), hashes.SHA1(), backend)
def test_padding_incorrect_type(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
public_key = private_key.public_key()
with pytest.raises(TypeError):
public_key.verifier(b"sig", "notpadding", hashes.SHA1(), backend)
def test_rsa_verifier_invalid_backend(self, backend):
pretend_backend = object()
private_key = rsa.RSAPrivateKey.generate(65537, 2048, backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
public_key.verifier(
b"foo", padding.PKCS1v15(), hashes.SHA256(), pretend_backend)
def test_unsupported_pss_mgf(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
public_key = private_key.public_key()
with pytest.raises(UnsupportedAlgorithm):
public_key.verifier(b"sig", padding.PSS(mgf=DummyMGF()),
hashes.SHA1(), backend)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512."
)
def test_pss_verify_digest_too_large_for_key_size(self, backend):
private_key = rsa.RSAPrivateKey.generate(
public_exponent=65537,
key_size=512,
backend=backend
)
signature = binascii.unhexlify(
b"8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8"
b"534c050ef6b19b1bdc6eb4da422e89161106a6f5b5cc16135b11eb6439b646bd"
)
public_key = private_key.public_key()
with pytest.raises(ValueError):
public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=padding.MGF1.MAX_LENGTH
)
),
hashes.SHA512(),
backend
)
def test_pss_verify_salt_length_too_long(self, backend):
signature = binascii.unhexlify(
b"8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8"
b"534c050ef6b19b1bdc6eb4da422e89161106a6f5b5cc16135b11eb6439b646bd"
)
public_key = rsa.RSAPublicKey(
modulus=int(
b"d309e4612809437548b747d7f9eb9cd3340f54fe42bb3f84a36933b0839c"
b"11b0c8b7f67e11f7252370161e31159c49c784d4bc41c42a78ce0f0b40a3"
b"ca8ffb91", 16
),
public_exponent=65537
)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
salt_length=1000000
)
),
hashes.SHA1(),
backend
)
verifier.update(b"sign me")
with pytest.raises(exceptions.InvalidSignature):
verifier.verify()
@pytest.mark.rsa
class TestRSAPSSMGF1Verification(object):
test_rsa_pss_mgf1_sha1 = pytest.mark.supported(
only_if=lambda backend: backend.mgf1_hash_supported(hashes.SHA1()),
skip_message="Does not support SHA1 with MGF1."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA1(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
salt_length=params["salt_length"]
)
)
))
test_rsa_pss_mgf1_sha224 = pytest.mark.supported(
only_if=lambda backend: backend.mgf1_hash_supported(hashes.SHA224()),
skip_message="Does not support SHA224 with MGF1."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA224(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
salt_length=params["salt_length"]
)
)
))
test_rsa_pss_mgf1_sha256 = pytest.mark.supported(
only_if=lambda backend: backend.mgf1_hash_supported(hashes.SHA256()),
skip_message="Does not support SHA256 with MGF1."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA256(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
salt_length=params["salt_length"]
)
)
))
test_rsa_pss_mgf1_sha384 = pytest.mark.supported(
only_if=lambda backend: backend.mgf1_hash_supported(hashes.SHA384()),
skip_message="Does not support SHA384 with MGF1."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA384(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
salt_length=params["salt_length"]
)
)
))
test_rsa_pss_mgf1_sha512 = pytest.mark.supported(
only_if=lambda backend: backend.mgf1_hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512 with MGF1."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA512(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
salt_length=params["salt_length"]
)
)
))
@pytest.mark.rsa
class TestRSAPKCS1Verification(object):
test_rsa_pkcs1v15_verify_sha1 = pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA1()),
skip_message="Does not support SHA1."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigVer15_186-3.rsp",
],
hashes.SHA1(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha224 = pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA224()),
skip_message="Does not support SHA224."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigVer15_186-3.rsp",
],
hashes.SHA224(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha256 = pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA256()),
skip_message="Does not support SHA256."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigVer15_186-3.rsp",
],
hashes.SHA256(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha384 = pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA384()),
skip_message="Does not support SHA384."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigVer15_186-3.rsp",
],
hashes.SHA384(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha512 = pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigVer15_186-3.rsp",
],
hashes.SHA512(),
lambda params, hash_alg: padding.PKCS1v15()
))
class TestMGF1(object):
def test_invalid_hash_algorithm(self):
with pytest.raises(TypeError):
padding.MGF1(b"not_a_hash", 0)
def test_invalid_salt_length_not_integer(self):
with pytest.raises(TypeError):
padding.MGF1(hashes.SHA1(), b"not_a_length")
def test_invalid_salt_length_negative_integer(self):
with pytest.raises(ValueError):
padding.MGF1(hashes.SHA1(), -1)
def test_valid_mgf1_parameters(self):
algorithm = hashes.SHA1()
salt_length = algorithm.digest_size
mgf = padding.MGF1(algorithm, salt_length)
assert mgf._algorithm == algorithm
assert mgf._salt_length == salt_length
def test_valid_mgf1_parameters_maximum(self):
algorithm = hashes.SHA1()
mgf = padding.MGF1(algorithm, padding.MGF1.MAX_LENGTH)
assert mgf._algorithm == algorithm
assert mgf._salt_length == padding.MGF1.MAX_LENGTH
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django import template
from django import http
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
import datetime
import json
import logging
from django.contrib import messages
from django_openstack import api
from django_openstack import forms
from django_openstack.dash.views import instances as dash_instances
from openstackx.api import exceptions as api_exceptions
LOG = logging.getLogger('django_openstack.syspanel.views.users')
class UserForm(forms.Form):
def __init__(self, *args, **kwargs):
tenant_list = kwargs.pop('tenant_list', None)
super(UserForm, self).__init__(*args, **kwargs)
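        # The tenant list is only known per request, so the choices for the
        # declared 'tenant_id' field below are filled in dynamically here.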
        self.fields['tenant_id'].choices = [
            [tenant.id, tenant.id] for tenant in tenant_list]
id = forms.CharField(label="ID (username)")
email = forms.CharField(label="Email")
password = forms.CharField(label="Password", widget=forms.PasswordInput(render_value=False), required=False)
tenant_id = forms.ChoiceField(label="Primary Tenant")
class UserDeleteForm(forms.SelfHandlingForm):
user = forms.CharField(required=True)
def handle(self, request, data):
user_id = data['user']
LOG.info('Deleting user with id "%s"' % user_id)
api.user_delete(request, user_id)
messages.info(request, '%s was successfully deleted.'
% user_id)
return redirect(request.build_absolute_uri())
class UserEnableDisableForm(forms.SelfHandlingForm):
id = forms.CharField(label="ID (username)", widget=forms.HiddenInput())
enabled = forms.ChoiceField(label="enabled", widget=forms.HiddenInput(),
choices=[[c, c]
for c in ("disable", "enable")])
def handle(self, request, data):
user_id = data['id']
enabled = data['enabled'] == "enable"
try:
api.user_update_enabled(request, user_id, enabled)
messages.info(request, "User %s %s" %
(user_id,
"enabled" if enabled else "disabled"))
except api_exceptions.ApiException:
messages.error(request, "Unable to %s user %s" %
("enable" if enabled else "disable",
user_id))
return redirect(request.build_absolute_uri())
@login_required
def index(request):
for f in (UserDeleteForm, UserEnableDisableForm):
_, handled = f.maybe_handle(request)
if handled:
return handled
users = []
try:
users = api.user_list(request)
    except api_exceptions.ApiException as e:
messages.error(request, 'Unable to list users: %s' %
e.message)
user_delete_form = UserDeleteForm()
user_enable_disable_form = UserEnableDisableForm()
return shortcuts.render_to_response('syspanel_users.html', {
'users': users,
'user_delete_form': user_delete_form,
'user_enable_disable_form': user_enable_disable_form,
}, context_instance=template.RequestContext(request))
@login_required
def update(request, user_id):
if request.method == "POST":
tenants = api.tenant_list(request)
form = UserForm(request.POST, tenant_list=tenants)
if form.is_valid():
user = form.clean()
updated = []
if user['email']:
updated.append('email')
api.user_update_email(request, user['id'], user['email'])
if user['password']:
updated.append('password')
api.user_update_password(request, user['id'], user['password'])
if user['tenant_id']:
updated.append('tenant')
api.user_update_tenant(request, user['id'], user['tenant_id'])
messages.success(request,
'Updated %s for %s.'
% (', '.join(updated), user_id))
return redirect('syspanel_users')
else:
# TODO add better error management
            messages.error(request, 'Unable to update user, '
                                    'please try again.')
return render_to_response(
'syspanel_user_update.html',{
'form': form,
'user_id': user_id,
}, context_instance = template.RequestContext(request))
else:
u = api.user_get(request, user_id)
tenants = api.tenant_list(request)
try:
# FIXME
email = u.email
        except AttributeError:
email = ''
try:
tenant_id = u.tenantId
        except AttributeError:
tenant_id = None
form = UserForm(initial={'id': user_id,
'tenant_id': tenant_id,
'email': email},
tenant_list=tenants)
return render_to_response(
'syspanel_user_update.html',{
'form': form,
'user_id': user_id,
}, context_instance = template.RequestContext(request))
@login_required
def create(request):
try:
tenants = api.tenant_list(request)
    except api_exceptions.ApiException as e:
messages.error(request, 'Unable to retrieve tenant list: %s' %
e.message)
return redirect('syspanel_users')
if request.method == "POST":
form = UserForm(request.POST, tenant_list=tenants)
if form.is_valid():
user = form.clean()
# TODO Make this a real request
try:
LOG.info('Creating user with id "%s"' % user['id'])
api.user_create(request,
user['id'],
user['email'],
user['password'],
user['tenant_id'],
True)
api.account_api(request).role_refs.add_for_tenant_user(
user['tenant_id'], user['id'],
settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE)
messages.success(request,
'%s was successfully created.'
% user['id'])
return redirect('syspanel_users')
            except api_exceptions.ApiException as e:
LOG.error('ApiException while creating user\n'
'id: "%s", email: "%s", tenant_id: "%s"' %
(user['id'], user['email'], user['tenant_id']),
exc_info=True)
messages.error(request,
'Error creating user: %s'
% e.message)
return redirect('syspanel_users')
else:
return render_to_response(
'syspanel_user_create.html',{
'form': form,
}, context_instance = template.RequestContext(request))
else:
form = UserForm(tenant_list=tenants)
return render_to_response(
'syspanel_user_create.html',{
'form': form,
}, context_instance = template.RequestContext(request))
|
|
from datetime import datetime
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.utils.http import int_to_base36
from django.conf import settings
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import waffle
import amo
import amo.tests
from amo.helpers import urlparams
from amo.urlresolvers import reverse
from amo.tests.test_helpers import get_uploaded_file
from users.models import BlacklistedPassword, UserProfile
from users.forms import UserEditForm
class UserFormBase(amo.tests.TestCase):
fixtures = ['users/test_backends']
def setUp(self):
self.user = User.objects.get(id='4043307')
self.user_profile = self.user.get_profile()
self.uidb36 = int_to_base36(self.user.id)
self.token = default_token_generator.make_token(self.user)
class TestSetPasswordForm(UserFormBase):
def _get_reset_url(self):
return "/en-US/firefox/users/pwreset/%s/%s" % (self.uidb36, self.token)
def test_url_fail(self):
r = self.client.get('/users/pwreset/junk/', follow=True)
eq_(r.status_code, 404)
r = self.client.get('/en-US/firefox/users/pwreset/%s/12-345' %
self.uidb36)
self.assertContains(r, "Password reset unsuccessful")
def test_set_fail(self):
url = self._get_reset_url()
r = self.client.post(url, {'new_password1': '', 'new_password2': ''})
self.assertFormError(r, 'form', 'new_password1',
"This field is required.")
self.assertFormError(r, 'form', 'new_password2',
"This field is required.")
r = self.client.post(url, {'new_password1': 'onelonger',
'new_password2': 'twolonger'})
self.assertFormError(r, 'form', 'new_password2',
"The two password fields didn't match.")
def test_set_blacklisted(self):
BlacklistedPassword.objects.create(password='password')
url = self._get_reset_url()
r = self.client.post(url, {'new_password1': 'password',
'new_password2': 'password'})
self.assertFormError(r, 'form', 'new_password1',
'That password is not allowed.')
def test_set_short(self):
url = self._get_reset_url()
r = self.client.post(url, {'new_password1': 'short',
'new_password2': 'short'})
self.assertFormError(r, 'form', 'new_password1',
'Must be 8 characters or more.')
def test_set_success(self):
url = self._get_reset_url()
assert self.user_profile.check_password('testlonger') is False
self.client.post(url, {'new_password1': 'testlonger',
'new_password2': 'testlonger'})
self.user_profile = User.objects.get(id='4043307').get_profile()
assert self.user_profile.check_password('testlonger')
eq_(self.user_profile.userlog_set
.filter(activity_log__action=amo.LOG.CHANGE_PASSWORD.id)
.count(), 1)
class TestPasswordResetForm(UserFormBase):
def test_request_fail(self):
r = self.client.post('/en-US/firefox/users/pwreset',
{'email': '[email protected]'})
eq_(len(mail.outbox), 0)
self.assertFormError(r, 'form', 'email',
("An email has been sent to the requested account with further "
"information. If you do not receive an email then please confirm "
"you have entered the same email address used during "
"account registration."))
def test_request_success(self):
self.client.post('/en-US/firefox/users/pwreset',
{'email': self.user.email})
eq_(len(mail.outbox), 1)
assert mail.outbox[0].subject.find('Password reset') == 0
assert mail.outbox[0].body.find('pwreset/%s' % self.uidb36) > 0
def test_amo_user_but_no_django_user(self):
# Password reset should work without a Django user.
self.user_profile.update(user=None, _signal=True)
self.user.delete()
self.client.post('/en-US/firefox/users/pwreset',
{'email': self.user.email})
eq_(len(mail.outbox), 1)
class TestUserDeleteForm(UserFormBase):
def test_bad_password(self):
self.client.login(username='[email protected]', password='foo')
data = {'password': 'wrong', 'confirm': True, }
r = self.client.post('/en-US/firefox/users/delete', data)
msg = "Wrong password entered!"
self.assertFormError(r, 'form', 'password', msg)
def test_not_confirmed(self):
self.client.login(username='[email protected]', password='foo')
data = {'password': 'foo'}
r = self.client.post('/en-US/firefox/users/delete', data)
self.assertFormError(r, 'form', 'confirm', 'This field is required.')
def test_success(self):
self.client.login(username='[email protected]', password='foo')
data = {'password': 'foo', 'confirm': True, }
self.client.post('/en-US/firefox/users/delete', data, follow=True)
# TODO XXX: Bug 593055
#self.assertContains(r, "Profile Deleted")
u = UserProfile.objects.get(id=4043307)
eq_(u.deleted, True)
eq_(u.email, None)
@patch('users.models.UserProfile.is_developer')
def test_developer_attempt(self, f):
"""A developer's attempt to delete one's self must be thwarted."""
f.return_value = True
self.client.login(username='[email protected]', password='foo')
data = {'password': 'foo', 'confirm': True, }
r = self.client.post('/en-US/firefox/users/delete', data, follow=True)
self.assertContains(r, 'You cannot delete your account')
class TestUserAdminForm(UserFormBase):
def test_long_hash(self):
self.client.login(username='[email protected]', password='foo')
data = {'password': 'sha512$32e15df727a054aa56cf69accc142d1573372641a176aab9b0f1458e27dc6f3b$5bd3bd7811569776a07fbbb5e50156aa6ebdd0bec9267249b57da065340f0324190f1ad0d5f609dca19179a86c64807e22f789d118e6f7109c95b9c64ae8f619',
'username': 'alice',
'last_login': '2010-07-03 23:03:11',
'date_joined': '2010-07-03 23:03:11'}
r = self.client.post(reverse('admin:auth_user_change',
args=[self.user.id]),
data)
eq_(pq(r.content)('#user_form div.password .errorlist').text(), None)
def test_toolong_hash(self):
self.client.login(username='[email protected]', password='foo')
data = {'password': 'sha512$32e15df727a054aa56cf69accc142d1573372641a176aab9b0f1458e27dc6f3b$5bd3bd7811569776a07fbbb5e50156aa6ebdd0bec9267249b57da065340f0324190f1ad0d5f609dca19179a86c64807e22f789d118e6f7109c95b9c64ae8f6190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'username': 'alice'}
r = self.client.post(reverse('admin:auth_user_change',
args=[self.user.id]),
data)
eq_(pq(r.content)('#id_password strong').text(),
'algorithm salt hash')
class TestUserEditForm(UserFormBase):
def setUp(self):
super(TestUserEditForm, self).setUp()
self.client.login(username='[email protected]', password='foo')
self.url = reverse('users.edit')
def test_no_names(self):
data = {'username': '',
'email': '[email protected]', }
r = self.client.post(self.url, data)
self.assertFormError(r, 'form', 'username', 'This field is required.')
def test_no_real_name(self):
data = {'username': 'blah',
'email': '[email protected]', }
r = self.client.post(self.url, data, follow=True)
self.assertContains(r, 'Profile Updated')
def test_set_wrong_password(self):
data = {'email': '[email protected]',
'oldpassword': 'wrong',
'password': 'new',
'password2': 'new', }
r = self.client.post(self.url, data)
self.assertFormError(r, 'form', 'oldpassword',
'Wrong password entered!')
def test_set_unmatched_passwords(self):
data = {'email': '[email protected]',
'oldpassword': 'foo',
'password': 'longer123',
'password2': 'longer1234', }
r = self.client.post(self.url, data)
self.assertFormError(r, 'form', 'password2',
'The passwords did not match.')
def test_set_new_passwords(self):
data = {'username': 'jbalogh',
'email': '[email protected]',
'oldpassword': 'foo',
'password': 'longer123',
'password2': 'longer123', }
r = self.client.post(self.url, data, follow=True)
self.assertContains(r, 'Profile Updated')
def test_long_data(self):
data = {'username': 'jbalogh',
'email': '[email protected]',
'oldpassword': 'foo',
'password': 'new',
'password2': 'new', }
for field, length in (('username', 50), ('display_name', 50),
('location', 100), ('occupation', 100)):
data[field] = 'x' * (length + 1)
r = self.client.post(self.url, data, follow=True)
err = u'Ensure this value has at most %s characters (it has %s).'
self.assertFormError(r, 'form', field, err % (length, length + 1))
@patch('amo.models.ModelBase.update')
def test_photo_modified(self, update_mock):
dummy = Mock()
dummy.user = self.user
data = {'username': self.user_profile.username,
'email': self.user_profile.email}
files = {'photo': get_uploaded_file('transparent.png')}
form = UserEditForm(data, files=files, instance=self.user_profile,
request=dummy)
assert form.is_valid()
form.save()
assert update_mock.called
class TestAdminUserEditForm(UserFormBase):
fixtures = ['base/users']
def setUp(self):
super(TestAdminUserEditForm, self).setUp()
self.client.login(username='[email protected]', password='password')
self.url = reverse('users.admin_edit', args=[self.user.id])
def test_delete_link(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('a.delete').attr('href'),
reverse('admin:users_userprofile_delete', args=[self.user.id]))
class TestUserLoginForm(UserFormBase):
def _get_login_url(self):
return "/en-US/firefox/users/login"
def test_credential_fail(self):
url = self._get_login_url()
r = self.client.post(url, {'username': '', 'password': ''})
self.assertFormError(r, 'form', 'username', "This field is required.")
self.assertFormError(r, 'form', 'password', "This field is required.")
r = self.client.post(url, {'username': '[email protected]',
'password': 'wrongpassword'})
self.assertFormError(r, 'form', '', ("Please enter a correct username "
"and password. Note that both "
"fields are case-sensitive."))
def test_credential_success(self):
user = UserProfile.objects.get(email='[email protected]')
url = self._get_login_url()
r = self.client.post(url, {'username': user.email,
'password': 'foo'}, follow=True)
eq_(pq(r.content.decode('utf-8'))('.account .user').text(),
user.display_name)
eq_(pq(r.content)('.account .user').attr('title'), user.email)
r = self.client.post(url, {'username': user.email,
'password': 'foo',
'rememberme': 1}, follow=True)
eq_(pq(r.content.decode('utf-8'))('.account .user').text(),
user.display_name)
eq_(pq(r.content)('.account .user').attr('title'), user.email)
# Subtract 100 to give some breathing room
age = settings.SESSION_COOKIE_AGE - 100
assert self.client.session.get_expiry_age() > age
def test_redirect_after_login(self):
url = urlparams(self._get_login_url(), to="/en-US/firefox/about")
r = self.client.post(url, {'username': '[email protected]',
'password': 'foo'}, follow=True)
self.assertRedirects(r, '/en-US/about')
# Test a valid domain. Note that assertRedirects doesn't work on
# external domains
url = urlparams(self._get_login_url(), to="/addon/new",
domain="builder")
r = self.client.post(url, {'username': '[email protected]',
'password': 'foo'}, follow=True)
to, code = r.redirect_chain[0]
self.assertEqual(to, 'https://builder.addons.mozilla.org/addon/new')
self.assertEqual(code, 302)
def test_redirect_after_login_evil(self):
url = urlparams(self._get_login_url(), to='http://foo.com')
r = self.client.post(url, {'username': '[email protected]',
'password': 'foo'}, follow=True)
self.assertRedirects(r, '/en-US/firefox/')
def test_redirect_after_login_domain(self):
url = urlparams(self._get_login_url(), to='/en-US/firefox',
domain='http://evil.com')
r = self.client.post(url, {'username': '[email protected]',
'password': 'foo'}, follow=True)
self.assertRedirects(r, '/en-US/firefox/')
def test_unconfirmed_account(self):
url = self._get_login_url()
self.user_profile.confirmationcode = 'blah'
self.user_profile.save()
r = self.client.post(url, {'username': '[email protected]',
'password': 'foo'}, follow=True)
self.assertNotContains(r, "Welcome, Jeff")
self.assertContains(r, "A link to activate your user account")
self.assertContains(r, "If you did not receive the confirmation")
@patch.object(settings, 'APP_PREVIEW', True)
def test_no_register(self):
res = self.client.get(self._get_login_url())
        assert 'Create an Add-ons Account' not in res.content
@patch.object(settings, 'APP_PREVIEW', False)
def test_yes_register(self):
res = self.client.get(self._get_login_url())
self.assertContains(res, 'Create an Add-ons Account')
def test_disabled_account(self):
url = self._get_login_url()
self.user_profile.deleted = True
self.user_profile.save()
r = self.client.post(url, {'username': '[email protected]',
'password': 'foo'}, follow=True)
self.assertNotContains(r, "Welcome, Jeff")
self.assertContains(r, 'Please enter a correct username and password. '
'Note that both fields are case-sensitive.')
def test_successful_login_logging(self):
t = datetime.now()
# microsecond is not saved in the db
t = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
url = self._get_login_url()
self.client.post(url, {'username': '[email protected]',
'password': 'foo'}, follow=True)
u = UserProfile.objects.get(email='[email protected]')
eq_(u.failed_login_attempts, 0)
eq_(u.last_login_attempt_ip, '127.0.0.1')
eq_(u.last_login_ip, '127.0.0.1')
assert u.last_login_attempt == t or u.last_login_attempt > t
def test_failed_login_logging(self):
t = datetime.now()
# microsecond is not saved in the db
t = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
url = self._get_login_url()
self.client.post(url, {'username': '[email protected]',
'password': 'wrongpassword'})
u = UserProfile.objects.get(email='[email protected]')
eq_(u.failed_login_attempts, 4)
eq_(u.last_login_attempt_ip, '127.0.0.1')
assert u.last_login_ip != '127.0.0.1'
assert u.last_login_attempt == t or u.last_login_attempt > t
class TestUserRegisterForm(UserFormBase):
def test_no_info(self):
data = {'email': '',
'password': '',
'password2': '',
'username': '', }
r = self.client.post('/en-US/firefox/users/register', data)
msg = "This field is required."
self.assertFormError(r, 'form', 'email', msg)
self.assertFormError(r, 'form', 'username', msg)
def test_register_existing_account(self):
data = {'email': '[email protected]',
'password': 'xxxlonger',
'password2': 'xxxlonger',
'username': 'xxx', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'email',
'User profile with this Email already exists.')
eq_(len(mail.outbox), 0)
def test_set_unmatched_passwords(self):
data = {'email': '[email protected]',
'password': 'new1longer',
'password2': 'new2longer', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'password2',
'The passwords did not match.')
eq_(len(mail.outbox), 0)
def test_invalid_username(self):
data = {'email': '[email protected]',
'password': 'xxxlonger',
'password2': 'xxxlonger',
'username': 'Todd/Rochelle', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'username',
'Enter a valid username consisting of letters, numbers, '
'underscores or hyphens.')
def test_blacklisted_username(self):
data = {'email': '[email protected]',
'password': 'xxxlonger',
'password2': 'xxxlonger',
'username': 'IE6Fan', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'username',
'This username cannot be used.')
def test_alldigit_username(self):
data = {'email': '[email protected]',
'password': 'xxxlonger',
'password2': 'xxxlonger',
'username': '8675309', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'username',
'Usernames cannot contain only digits.')
def test_blacklisted_password(self):
BlacklistedPassword.objects.create(password='password')
data = {'email': '[email protected]',
'password': 'password',
'password2': 'password',
'username': 'IE6Fan', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'password',
'That password is not allowed.')
def test_password_length(self):
BlacklistedPassword.objects.create(password='password')
data = {'email': '[email protected]',
'password': 'short',
'password2': 'short',
'username': 'IE6Fan', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'password',
'Must be 8 characters or more.')
def test_invalid_email_domain(self):
data = {'email': '[email protected]',
'password': 'xxxlonger',
'password2': 'xxxlonger',
'username': 'trulyfake', }
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'email',
'Please use an email address from a different '
'provider to complete your registration.')
def test_invalid_homepage(self):
data = {'homepage': 'example.com:alert(String.fromCharCode(88,83,83)',
'email': ''}
m = 'This URL has an invalid format. '
m += 'Valid URLs look like http://example.com/my_page.'
r = self.client.post('/en-US/firefox/users/register', data)
self.assertFormError(r, 'form', 'homepage', m)
def test_already_logged_in(self):
self.client.login(username='[email protected]', password='foo')
r = self.client.get('/users/register', follow=True)
self.assertContains(r, "You are already logged in")
self.assertNotContains(r, '<button type="submit">Register</button>')
def test_browserid_registered(self):
u = UserProfile.objects.create(email='[email protected]',
source=amo.LOGIN_SOURCE_BROWSERID,
password='')
data = {'email': u.email}
r = self.client.post('/en-US/firefox/users/register', data)
self.assertContains(r, 'already have an account')
def good_data(self):
return {
'email': '[email protected]',
'password': 'carebears',
'password2': 'carebears',
'username': 'BigJC',
'homepage': ''
}
@patch('captcha.fields.ReCaptchaField.clean')
def test_success(self, clean):
clean.return_value = ''
r = self.client.post('/en-US/firefox/users/register', self.good_data(),
follow=True)
self.assertContains(r, "Congratulations!")
u = User.objects.get(email='[email protected]').get_profile()
assert u.confirmationcode
eq_(len(mail.outbox), 1)
assert mail.outbox[0].subject.find('Please confirm your email') == 0
assert mail.outbox[0].body.find('%s/confirm/%s' %
(u.id, u.confirmationcode)) > 0
def test_long_data(self):
data = {'username': 'jbalogh',
'email': '[email protected]',
'oldpassword': 'foo',
'password': 'new',
'password2': 'new', }
for field, length in (('username', 50), ('display_name', 50)):
data[field] = 'x' * (length + 1)
r = self.client.post(reverse('users.register'), data, follow=True)
err = u'Ensure this value has at most %s characters (it has %s).'
self.assertFormError(r, 'form', field, err % (length, length + 1))
class TestBlacklistedUsernameAdminAddForm(UserFormBase):
def test_no_usernames(self):
self.client.login(username='[email protected]', password='foo')
url = reverse('admin:users_blacklistedusername_add')
data = {'usernames': "\n\n", }
r = self.client.post(url, data)
msg = 'Please enter at least one username to blacklist.'
self.assertFormError(r, 'form', 'usernames', msg)
def test_add(self):
self.client.login(username='[email protected]', password='foo')
url = reverse('admin:users_blacklistedusername_add')
data = {'usernames': "IE6Fan\nfubar\n\n", }
r = self.client.post(url, data)
msg = '1 new values added to the blacklist. '
msg += '1 duplicates were ignored.'
self.assertContains(r, msg)
self.assertNotContains(r, 'fubar')
class TestBlacklistedEmailDomainAdminAddForm(UserFormBase):
def test_no_domains(self):
self.client.login(username='[email protected]', password='foo')
url = reverse('admin:users_blacklistedemaildomain_add')
data = {'domains': "\n\n", }
r = self.client.post(url, data)
msg = 'Please enter at least one e-mail domain to blacklist.'
self.assertFormError(r, 'form', 'domains', msg)
def test_add(self):
self.client.login(username='[email protected]', password='foo')
url = reverse('admin:users_blacklistedemaildomain_add')
data = {'domains': "mailinator.com\ntrash-mail.de\n\n", }
r = self.client.post(url, data)
msg = '1 new values added to the blacklist. '
msg += '1 duplicates were ignored.'
self.assertContains(r, msg)
self.assertNotContains(r, 'fubar')
|
|
"""Network device management.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import io
import logging
import os
import enum
import six
from treadmill import subproc
_LOGGER = logging.getLogger(__name__)
_SYSFS_NET = '/sys/class/net'
_BRCTL_EXE = 'brctl'
_IP_EXE = 'ip'
_PROC_CONF_PROXY_ARP = '/proc/sys/net/ipv4/conf/{dev}/proxy_arp'
_PROC_CONF_FORWARDING = '/proc/sys/net/ipv4/conf/{dev}/forwarding'
_PROC_CONF_ARP_IGNORE = '/proc/sys/net/ipv4/conf/{dev}/arp_ignore'
_PROC_CONF_ROUTE_LOCALNET = '/proc/sys/net/ipv4/conf/{dev}/route_localnet'
_PROC_NET_LOCAL_PORT_RANGE = '/proc/sys/net/ipv4/ip_local_port_range'
def dev_mtu(devname):
"""Read a device's MTU.
:param ``str`` devname:
The name of the network device.
:returns:
``int`` - Device MTU
:raises:
OSError, IOError if the device doesn't exist
"""
return int(_get_dev_attr(devname, 'mtu'))
def dev_mac(devname):
"""Read a device's MAC address.
:param ``str`` devname:
The name of the network device.
:returns:
``str`` - Device MAC address
:raises:
OSError, IOError if the device doesn't exist
"""
return six.text_type(_get_dev_attr(devname, 'address'))
def dev_alias(devname):
"""Read a device's defined alias.
:param ``str`` devname:
The name of the network device.
:returns:
``str`` - Device alias
:raises:
OSError, IOError if the device doesn't exist
"""
return six.text_type(_get_dev_attr(devname, 'ifalias'))
class DevType(enum.IntEnum):
"""Network device types.
see include/uapi/linux/if_arp.h
"""
# NOTE: Missing types below. Add as needed.
Ether = 1
GRE = 778
Loopback = 772
def dev_list(typefilter=None):
"""List network devices.
:returns:
``list(str)`` - List of device names
"""
all_devs = os.listdir(_SYSFS_NET)
if not typefilter:
return all_devs
return [
devname
for devname in all_devs
if int(_get_dev_attr(devname, 'type')) == DevType(typefilter).value
]
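# Example (illustrative): dev_list(DevType.Ether) returns only Ethernet
# devices, while dev_list() returns every entry under /sys/class/net.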
class DevState(enum.Enum):
"""Network device state.
https://www.kernel.org/doc/Documentation/networking/operstates.txt
"""
UP = 'up' # pylint: disable=C0103
DOWN = 'down'
UNKNOWN = 'unknown'
NOT_PRESENT = 'notpresent'
LOWER_LAYER_DOWN = 'lowerlayerdown'
TESTING = 'testing'
DORMANT = 'dormant'
def dev_state(devname):
"""Read a device's state.
:param ``str`` devname:
The name of the network device.
:returns:
``DevState`` - Device state
:raises:
OSError, IOError if the device doesn't exist
"""
return DevState(_get_dev_attr(devname, 'operstate'))
def dev_speed(devname):
"""Read a device's link speed.
:param ``str`` devname:
The name of the network device.
:returns:
``int`` - Device link speed
:raises:
OSError, IOError (ENOENT) if the device doesn't exist
"""
try:
return int(_get_dev_attr(devname, 'speed'))
except IOError as err:
if err.errno == errno.EINVAL:
_LOGGER.warning(
'Unable to read speed information from %s', devname
)
return 0
def link_set_up(devname):
"""Bring a network device up.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'set',
'dev', devname,
'up'
],
)
def link_set_down(devname):
"""Bring a network device down.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'set',
'dev', devname,
'down'
],
)
def link_set_name(devname, newname):
"""Set a network device's name.
:param ``str`` devname:
The current name of the network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'set',
'dev', devname,
'name', newname,
],
)
def link_set_alias(devname, alias):
"""Set a network device's alias.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'set',
'dev', devname,
'alias', alias,
],
)
def link_set_mtu(devname, mtu):
"""Set a network device's MTU.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'set',
'dev', devname,
'mtu', six.text_type(mtu),
],
)
def link_set_netns(devname, namespace):
"""Set a network device's namespace.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'set',
'dev', devname,
'netns', six.text_type(namespace),
],
)
def link_set_addr(devname, macaddr):
"""Set mac address of the link
:param ``str`` devname:
The name of the network device.
:param ``str`` macaddr:
The mac address.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'set',
'dev', devname,
'address', macaddr,
],
)
def link_add_veth(veth0, veth1):
"""Create a virtual ethernet device pair.
:param ``str`` veth0:
The name of the first network device.
:param ``str`` veth1:
The name of the second network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'add', 'name', veth0,
'type', 'veth',
'peer', 'name', veth1
],
)
def link_del_veth(devname):
"""Delete a virtual ethernet device.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_IP_EXE, 'link',
'delete',
'dev', devname,
'type', 'veth',
],
)
def addr_add(addr, devname, ptp_addr=None, addr_scope='link'):
"""Add an IP address to a network device.
:param ``str`` addr:
IP address.
:param ``str`` devname:
The name of the network device.
:param ``str`` ptp_addr:
Peer address on Point-to-Point links.
"""
if ptp_addr is not None:
ipaddr = [addr, 'peer', ptp_addr]
else:
ipaddr = [addr]
subproc.check_call(
[
'ip', 'addr',
'add',
] + ipaddr + [
'dev', devname,
'scope', addr_scope,
],
)
def route_add(dest, rtype='unicast',
via=None, devname=None, src=None, route_scope=None):
"""Define a new entry in the routing table.
:param ``str`` devname:
The name of the network device.
"""
assert (rtype == 'unicast' and (devname or via)) or rtype == 'blackhole'
route = [
'ip', 'route',
'add',
rtype, dest,
]
if rtype == 'unicast':
if via is not None:
route += ['via', via]
if devname is not None:
route += ['dev', devname]
if src is not None:
route += ['src', src]
if route_scope is not None:
route += ['scope', route_scope]
subproc.check_call(route)
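# Illustrative calls (addresses and device names are made up):
#   route_add('192.168.10.0/24', via='10.0.0.1', devname='eth0')
#   route_add('10.99.0.0/16', rtype='blackhole')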
def bridge_create(devname):
"""Create a new network bridge device.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_BRCTL_EXE,
'addbr',
devname
],
)
def bridge_delete(devname):
"""Delete a new network bridge device.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_BRCTL_EXE,
'delbr',
devname
],
)
def bridge_setfd(devname, forward_delay):
"""Configure the forward-delay of a bridge device.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_BRCTL_EXE,
'setfd',
devname,
six.text_type(forward_delay),
],
)
def bridge_addif(devname, interface):
"""Add an interface to a bridge device.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_BRCTL_EXE,
'addif',
devname,
interface,
],
)
def bridge_delif(devname, interface):
"""Remove an interface from a bridge device.
:param ``str`` devname:
The name of the network device.
"""
subproc.check_call(
[
_BRCTL_EXE,
'delif',
devname,
interface,
],
)
def bridge_forward_delay(devname):
"""Read a bridge device's forward delay timer.
:returns ``int``:
Bridge forward delay timer.
:raises:
OSError, IOError (ENOENT) if the device doesn't exist.
"""
return int(_get_dev_attr(devname, 'bridge/forward_delay'))
def bridge_brif(devname):
"""Read a bridge device's slave devices.
:returns ``list``:
List of slave device names.
:raises:
OSError, IOError (ENOENT) if the device doesn't exist or if the device
is not a bridge.
"""
return list(_get_dev_attr(devname, 'brif', dirattr=True))
def _get_dev_attr(devname, attr, dirattr=False):
"""
:raises:
OSError, IOError if the device doesn't exist
"""
path = os.path.join(_SYSFS_NET, devname, attr)
if dirattr:
attr = os.listdir(path)
else:
with io.open(path) as f:
attr = f.read().strip()
return attr
def gre_create(grename, devname,
localaddr,
remoteaddr=None,
key=None):
"""Create a new GRE interface.
"""
cmd = [
'ip', 'tunnel',
'add', grename,
'mode', 'gre',
'dev', devname,
'local', localaddr,
]
if remoteaddr is not None:
cmd += ['remote', remoteaddr]
if key is not None:
cmd += ['key', hex(key)]
subproc.check_call(cmd)
def gre_change(grename,
remoteaddr=None,
key=None):
"""Change an existing GRE interface.
"""
assert remoteaddr or key
cmd = [
'ip', 'tunnel',
'change', grename,
'mode', 'gre',
]
if remoteaddr is not None:
cmd += ['remote', remoteaddr]
if key is not None:
cmd += ['key', hex(key)]
subproc.check_call(cmd)
def gre_delete(grename):
"""Delete a GRE interface.
"""
subproc.check_call(
[
'ip', 'tunnel',
'del', grename,
'mode', 'gre',
],
)
def net_conf_ip_port_range(lower, upper):
"""Configure the usable ephemeral port range.
"""
assert lower <= upper
_proc_sys_write(
_PROC_NET_LOCAL_PORT_RANGE,
'{lower:d} {upper:d}'.format(
lower=lower,
upper=upper
)
)
def dev_conf_route_localnet_set(eth, enabled):
"""Enable Route Localnet on the given device
:param ``str`` eth:
The name of the ethernet device.
:param ``bool`` enabled:
Enable or disable the feature.
"""
_proc_sys_write(
_PROC_CONF_ROUTE_LOCALNET.format(dev=eth),
int(enabled),
)
def dev_conf_proxy_arp_set(eth, enabled):
"""Enable Proxy-Arp on the given device
:param ``str`` eth:
The name of the ethernet device.
:param ``bool`` enabled:
Enable or disable the feature.
"""
_proc_sys_write(
_PROC_CONF_PROXY_ARP.format(dev=eth),
int(enabled),
)
def dev_conf_forwarding_set(eth, enabled):
"""Enable IP Forwarding on the given device
:param ``str`` eth:
The name of the ethernet device.
:param ``bool`` enabled:
Enable or disable the feature.
"""
_proc_sys_write(
_PROC_CONF_FORWARDING.format(dev=eth),
int(enabled),
)
# FIXME(boysson): Should be an enum
# Reply for any local target IP address, configured on any interface
ARP_IGNORE_REPLY_ANY_LOCAL = 0
# Do not reply for local addresses configured with scope host; only requests
# for global and link addresses are answered
ARP_IGNORE_DO_NOT_REPLY_ANY_ON_HOST = 3
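# A possible enum for the arp_ignore values, per the FIXME above. This is a
# sketch only: nothing below uses it, and the name is an assumption.
class _ArpIgnoreMode(enum.IntEnum):
    """Candidate replacement for the ARP_IGNORE_* constants above."""
    REPLY_ANY_LOCAL = ARP_IGNORE_REPLY_ANY_LOCAL
    DO_NOT_REPLY_ANY_ON_HOST = ARP_IGNORE_DO_NOT_REPLY_ANY_ON_HOST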
def dev_conf_arp_ignore_set(eth, value):
"""Set the arp_ignore flag on the given device
Define different modes for sending replies in response to received ARP
requests that resolve local target IP addresses
:param ``str`` eth:
The name of the ethernet device.
:param ``int`` value:
Set arp_ignore to this value.
"""
_proc_sys_write(
_PROC_CONF_ARP_IGNORE.format(dev=eth),
int(value),
)
def _proc_sys_write(path, value):
"""Set a sysctl value to `value`.
"""
assert path.startswith('/proc/sys/')
_LOGGER.debug('Setting %r to %r', path, value)
with io.open(path, 'w') as f:
f.write(six.text_type(value))
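if __name__ == '__main__':
    # Minimal read-only smoke test sketch (assumes a Linux host with the
    # usual 'lo' loopback device; purely illustrative).
    for _devname in dev_list(DevType.Ether) + ['lo']:
        print(_devname, dev_state(_devname).value, dev_mtu(_devname))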
|
|
"""
File : controller.py
Date : April, 2017
Author : eugene liyai
Desc : Controller file processes requests from the API endpoints
"""
# ============================================================================
# necessary imports
# ============================================================================
import os
import hashlib
import json
from math import ceil
from datetime import datetime
from flask import jsonify, request, abort, make_response, session
from flask_login import login_required, login_user, logout_user, current_user
from bucketlist.app import login_manager
from bucketlist.controllers.database_controller import DatabaseController
from bucketlist.controllers.authentication_controller import encode_auth_token, check_token, decode_auth_token
#
# Database engine
# Postgres connection postgresql+psycopg2://user:password@host/database
#
db_engine = os.environ['BUCKETLIST_SQLALCHEMY_DATABASE_URI']
PAGE_SIZE = 2
DATA_CONTROLLER = DatabaseController(db_engine)
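# Example configuration (assumed values, for illustration only):
#   export BUCKETLIST_SQLALCHEMY_DATABASE_URI='postgresql+psycopg2://bucketlist:secret@localhost/bucketlist'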
def initialize_database():
"""
The method initializes tables and relations in the database.
:param : None
:return: None
"""
DATA_CONTROLLER.initialize_database()
def populate_database():
"""
The method populates database tables with valid data.
:param : None
:return: None
"""
DATA_CONTROLLER.populate_database()
def drop_tables():
"""
The method drops tables and relations in the database.
:param : None
:return: None
"""
DATA_CONTROLLER.drop_tables()
def login():
"""
    The method validates user credentials and, on success, logs the user in
    and issues an authentication token.
    :param : None
    :return: JSON response with a status message and, on success, the token
"""
data = request.data
data_dict = json.loads(data)
username = data_dict['username']
password = data_dict['password']
try:
validation_return = DATA_CONTROLLER.user_login_authentication(username=username, password=password)
if validation_return['status'] is True:
user = validation_return['User']
login_user(user, remember=True)
session['user_id'] = user.user_id
auth_token = encode_auth_token(user.user_id)
if auth_token:
response_data = {
'STATUS': 'success',
'MESSAGE': 'Successfully logged in.',
'TOKEN': auth_token
}
data_response = make_response(jsonify(response_data), 200)
data_response.headers['STATUS'] = 'success'
data_response.headers['TOKEN'] = auth_token
return data_response
else:
response_data = {
'STATUS': 'fail',
'MESSAGE': 'Username or password provided does not match.'
}
            return make_response(jsonify(response_data), 401)
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response
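# Example login request (illustrative; the URL rule that maps to login() is
# registered elsewhere, so the path below is an assumption):
#   curl -i -X POST http://localhost:5000/api/v1/auth/login \
#        -H 'Content-Type: application/json' \
#        -d '{"username": "demo", "password": "secret"}'
# A successful response carries the auth token in both the JSON body and the
# TOKEN response header.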
@check_token
def users(user_id=None, serialize=True):
"""
    The method returns users in a JSON response. A hash of the serialized
    payload is set as the response ETag to support caching.
    :param user_id: user id intended to be searched
    :param serialize: Serialize helps indicate the format of the response
    :return: Json format or plain text depending on the serialize parameter
"""
users = DATA_CONTROLLER.get_user_by_id(user_id=user_id, serialize=True)
page = request.args.get("limit")
number_of_pages = None
pages = []
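    # Pagination sketch: with PAGE_SIZE = 2 and 5 users, "?limit=2" yields
    # number_of_pages = 3 and returns users[2:4] (a zero-based slice).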
if page:
number_of_pages = int(ceil(float(len(users)) / PAGE_SIZE))
converted_page = int(page)
if converted_page > number_of_pages or converted_page < 0:
return make_response("", 404)
from_index = (converted_page - 1) * PAGE_SIZE
to_index = from_index + PAGE_SIZE
users = users[from_index:to_index]
if number_of_pages:
pages = range(1, number_of_pages + 1)
if serialize:
data = {
"users": users,
"total": len(users),
"pages": pages
}
json_data = json.dumps(data)
response = make_response(jsonify(data), 200)
# Caching
response.headers["ETag"] = str(hashlib.sha256(json_data).hexdigest())
# Entity tag uniquely identifies request
response.headers["Cache-Control"] = "private, max-age=300"
return response
def add_user():
data = request.data
data_dict = json.loads(data)
first_name = data_dict["first_name"]
last_name = data_dict["last_name"]
email = data_dict["email"]
username = data_dict["username"]
password = data_dict["password"]
try:
new_user = DATA_CONTROLLER.create_user(first_name=first_name,
last_name=last_name,
email=email,
username=username,
password=password)
response_data = {
'STATUS': 'success',
'MESSAGE': 'Successfully registered.',
'USER': new_user
}
data_response = make_response(jsonify(response_data), 201)
data_response.headers['STATUS'] = 'success'
return data_response
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response
@check_token
def update_user(user_id):
"""
    The method updates the user with the provided user_id and returns a json response.
:param user_id: user id of the user to be updated
:return: User json response
"""
data = request.data
data_dict = json.loads(data)
new_user = {
"first_name": data_dict["first_name"],
"last_name": data_dict["last_name"],
"email": data_dict["email"],
"username": data_dict["username"]
}
updated_user = DATA_CONTROLLER.update_user(user_id, new_user)
if not updated_user:
data = {
"STATUS": 'fail',
"MESSAGE": 'Error updating user'
}
response = make_response(jsonify(data), 500)
return response
else:
data = {
"STATUS": 'success',
"updated_user": updated_user
}
response = make_response(jsonify(data), 201)
return response
@check_token
def delete_user(user_id):
"""
The method deletes user with provided user_id.
:param user_id: user id of the user to be deleted
:return: http response
"""
try:
if DATA_CONTROLLER.delete_user(user_id):
return make_response("", 200)
else:
return make_response("", 404)
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response
@check_token
def create_bucketlist():
"""
The method adds a new bucketlist under the current user.
:param : None
:return: http response
"""
try:
data = request.data
data_dict = json.loads(data)
bucketlist_name = data_dict["name"]
user = current_user
auth_token = request.headers.get('TOKEN')
resp = decode_auth_token(auth_token)
if resp['status'] is False:
data = {
'STATUS': 'fail',
'MESSAGE': 'Invalid token provided'
}
data_response = make_response(jsonify(data), 401)
return data_response
new_bucket_name = DATA_CONTROLLER.create_bucketlist(bucketlist_name, resp['decode_data'])
response_data = {
'STATUS': 'success',
'MESSAGE': 'Bucket list successfully created.',
'BUCKET_LIST_NAME': new_bucket_name
}
data_response = make_response(jsonify(response_data), 201)
data_response.headers['STATUS'] = 'success'
return data_response
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response
@check_token
def bucketlist(bucket_id=None, serialize=True):
"""
    The method returns the current user's bucket lists as a json response.
    :param bucket_id: id of the bucket list to be retrieved; all bucket lists are returned when omitted
    :param serialize: Serialize helps indicate the format of the response
    :return: Json format or plain text depending on the serialize parameter
"""
    auth_token = request.headers.get('TOKEN')
    resp = decode_auth_token(auth_token)
    if resp['status'] and resp['decode_data']:
        bucketlists = DATA_CONTROLLER.get_bucketlist_by_id(bucket_id=bucket_id, user=resp['decode_data'],
                                                           serialize=True)
        page = request.args.get("limit")  # page number supplied via the "limit" query parameter
        number_of_pages = None
        pages = []
        if page:
            number_of_pages = int(ceil(float(len(bucketlists)) / PAGE_SIZE))
            converted_page = int(page)
            if converted_page > number_of_pages or converted_page < 1:
                return make_response("", 404)
            from_index = (converted_page - 1) * PAGE_SIZE
            to_index = from_index + PAGE_SIZE
            bucketlists = bucketlists[from_index:to_index]
        if number_of_pages:
            pages = list(range(1, number_of_pages + 1))
        if serialize:
            data = {
                'STATUS': 'success',
                "bucketlists": bucketlists,
                "total": len(bucketlists),
                "pages": pages
            }
            json_data = json.dumps(data)
            response = make_response(jsonify(data), 200)
            # Entity tag uniquely identifies the response for client-side caching
            response.headers["ETag"] = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
            response.headers["Cache-Control"] = "private, max-age=300"
            return response
    else:
        response_object = {
            'STATUS': 'fail',
            'MESSAGE': 'Provide a valid auth token.'
        }
        return make_response(jsonify(response_object)), 401
@check_token
def update_bucketlist(bucket_id):
"""
    The method updates the bucket list with the provided id and returns a json response.
:param bucket_id: id of the bucket list to be updated
:return: bucket list json response
"""
data = request.data
data_dict = json.loads(data)
auth_token = request.headers.get('TOKEN')
resp = decode_auth_token(auth_token)
if resp['status'] is False:
data = {
'STATUS': 'fail',
'MESSAGE': 'Invalid token provided'
}
data_response = make_response(jsonify(data), 401)
return data_response
new_bucket = {
"bucketlist_name": data_dict["name"]
}
updated_bucket = DATA_CONTROLLER.update_bucketlist(bucket_id=bucket_id, new_bucketlist=new_bucket,
user=resp['decode_data'])
if updated_bucket:
data = {
'STATUS': 'success',
'bucketlist': updated_bucket
}
data_response = make_response(jsonify(data), 201)
data_response.headers['STATUS'] = 'success'
return data_response
else:
tmp_response = make_response("", 500)
return tmp_response
@check_token
def delete_bucketlist(bucket_id):
"""
The method deletes bucket list with provided id.
:param bucket_id: id of the bucket list to be deleted
:return: http response
"""
try:
if DATA_CONTROLLER.delete_bucketlist(bucket_id):
data = {
'STATUS': 'Success',
                'MESSAGE': 'Bucket list with id {0} successfully deleted'.format(bucket_id)
}
data_response = make_response(jsonify(data), 200)
data_response.headers['STATUS'] = 'success'
return data_response
else:
data = {
'STATUS': 'Error',
'MESSAGE': 'Bucketlist ID cannot be found, or database encountered an error.'
}
data_response = make_response(jsonify(data), 500)
data_response.headers['STATUS'] = 'fail'
return data_response
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response
@check_token
def item(item_id=None, bucket_id=None, serialize=True):
"""
    The method returns bucket list items as a json response.
    :param item_id: id of the item to be retrieved
    :param bucket_id: id of the bucket list to which the item belongs
    :param serialize: Serialize helps indicate the format of the response
    :return: Json format or plain text depending on the serialize parameter
"""
items = DATA_CONTROLLER.get_item_by_id(item_id=item_id, bucket_id=bucket_id, serialize=True)
if item_id:
if not items:
data = {
'STATUS': 'fail',
'MESSAGE': 'The user has no item with provided ID in any of the bucket lists'
}
data_response = make_response(jsonify(data), 404)
data_response.headers['STATUS'] = 'fail'
return data_response
page = request.args.get("limit")
number_of_pages = None
pages = []
if page:
number_of_pages = int(ceil(float(len(items)) / PAGE_SIZE))
converted_page = int(page)
if converted_page > number_of_pages or converted_page < 0:
return make_response("", 404)
from_index = (converted_page - 1) * PAGE_SIZE
to_index = from_index + PAGE_SIZE
items = items[from_index:to_index]
if number_of_pages:
pages = range(1, number_of_pages + 1)
if serialize:
data = {
"bucketlist_item": items,
"total": len(items),
"pages": pages
}
json_data = json.dumps(data)
response = make_response(jsonify(data), 200)
response.headers["ETag"] = str(hashlib.sha256(json_data).hexdigest())
response.headers["Cache-Control"] = "private, max-age=300"
return response
@check_token
def create_item(bucket_id):
"""
The method adds a new item under the current bucket list.
    :param bucket_id: id of the bucket list the item is added to
:return: http response
"""
data = request.data
data_dict = json.loads(data)
item_name = data_dict["name"]
item_description = data_dict["description"]
new_item_name = DATA_CONTROLLER.create_bucketlist_item(item_name, item_description, bucket_id)
data = {
"STATUS": 'success',
"bucket_list_item": new_item_name
}
response = make_response(jsonify(data), 201)
return response
@check_token
def update_item(item_id):
"""
    The method updates the item with the provided id and returns a json response.
:param item_id: id of the item to be updated
:return: item json response
"""
data = request.data
data_dict = json.loads(data)
date_completed = None
done = False
if data_dict["done"] == 'True':
done = True
date_completed = datetime.now()
new_item = {
"item_name": data_dict["name"],
"done": done,
"description": data_dict["description"],
"date_completed": date_completed
}
updated_item = DATA_CONTROLLER.update_bucketlist_item(item_id, new_item)
if not updated_item:
data = {
"STATUS": 'fail',
"MESSAGE": 'Error updating item'
}
response = make_response(jsonify(data), 500)
return response
else:
data = {
"STATUS": 'success',
"bucket_list_item": updated_item
}
response = make_response(jsonify(data), 201)
return response
@check_token
def delete_item(item_id):
"""
The method deletes item with the provided id.
:param item_id: id of the item to be deleted
:return: http response
"""
try:
if DATA_CONTROLLER.delete_bucketlist_item(item_id):
return make_response("", 200)
else:
return make_response("", 404)
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response
@check_token
def search(search_value):
"""
    The method searches the current user's bucket lists for the given value.
    :param search_value: value to be searched for
:return: http response
"""
auth_token = request.headers.get('TOKEN')
resp = decode_auth_token(auth_token)
if resp['status'] is False:
data = {
'STATUS': 'fail',
'MESSAGE': 'Invalid token provided'
}
data_response = make_response(jsonify(data), 401)
data_response.headers['STATUS'] = 'fail'
return data_response
search_result = DATA_CONTROLLER.search_database(search_value, resp['decode_data'], serialize=True)
if search_result:
response_data = {
'STATUS': 'success',
'SEARCH': search_result
}
data_response = make_response(jsonify(response_data), 200)
data_response.headers['STATUS'] = 'success'
return data_response
else:
response_data = {
'STATUS': 'fail',
'MESSAGE': 'No bucket list with search phrase found'
}
data_response = make_response(jsonify(response_data), 404)
data_response.headers['STATUS'] = 'fail'
return data_response
def authenticate():
    """
    The method checks whether the TOKEN header carries a valid auth token.
    :return: http response indicating whether the caller is authenticated
    """
auth_token = request.headers.get('TOKEN')
resp = decode_auth_token(auth_token)
if resp['status']:
response_data = {
'STATUS': 'success',
'MESSAGE': 'Authenticated'
}
data_response = make_response(jsonify(response_data), 200)
data_response.headers['STATUS'] = 'success'
return data_response
else:
response_data = {
'STATUS': 'fail',
'MESSAGE': 'Not authenticated'
}
data_response = make_response(jsonify(response_data), 401)
data_response.headers['STATUS'] = 'fail'
return data_response
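# Illustrative only: one way the view functions above could be mapped to URL rules on
# a Flask app. The app object, the paths and the HTTP methods below are assumptions made
# for the sake of the sketch; the project's real routing is defined elsewhere.
def _example_register_routes(app):
    app.add_url_rule('/auth/login', view_func=login, methods=['POST'])
    app.add_url_rule('/auth/register', view_func=add_user, methods=['POST'])
    app.add_url_rule('/auth/status', view_func=authenticate, methods=['GET'])
    app.add_url_rule('/users', view_func=users, methods=['GET'])
    app.add_url_rule('/bucketlists', view_func=create_bucketlist, methods=['POST'])
    app.add_url_rule('/bucketlists', view_func=bucketlist, methods=['GET'])
    app.add_url_rule('/bucketlists/<bucket_id>/items', view_func=create_item, methods=['POST'])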
|
|
from hashlib import new
from sys import exc_info
try:
from bson import ObjectId
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError, DuplicateKeyError
except ImportError:
    # pymongo/bson are optional; importing this module without them only fails
    # once MongoDBClient is actually used.
    pass
class MongoDBClient:
def __init__(self, aHostAndPort, aServerSelectionTimeoutMS=1000):
self.mClient = None
self.changeServer(aHostAndPort, aServerSelectionTimeoutMS)
def changeServer(self, aHostAndPort, aServerSelectionTimeoutMS):
if self.mClient is not None:
self.mClient.close()
self.mClient = MongoClient(host=aHostAndPort,
serverSelectionTimeoutMS=aServerSelectionTimeoutMS)
try:
self.mClient.server_info()
except ServerSelectionTimeoutError:
raise Exception(exc_info()[1])
def getItemsFromDB(self, aDB, aTable, aQuery={}, aProjection=None,
aSkip=None, aLimit=None, aSort=None):
try:
db = self.mClient[aDB]
table = db[aTable]
if aProjection is not None:
items = table.find(aQuery, aProjection)
else:
items = table.find(aQuery)
if aSkip is not None:
items = items.skip(aSkip)
if aLimit is not None:
items = items.limit(aLimit)
if aSort is not None:
items = items.sort(aSort)
return list(items)
except:
raise Exception(exc_info()[1])
def getItemsCount(self, aDB, aTable, aQuery={}):
try:
db = self.mClient[aDB]
table = db[aTable]
items = table.find(aQuery)
return items.count(True)
except:
raise Exception(exc_info()[1])
def insertToDB(self, aDB, aTable, aItems):
try:
db = self.mClient[aDB]
table = db[aTable]
if isinstance(aItems, list):
table.insert_many(aItems)
else:
table.insert_one(aItems)
except:
raise Exception(exc_info()[1])
def updateInDB(self, aDB, aTable, aItems):
try:
db = self.mClient[aDB]
table = db[aTable]
if isinstance(aItems, list):
for item in aItems:
try:
table.insert_one(item)
except DuplicateKeyError:
table.replace_one({'_id': item['_id']}, item)
else:
try:
table.insert_one(aItems)
except DuplicateKeyError:
table.replace_one({'_id': aItems['_id']}, aItems)
except:
raise Exception(exc_info()[1])
def deleteFromDB(self, aDB, aTable, aItems):
try:
db = self.mClient[aDB]
table = db[aTable]
if isinstance(aItems, list):
for item in aItems:
table.delete_one(item)
else:
table.delete_one(aItems)
except:
raise Exception(exc_info()[1])
def dropDB(self, aDB):
try:
self.mClient.drop_database(aDB)
except:
raise Exception(exc_info()[1])
def dropCollectionsInDB(self, aDB, aTable):
try:
db = self.mClient[aDB]
table = db[aTable]
table.drop()
except:
raise Exception(exc_info()[1])
def getDBs(self):
return self.mClient.database_names()
def getCollectionsFromDB(self, aDB):
try:
db = self.mClient[aDB]
return db.collection_names()
except:
raise Exception(exc_info()[1])
def aggregate(self, aDB, aTable, aAggregateExpression):
try:
db = self.mClient[aDB]
table = db[aTable]
return list(table.aggregate(aAggregateExpression))
except:
raise Exception(exc_info()[1])
def getUniqueItemsFromCollection(self, aDB, aTable, aCollection):
collection = []
inputIds = []
for item in aCollection:
inputIds.append(item['_id'])
if 0 < len(inputIds):
query = {'_id': {'$in': inputIds}}
items = self.getItemsFromDB(aDB, aTable, query)
dbIds = []
for item in items:
dbIds.append(item['_id'])
uniqueIds = list(set(inputIds) - set(dbIds))
for uniqueId in uniqueIds:
for inputItem in aCollection:
if uniqueId == inputItem['_id']:
collection.append(inputItem)
break
return collection
    @staticmethod
    def generateObjectId(aString):
        # Hash the string and keep the first 12 hex characters; encoded to bytes they
        # form the 12-byte binary value that ObjectId expects.
        h = new('sha256', aString.encode('utf-8'))
        s = h.hexdigest()[:12]
        return ObjectId(s.encode('utf-8'))
@staticmethod
def addObjectIdFieldAtCollection(aCollection):
for item in aCollection:
if (item.get('Path') and item.get('KB') and
item.get('Version') and item.get('Type') and
item.get('Language') and item.get('Date')):
s = '{}{}{}{}{}{}'.format(item['Path'],
item['KB'],
item['Version'],
item['Type'],
item['Language'],
item['Date']
)
item['_id'] = MongoDBClient.generateObjectId(s)
else:
s = []
keys = sorted(item.keys())
for key in keys:
if '_id' != key:
s.append('{}'.format(item[key]))
item['_id'] = MongoDBClient.generateObjectId(''.join(s))
return aCollection
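# A minimal usage sketch; the host, database and collection names are placeholders and a
# running MongoDB instance is assumed. Guarded so that importing this module stays free
# of side effects.
if __name__ == '__main__':
    client = MongoDBClient('localhost:27017')
    docs = MongoDBClient.addObjectIdFieldAtCollection([
        {'Path': '/tmp/example', 'KB': 'KB0001', 'Version': '1.0', 'Type': 'x64',
         'Language': 'en', 'Date': '2020-01-01'}])
    client.updateInDB('exampleDB', 'exampleTable', docs)       # insert, or replace on duplicate _id
    print(client.getItemsFromDB('exampleDB', 'exampleTable'))  # query everything back
    client.dropDB('exampleDB')                                 # clean up the placeholder database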
|
|
# coding: UTF-8
"""
Copyright (c) 2008-2010 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import gtk
import logging
from gettext import gettext as _
from .. import com
from .. import gui
widgets = None
serverSelection = None
RESPONSE_CONNECT = 3
def setup():
global widgets, serverSelection
if widgets:
return
widgets = gui.builder.load_dialog("servers")
sigdic = { "addButton_clicked_cb":
lambda w: openAddDialog(),
"editButton_clicked_cb":
lambda w: openEditDialog(),
"deleteButton_clicked_cb":
lambda w: openDeleteDialog(),
"serverRenderer_edited_cb":
serverNameEdited
}
widgets.connect_signals(sigdic)
# enable multiple selection
serverSelection = widgets.get_object("serverList").get_selection()
serverSelection.set_mode(gtk.SELECTION_MULTIPLE)
def addServer(name):
""" Add server from maki to the list store """
widgets.get_object("serverStore").append([name])
def retrieveServerlist():
""" Fetch server list from maki and get
infos about every server.
"""
widgets.get_object("serverStore").clear()
servers = com.sushi.server_list("","")
if servers:
for server in servers:
addServer(server)
def serverNameEdited(renderer, path, newText):
""" User edited column in serverView """
try:
oldText = widgets.get_object("serverStore")[path][0]
except IndexError:
return
com.sushi.server_rename(oldText, newText)
# at last, update the list from maki (caching would be better..)
retrieveServerlist()
def run(callback):
dialog = widgets.get_object("serverDialog")
retrieveServerlist()
main_window = gui.mgmt.widgets.get_object("main_window")
dialog.set_transient_for(main_window)
dialog.connect("response", dialog_response_cb, callback)
dialog.show()
def createServer(serverName, data):
""" Create a server in maki. """
for (k,v) in data.items():
com.sushi.server_set(serverName, "server", k, v)
def deleteServer(servername):
""" Remove server from Serverlist widget
and delete server in maki.
"""
serverList = widgets.get_object("serverStore")
for row in serverList:
if row[0] == servername:
serverList.remove(row.iter)
com.sushi.server_remove(servername, "", "")
def openAddDialog():
gui.dialogs.show_dialog("addServer", add_dialog_cb)
def openEditDialog():
view = widgets.get_object("serverList")
serverList = view.get_model()
path = view.get_cursor()[0]
servername = None
if not path:
d = gui.builder.information_dialog(
_("No server selected."),
_("You must select a server to edit it."))
d.connect("response", lambda w,i: w.destroy())
d.show_all()
return
    else:
        servername = serverList[path][0]
    if not servername:
        logging.error("openEditDialog: Error in retrieving the servername")
        return
    data = gui.dialogs.show_dialog("editServer", servername)
    if data:
        retrieveServerlist()
def openDeleteDialog():
view = widgets.get_object("serverList")
path = view.get_cursor()[0]
servername = None
if not path:
d = gui.builder.information_dialog(
_("No server selected."),
_("You must select a server to delete it."))
d.connect("response", lambda w,i: w.destroy())
d.show_all()
return
else:
servername = view.get_model()[path][0]
if not servername:
gui.mgmt.show_error_dialog(
title=_("Error while retrieving server name."),
message=_("There was an error while retrieving the server "
"name.\nAre you connected to maki?"))
return
gui.dialogs.show_dialog("deleteServer", servername, delete_dialog_cb)
def dialog_response_cb(dialog, response_id, callback):
if response_id == RESPONSE_CONNECT:
# get the selected server(s)
serverList = widgets.get_object("serverStore")
paths = serverSelection.get_selected_rows()[1]
if not paths:
return
toConnect = []
for path in paths:
toConnect.append(serverList[path][0])
callback(toConnect)
else:
callback(None)
dialog.hide()
def add_dialog_cb():
""" indicates, server was added """
retrieveServerlist()
def delete_dialog_cb(servername):
""" indicates that the server can be deleted """
deleteServer(servername)
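# Illustrative only: a caller would open the dialog roughly as below. It assumes the
# application has already initialised `gui` and `com`; the callback signature matches
# what dialog_response_cb passes on (a list of server names, or None on cancel).
def _example_open_server_dialog():
    def on_servers_chosen(servers):
        if servers:
            logging.info("servers selected for connecting: %s", servers)

    setup()
    run(on_servers_chosen)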
|
|
#!/usr/bin/env python
import os
import re
import sys
from textwrap import dedent
import tarfile
from conda.fetch import download
# e.g., "======== addCols ===================================="
re_header = re.compile(r'^=+\s+(?P<program>\w+)\s+=+$')
# e.g., "addCols - Sum columns in a text file."
re_summary = re.compile(r'^(?P<program>\w.*?) - (?P<description>.*)$')
def parse_footer(fn):
"""
Parse the downloaded FOOTER file, which contains a header for each program
and (usually) a description line.
    Yields either a two-element list of [header-program-name,
    (description-program-name, description-text)] if a description can be
    found, or a one-element list of [header-program-name] if no description is found.
"""
block = []
f = open(fn)
while True:
line = f.readline()
if not line:
break
m1 = re_header.match(line)
if m1:
if block:
yield block
block = []
name = m1.groups()[0]
block.append(name)
continue
m = re_summary.match(line)
if m:
if not block:
continue
block.append(m.groups())
yield block
block = []
if block:
yield block
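# For example, fed the two lines quoted above, parse_footer() yields the block
# ['addCols', ('addCols', 'Sum columns in a text file.')]; a header with no matching
# summary line yields a one-element block, e.g. ['someProgram']. (Illustrative values;
# the real FOOTER content varies by release.)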
# This is the version of the last available tarball visible on
# http://hgdownload.cse.ucsc.edu/admin/exe/
VERSION = "324"
# Download tarball if it doesn't exist. Always download FOOTER.
tarball = (
'http://hgdownload.cse.ucsc.edu/admin/exe/userApps.v{0}.src.tgz'
.format(VERSION))
if not os.path.exists(os.path.basename(tarball)):
download(tarball, os.path.basename(tarball))
download(
'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/FOOTER',
'FOOTER')
# Different programs are built under different subdirectories in the source. So
# get a directory listing of the tarball
t = tarfile.open(os.path.basename(tarball))
names = [i for i in t.getnames()
if i.startswith('./userApps/kent/src')]
def program_subdir(program, names):
"""
Identify the source directory for a program.
"""
hits = [i for i in names if program in i and t.getmember(i).isdir()]
if len(hits) == 0:
return
top = sorted(hits)[0]
return top.replace('./userApps/', '')
meta_template = open('template-meta.yaml').read()
build_template = open('template-build.sh').read()
test_template = open('template-run_test.sh').read()
# relative to where this file lives
recipes_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'recipes')
# Mismatches between what is parsed from FOOTER and where a program lives in
# the source
problematic = {
'LiftSpec': 'liftSpec',
}
# Mismatches between the header and the summary; keys are the program name in
# the header and values are the dir in the source code.
resolve_header_and_summary_conflicts = {
'rmFaDups': 'rmFaDups',
}
# Some programs' descriptions do not meet the regex in FOOTER and therefore
# must be manually assigned.
manual_descriptions = {
'estOrient': dedent(
"""
Read ESTs from a database and determine orientation based on
estOrientInfo table or direction in gbCdnaInfo table. Update
PSLs so that the strand reflects the direction of transcription.
By default, PSLs where the direction can't be determined are dropped.
"""),
'fetchChromSizes': dedent(
"""
used to fetch chrom.sizes information from UCSC for the given <db>
"""),
'overlapSelect': dedent(
"""
Select records based on overlapping chromosome ranges. The ranges are
specified in the selectFile, with each block specifying a range.
Records are copied from the inFile to outFile based on the selection
criteria. Selection is based on blocks or exons rather than entire
range.
"""),
'pslCDnaFilter': dedent(
"""
Filter cDNA alignments in psl format. Filtering criteria are
comparative, selecting near best in genome alignments for each given
cDNA and non-comparative, based only on the quality of an individual
alignment.
"""),
'pslHisto': dedent(
"""
Collect counts on PSL alignments for making histograms. These then be
analyzed with R, textHistogram, etc.
"""),
'pslSwap': dedent(
"""
Swap target and query in psls
"""),
'pslToBed': dedent(
"""
transform a psl format file to a bed format file.
"""), # note for those keeping track, s/tranform/transform
}
# programs listed in FOOTER that should not be considered a "ucsc utility"
SKIP = [
'sizeof',
]
# Some programs need to be built differently. It seems that a subset of
# programs need the "stringify" binary build as well. Or, in the case of
# fetchChromSizes, it's simply a script that needs to be copied.
custom_build_scripts = {
'fetchChromSizes': 'template-build-fetchChromSizes.sh',
'pslCDnaFilter': 'template-build-with-stringify.sh',
'pslMap': 'template-build-with-stringify.sh',
}
for block in parse_footer('FOOTER'):
sys.stderr.write('.')
# If len == 2, then a description was parsed.
if len(block) == 2:
program, summary = block
program = problematic.get(program, program)
summary_program, description = summary
# Some programs have summary lines that look like this:
#
# bedGraphToBigWig v 4 - Convert a bedGraph file to bigWig format
#
# So just get the first word as the program name.
summary_program = summary_program.split()[0]
if program != summary_program:
try:
program = resolve_header_and_summary_conflicts[program]
except KeyError:
raise ValueError(
"mismatch in header and summary. header: "
"'{0}'; summary: '{1}'"
.format(program, summary_program))
if program in SKIP:
continue
# If len == 1 then no description was parsed, so we expect one to be in
# manual_descriptions.
else:
assert len(block) == 1
program = block[0]
if program in SKIP:
continue
description = manual_descriptions[program]
# conda package names must be lowercase
package = 'ucsc-' + program.lower()
recipe_dir = os.path.join(recipes_dir, package)
if not os.path.exists(recipe_dir):
os.makedirs(recipe_dir)
# Identify the subdirectory we need to go to in the build script. In some
# cases it may not exist, in which case we expect a custom build script.
subdir = program_subdir(program, names)
if subdir is None and program not in custom_build_scripts:
sys.stderr.write(" Skipping {0} ".format(program))
continue
# Fill in templates and write them to recipe dir
with open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fout:
fout.write(
meta_template.format(
program=program,
package=package,
summary=description,
version=VERSION,
)
)
with open(os.path.join(recipe_dir, 'build.sh'), 'w') as fout:
_template = open(
custom_build_scripts.get(program, 'template-build.sh')
).read()
fout.write(
_template.format(
program=program,
program_source_dir=program_subdir(program, names),
)
)
with open(os.path.join(recipe_dir, 'run_test.sh'), 'w') as fout:
fout.write(
test_template.format(
program=program
)
)
with open(os.path.join(recipe_dir, 'include.patch'), 'w') as fout:
fout.write(open('include.patch').read())
sys.stderr.write('\n')
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from abc import abstractmethod
from collections import OrderedDict, defaultdict, deque
from twitter.common.collections import OrderedSet
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.target import Target
from pants.util.meta import AbstractClass
logger = logging.getLogger(__name__)
class BuildGraph(AbstractClass):
"""A directed acyclic graph of Targets and dependencies. Not necessarily connected.
:API: public
"""
class DuplicateAddressError(AddressLookupError):
"""The same address appears multiple times in a dependency list
:API: public
"""
class TransitiveLookupError(AddressLookupError):
"""Used to append the current node to the error message from an AddressLookupError
:API: public
"""
class ManualSyntheticTargetError(AddressLookupError):
"""Used to indicate that an synthetic target was defined manually
:API: public
"""
def __init__(self, addr):
super(BuildGraph.ManualSyntheticTargetError, self).__init__(
'Found a manually-defined target at synthetic address {}'.format(addr.spec))
class DepthAgnosticWalk(object):
"""This is a utility class to aid in graph traversals that don't care about the depth."""
def __init__(self):
self._worked = set()
self._expanded = set()
def expanded_or_worked(self, vertex):
"""Returns True if the vertex has been expanded or worked."""
return vertex in self._expanded or vertex in self._worked
def do_work_once(self, vertex):
"""Returns True exactly once for the given vertex."""
if vertex in self._worked:
return False
self._worked.add(vertex)
return True
def expand_once(self, vertex, _):
"""Returns True exactly once for the given vertex."""
if vertex in self._expanded:
return False
self._expanded.add(vertex)
return True
class DepthAwareWalk(DepthAgnosticWalk):
"""This is a utility class to aid in graph traversals that care about the depth."""
def __init__(self):
super(BuildGraph.DepthAwareWalk, self).__init__()
self._expanded = defaultdict(set)
def expand_once(self, vertex, level):
"""Returns True if this (vertex, level) pair has never been expanded, and False otherwise.
This method marks the (vertex, level) pair as expanded after executing, such that this method
will return True for a given (vertex, level) pair exactly once.
"""
if level in self._expanded[vertex]:
return False
self._expanded[vertex].add(level)
return True
@staticmethod
def closure(*vargs, **kwargs):
"""See `Target.closure_for_targets` for arguments.
:API: public
"""
return Target.closure_for_targets(*vargs, **kwargs)
def __init__(self):
self.reset()
@abstractmethod
def clone_new(self):
"""Returns a new BuildGraph instance of the same type and with the same __init__ params."""
def reset(self):
"""Clear out the state of the BuildGraph, in particular Target mappings and dependencies.
:API: public
"""
self._target_by_address = OrderedDict()
self._target_dependencies_by_address = defaultdict(OrderedSet)
self._target_dependees_by_address = defaultdict(set)
self._derived_from_by_derivative_address = {}
self.synthetic_addresses = set()
def contains_address(self, address):
"""
:API: public
"""
return address in self._target_by_address
def get_target_from_spec(self, spec, relative_to=''):
"""Converts `spec` into an address and returns the result of `get_target`
:API: public
"""
return self.get_target(Address.parse(spec, relative_to=relative_to))
def get_target(self, address):
"""Returns the Target at `address` if it has been injected into the BuildGraph, otherwise None.
:API: public
"""
return self._target_by_address.get(address, None)
def dependencies_of(self, address):
"""Returns the dependencies of the Target at `address`.
This method asserts that the address given is actually in the BuildGraph.
:API: public
"""
assert address in self._target_by_address, (
'Cannot retrieve dependencies of {address} because it is not in the BuildGraph.'
.format(address=address)
)
return self._target_dependencies_by_address[address]
def dependents_of(self, address):
"""Returns the Targets which depend on the target at `address`.
This method asserts that the address given is actually in the BuildGraph.
:API: public
"""
assert address in self._target_by_address, (
'Cannot retrieve dependents of {address} because it is not in the BuildGraph.'
.format(address=address)
)
return self._target_dependees_by_address[address]
def get_derived_from(self, address):
"""Get the target the specified target was derived from.
If a Target was injected programmatically, e.g. from codegen, this allows us to trace its
ancestry. If a Target is not derived, default to returning itself.
:API: public
"""
parent_address = self._derived_from_by_derivative_address.get(address, address)
return self.get_target(parent_address)
def get_concrete_derived_from(self, address):
"""Get the concrete target the specified target was (directly or indirectly) derived from.
The returned target is guaranteed to not have been derived from any other target.
:API: public
"""
current_address = address
next_address = self._derived_from_by_derivative_address.get(current_address, current_address)
while next_address != current_address:
current_address = next_address
next_address = self._derived_from_by_derivative_address.get(current_address, current_address)
return self.get_target(current_address)
def inject_target(self, target, dependencies=None, derived_from=None, synthetic=False):
"""Injects a fully realized Target into the BuildGraph.
:API: public
:param Target target: The Target to inject.
:param list<Address> dependencies: The Target addresses that `target` depends on.
:param Target derived_from: The Target that `target` was derived from, usually as a result
of codegen.
:param bool synthetic: Whether to flag this target as synthetic, even if it isn't derived
from another target.
"""
if self.contains_address(target.address):
raise ValueError('Attempted to inject synthetic {target} derived from {derived_from}'
' into the BuildGraph with address {address}, but there is already a Target'
' {existing_target} with that address'
.format(target=target,
derived_from=derived_from,
address=target.address,
existing_target=self.get_target(target.address)))
dependencies = dependencies or frozenset()
address = target.address
if address in self._target_by_address:
raise ValueError('A Target {existing_target} already exists in the BuildGraph at address'
' {address}. Failed to insert {target}.'
.format(existing_target=self._target_by_address[address],
address=address,
target=target))
if derived_from:
if not self.contains_address(derived_from.address):
raise ValueError('Attempted to inject synthetic {target} derived from {derived_from}'
' into the BuildGraph, but {derived_from} was not in the BuildGraph.'
' Synthetic Targets must be derived from no Target (None) or from a'
' Target already in the BuildGraph.'
.format(target=target,
derived_from=derived_from))
self._derived_from_by_derivative_address[target.address] = derived_from.address
if derived_from or synthetic:
self.synthetic_addresses.add(address)
self._target_by_address[address] = target
for dependency_address in dependencies:
self.inject_dependency(dependent=address, dependency=dependency_address)
def inject_dependency(self, dependent, dependency):
"""Injects a dependency from `dependent` onto `dependency`.
It is an error to inject a dependency if the dependent doesn't already exist, but the reverse
is not an error.
:API: public
:param Address dependent: The (already injected) address of a Target to which `dependency`
is being added.
:param Address dependency: The dependency to be injected.
"""
if dependent not in self._target_by_address:
raise ValueError('Cannot inject dependency from {dependent} on {dependency} because the'
' dependent is not in the BuildGraph.'
.format(dependent=dependent, dependency=dependency))
# TODO(pl): Unfortunately this is an unhelpful time to error due to a cycle. Instead, we warn
# and allow the cycle to appear. It is the caller's responsibility to call sort_targets on the
# entire graph to generate a friendlier CycleException that actually prints the cycle.
# Alternatively, we could call sort_targets after every inject_dependency/inject_target, but
# that could have nasty performance implications. Alternative 2 would be to have an internal
# data structure of the topologically sorted graph which would have acceptable amortized
# performance for inserting new nodes, and also cycle detection on each insert.
if dependency not in self._target_by_address:
logger.warning('Injecting dependency from {dependent} on {dependency}, but the dependency'
' is not in the BuildGraph. This probably indicates a dependency cycle, but'
' it is not an error until sort_targets is called on a subgraph containing'
' the cycle.'
.format(dependent=dependent, dependency=dependency))
if dependency in self.dependencies_of(dependent):
logger.debug('{dependent} already depends on {dependency}'
.format(dependent=dependent, dependency=dependency))
else:
self._target_dependencies_by_address[dependent].add(dependency)
self._target_dependees_by_address[dependency].add(dependent)
def targets(self, predicate=None):
"""Returns all the targets in the graph in no particular order.
:API: public
:param predicate: A target predicate that will be used to filter the targets returned.
"""
return filter(predicate, self._target_by_address.values())
def sorted_targets(self):
"""
:API: public
:return: targets ordered from most dependent to least.
"""
return sort_targets(self.targets())
def walk_transitive_dependency_graph(self, addresses, work, predicate=None, postorder=False,
leveled_predicate=None):
"""Given a work function, walks the transitive dependency closure of `addresses` using DFS.
:API: public
:param list<Address> addresses: The closure of `addresses` will be walked.
:param function work: The function that will be called on every target in the closure using
the specified traversal order.
:param bool postorder: When ``True``, the traversal order is postorder (children before
parents), else it is preorder (parents before children).
:param function predicate: If this parameter is not given, no Targets will be filtered
out of the closure. If it is given, any Target which fails the predicate will not be
walked, nor will its dependencies. Thus predicate effectively trims out any subgraph
that would only be reachable through Targets that fail the predicate.
:param function leveled_predicate: Behaves identically to predicate, but takes the depth of the
target in the search tree as a second parameter, and it is checked just before a dependency is
expanded.
"""
# Use the DepthAgnosticWalk if we can, because DepthAwareWalk does a bit of extra work that can
    # slow things down by a few millis.
walker = self.DepthAwareWalk if leveled_predicate else self.DepthAgnosticWalk
walk = walker()
def _walk_rec(addr, level=0):
# If we've followed an edge to this address, stop recursing.
if not walk.expand_once(addr, level):
return
target = self._target_by_address[addr]
if predicate and not predicate(target):
return
if not postorder and walk.do_work_once(addr):
work(target)
for dep_address in self._target_dependencies_by_address[addr]:
if walk.expanded_or_worked(dep_address):
continue
if not leveled_predicate \
or leveled_predicate(self._target_by_address[dep_address], level):
_walk_rec(dep_address, level + 1)
if postorder and walk.do_work_once(addr):
work(target)
for address in addresses:
_walk_rec(address)
def walk_transitive_dependee_graph(self, addresses, work, predicate=None, postorder=False):
"""Identical to `walk_transitive_dependency_graph`, but walks dependees preorder (or postorder
if the postorder parameter is True).
This is identical to reversing the direction of every arrow in the DAG, then calling
`walk_transitive_dependency_graph`.
:API: public
"""
walked = set()
def _walk_rec(addr):
if addr not in walked:
walked.add(addr)
target = self._target_by_address[addr]
if not predicate or predicate(target):
if not postorder:
work(target)
for dep_address in self._target_dependees_by_address[addr]:
_walk_rec(dep_address)
if postorder:
work(target)
for address in addresses:
_walk_rec(address)
def transitive_dependees_of_addresses(self, addresses, predicate=None, postorder=False):
"""Returns all transitive dependees of `address`.
Note that this uses `walk_transitive_dependee_graph` and the predicate is passed through,
hence it trims graphs rather than just filtering out Targets that do not match the predicate.
    See `walk_transitive_dependee_graph` for more detail on `predicate`.
:API: public
:param list<Address> addresses: The root addresses to transitively close over.
:param function predicate: The predicate passed through to `walk_transitive_dependee_graph`.
"""
ret = OrderedSet()
self.walk_transitive_dependee_graph(addresses, ret.add, predicate=predicate,
postorder=postorder)
return ret
def transitive_subgraph_of_addresses(self, addresses, *vargs, **kwargs):
"""Returns all transitive dependencies of `address`.
Note that this uses `walk_transitive_dependencies_graph` and the predicate is passed through,
hence it trims graphs rather than just filtering out Targets that do not match the predicate.
    See `walk_transitive_dependency_graph` for more detail on `predicate`.
:API: public
:param list<Address> addresses: The root addresses to transitively close over.
:param bool postorder: When ``True``, the traversal order is postorder (children before
parents), else it is preorder (parents before children).
:param function predicate: If this parameter is not given, no Targets will be filtered
out of the closure. If it is given, any Target which fails the predicate will not be
walked, nor will its dependencies. Thus predicate effectively trims out any subgraph
that would only be reachable through Targets that fail the predicate.
:param function leveled_predicate: Behaves identically to predicate, but takes the depth of the
target in the search tree as a second parameter, and it is checked just before a dependency is
expanded.
"""
ret = OrderedSet()
self.walk_transitive_dependency_graph(addresses, ret.add,
*vargs,
**kwargs)
return ret
def transitive_subgraph_of_addresses_bfs(self, addresses, predicate=None, leveled_predicate=None):
"""Returns the transitive dependency closure of `addresses` using BFS.
:API: public
:param list<Address> addresses: The closure of `addresses` will be walked.
:param function predicate: If this parameter is not given, no Targets will be filtered
out of the closure. If it is given, any Target which fails the predicate will not be
walked, nor will its dependencies. Thus predicate effectively trims out any subgraph
that would only be reachable through Targets that fail the predicate.
:param function leveled_predicate: Behaves identically to predicate, but takes the depth of the
target in the search tree as a second parameter, and it is checked just before a dependency is
expanded.
"""
ordered_closure = OrderedSet()
# Use the DepthAgnosticWalk if we can, because DepthAwareWalk does a bit of extra work that can
    # slow things down by a few millis.
walker = self.DepthAwareWalk if leveled_predicate else self.DepthAgnosticWalk
walk = walker()
to_walk = deque((0, addr) for addr in addresses)
while len(to_walk) > 0:
level, address = to_walk.popleft()
if not walk.expand_once(address, level):
continue
target = self._target_by_address[address]
if predicate and not predicate(target):
continue
if walk.do_work_once(address):
ordered_closure.add(target)
for addr in self._target_dependencies_by_address[address]:
if walk.expanded_or_worked(addr):
continue
if not leveled_predicate or leveled_predicate(self._target_by_address[addr], level):
to_walk.append((level + 1, addr))
return ordered_closure
@abstractmethod
def inject_synthetic_target(self,
address,
target_type,
dependencies=None,
derived_from=None,
**kwargs):
"""Constructs and injects Target at `address` with optional `dependencies` and `derived_from`.
This method is useful especially for codegen, where a "derived" Target is injected
programmatically rather than read in from a BUILD file.
:API: public
:param Address address: The address of the new Target. Must not already be in the BuildGraph.
:param type target_type: The class of the Target to be constructed.
:param list<Address> dependencies: The dependencies of this Target, usually inferred or copied
from the `derived_from`.
:param Target derived_from: The Target this Target will derive from.
"""
def maybe_inject_address_closure(self, address):
"""If the given address is not already injected to the graph, calls inject_address_closure.
:API: public
:param Address address: The address to inject. Must be resolvable by `self._address_mapper` or
else be the address of an already injected entity.
"""
if not self.contains_address(address):
self.inject_address_closure(address)
@abstractmethod
def inject_address_closure(self, address):
"""Resolves, constructs and injects a Target and its transitive closure of dependencies.
This method is idempotent and will short circuit for already injected addresses. For all other
    addresses though, it delegates to an internal AddressMapper to resolve the item the address points
to.
:API: public
:param Address address: The address to inject. Must be resolvable by `self._address_mapper` or
else be the address of an already injected entity.
"""
@abstractmethod
def inject_specs_closure(self, specs, fail_fast=None):
"""Resolves, constructs and injects Targets and their transitive closures of dependencies.
:API: public
:param specs: A list of base.specs.Spec objects to resolve and inject.
:param fail_fast: Whether to fail quickly for the first error, or to complete all
possible injections before failing.
:returns: Yields a sequence of resolved Address objects.
"""
def resolve(self, spec):
"""Returns an iterator over the target(s) the given address points to."""
address = Address.parse(spec)
# NB: This is an idempotent, short-circuiting call.
self.inject_address_closure(address)
return self.transitive_subgraph_of_addresses([address])
@abstractmethod
def resolve_address(self, address):
"""Maps an address in the virtual address space to an object.
:param Address address: the address to lookup in a BUILD file
:raises AddressLookupError: if the path to the address is not found.
:returns: The Addressable which address points to.
"""
class CycleException(Exception):
"""Thrown when a circular dependency is detected.
:API: public
"""
def __init__(self, cycle):
super(CycleException, self).__init__('Cycle detected:\n\t{}'.format(
' ->\n\t'.join(target.address.spec for target in cycle)
))
def invert_dependencies(targets):
"""
:API: public
:return: the full graph of dependencies for `targets` and the list of roots.
"""
roots = set()
inverted_deps = defaultdict(OrderedSet) # target -> dependent targets
visited = set()
path = OrderedSet()
def invert(tgt):
if tgt in path:
path_list = list(path)
cycle_head = path_list.index(tgt)
cycle = path_list[cycle_head:] + [tgt]
raise CycleException(cycle)
path.add(tgt)
if tgt not in visited:
visited.add(tgt)
if tgt.dependencies:
for dependency in tgt.dependencies:
inverted_deps[dependency].add(tgt)
invert(dependency)
else:
roots.add(tgt)
path.remove(tgt)
for target in targets:
invert(target)
return roots, inverted_deps
def sort_targets(targets):
"""
:API: public
:return: the targets that `targets` depend on sorted from most dependent to least.
"""
roots, inverted_deps = invert_dependencies(targets)
ordered = []
visited = set()
def topological_sort(target):
if target not in visited:
visited.add(target)
if target in inverted_deps:
for dep in inverted_deps[target]:
topological_sort(dep)
ordered.append(target)
for root in roots:
topological_sort(root)
return ordered
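# A small self-contained sketch of sort_targets()/invert_dependencies() using stand-in
# objects: only the `dependencies` attribute (and `address.spec` when a cycle is
# reported) is consulted by the helpers above. Illustrative only; pants never runs this.
if __name__ == '__main__':
  class _FakeAddress(object):
    def __init__(self, spec):
      self.spec = spec

  class _FakeTarget(object):
    def __init__(self, spec, dependencies=()):
      self.address = _FakeAddress(spec)
      self.dependencies = list(dependencies)

    def __repr__(self):
      return self.address.spec

  leaf = _FakeTarget('src:leaf')
  middle = _FakeTarget('src:middle', [leaf])
  root = _FakeTarget('src:root', [middle, leaf])
  # Most dependent first: [src:root, src:middle, src:leaf]
  print(sort_targets([root, middle, leaf]))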
|
|
#!/usr/bin/python
'''
parallel_GPLVM.py
Main script to run, implements parallel inference for GPLVM for SGE (Sun Grid
Engine), Hadoop (Map Reduce framework), and a local parallel implementation.
Arguments:
-i, --input
    Folder containing files to be processed. One file will be processed per node. Files assumed to be in a comma-separated-value (CSV) format. (required)
-e, --embeddings
Existing folder to store embeddings in. One file will be created for each input file. (required)
-p, --parallel
Which parallel architecture to use (local (default), Hadoop, SGE)
-T, --iterations
Number of iterations to run; default value is 100
-s, --statistics
Folder to store statistics files in (default is /tmp)
-k, --keep
Whether to keep statistics files or to delete them
-l, --load
Whether to load statistics and embeddings from previous run or initialise new ones
-t, --tmp
Shared folder to store tmp files in (default is /scratch/tmp)
--init
Which initialisation to use (PCA (default), PPCA (probabilistic PCA), FA (factor analysis), random)
--optimiser
Which optimiser to use (SCG_adapted (adapted scaled gradient descent - default), GD (gradient descent))
--drop_out_fraction
Fraction of nodes to drop out (default: 0)
Sparse GPs specific options
-M, --inducing_points
Number of inducing points (default: 10)
-Q, --latent_dimensions
    Number of latent dimensions (default: 10)
-D, --output_dimensions
Number of output dimensions given in Y (default value set to 10)
--fixed_embeddings
If given, embeddings (X) are treated as fixed. Only makes sense when embeddings are given in the folder in advance
--fixed_beta
If given, beta is treated as fixed.
SGE specific options
--simplejson
SGE simplejson location
Hadoop specific options
--hadoop
Hadoop folder
--jar
Jar file for Hadoop streaming
'''
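# Example invocation (illustrative; the folder names are placeholders):
#
#   python parallel_GPLVM.py -i ./inputs -e ./embeddings -p local -T 100 \
#       -M 10 -Q 10 -D 10 --init PCA --optimiser SCG_adapted
#
# Flags not given fall back to the defaults listed in the docstring above.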
from optparse import OptionParser, OptionGroup
import os.path
import scipy
import numpy
import pickle
from numpy import genfromtxt
import time
import subprocess
import glob
from scg_adapted import SCG_adapted
from gd import GD
import supporting_functions as sp
options = {}
map_reduce = {}
# Initialise timing statistics
time_acc = {
'time_acc_statistics_map_reduce' : [],
'time_acc_statistics_mapper' : [],
'time_acc_statistics_reducer' : [],
'time_acc_calculate_global_statistics' : [],
'time_acc_embeddings_MR' : [],
'time_acc_embeddings_MR_mapper' : []
}
def main(opt_param = None):
global options, map_reduce, time_acc
if opt_param is None:
options = parse_options()
else:
options = opt_param
# Initialise the Map-Reduce parser
if options['parallel'] == "local":
import local_MapReduce
map_reduce = local_MapReduce
elif options['parallel'] == "SGE":
import SGE_MapReduce
map_reduce = SGE_MapReduce
elif options['parallel'] == "Hadoop":
raise Exception("Not implemented yet!")
options = map_reduce.init(options)
options, global_statistics = init_statistics(map_reduce, options)
# Run the optimiser for T steps
x0 = flatten_global_statistics(options, global_statistics)
    # Transform the positive parameters to be in the range (-Inf, Inf)
x0 = numpy.array([sp.transform_back(b, x) for b, x in zip(options['flat_global_statistics_bounds'], x0)])
if options['optimiser'] == 'SCG_adapted':
x_opt = SCG_adapted(likelihood_and_gradient, x0, options['embeddings'], options['fixed_embeddings'], display=True, maxiters=options['iterations'], xtol=0, ftol=0, gtol=0)
elif options['optimiser'] == 'GD':
x_opt = GD(likelihood_and_gradient, x0, options['embeddings'], options['fixed_embeddings'], display=True, maxiters=options['iterations'])
flat_array = x_opt[0]
# Transform the parameters that have to be positive to be positive
flat_array_transformed = numpy.array([sp.transform(b, x) for b, x in
zip(options['flat_global_statistics_bounds'], flat_array)])
global_statistics = rebuild_global_statistics(options, flat_array_transformed)
print 'Final global_statistics'
print global_statistics
# Clean unneeded files
options['iteration'] = len(x_opt[1]) - 1
clean(options)
# We need to call this one last time to make sure that the search did not try any other
    # values for the global statistics before finishing - i.e. the files are out of date
likelihood_and_gradient(flat_array, 'f')
file_name = options['statistics'] + '/partial_derivatives_F_f.npy'
''' We still have a bug where the reported F is not the same one as the one returned from test() '''
print 'final F=' + str(float(map_reduce.load(file_name)))
with open(options['statistics'] + '/time_acc.obj', 'wb') as f:
pickle.dump(time_acc, f)
with open(options['statistics'] + '/nlml_acc.obj', 'wb') as f:
pickle.dump(x_opt[1], f)
if options['optimiser'] == 'SCG_adapted':
with open(options['statistics'] + '/time_acc_SCG_adapted.obj', 'wb') as f:
pickle.dump(x_opt[4], f)
def init_statistics(map_reduce, options):
'''
Initialise statistics and names of variables passed-around
'''
# Initialise the statistics that need to be handled on the master node
options['global_statistics_names'] = {
'Z' : (options['M'], options['Q']), 'sf2' : (1,1), 'alpha' : (1, options['Q']), 'beta' : (1,1)
}
options['accumulated_statistics_names'] = [
'sum_YYT', 'sum_exp_K_mi_K_im', 'sum_exp_K_miY', 'sum_exp_K_ii', 'sum_KL',
'sum_d_exp_K_miY_d_Z',
'sum_d_exp_K_mi_K_im_d_Z',
'sum_d_exp_K_miY_d_alpha',
'sum_d_exp_K_mi_K_im_d_alpha',
'sum_d_exp_K_ii_d_sf2',
'sum_d_exp_K_miY_d_sf2',
'sum_d_exp_K_mi_K_im_d_sf2'
]
options['partial_derivatives_names'] = [
'F', 'dF_dsum_exp_K_ii', 'dF_dKmm', 'dF_dsum_exp_K_miY', 'dF_dsum_exp_K_mi_K_im'
]
options['cache_names'] = [
'Kmm', 'Kmm_inv'
]
# Initialise the global statistics to defaults
if not options['load']:
# Initialise Z (locations of inducing points)
input_files_names = os.listdir(options['input'] + '/')
input_files_names = sorted(input_files_names)
input_files_id = 0
embedding_name = options['embeddings'] + '/' + input_files_names[input_files_id] \
+ '.embedding.npy'
embeddings = map_reduce.load(embedding_name)
# untested:
if numpy.allclose(embeddings, embeddings.mean(axis=0)):
print 'Warning: all embeddings identical'
while embeddings.shape[0] < options['M']:
input_files_id += 1
embedding_name = options['embeddings'] + '/' + input_files_names[input_files_id] \
+ '.embedding.npy'
embeddings = numpy.concatenate((embeddings, map_reduce.load(embedding_name)))
if not embeddings.shape[1] == options['Q']:
raise Exception('Given Q does not equal existing embedding data dimensions!')
# init embeddings using k-means (gives much better guess)
import scipy.cluster.vq as cl
Z = cl.kmeans(embeddings, options['M'])[0]
# If Z has less than M points:
missing = options['M'] - Z.shape[0]
if missing > 0:
Z = numpy.concatenate((Z, embeddings[:missing]))
#Z = embeddings[:10]
Z += scipy.randn(options['M'], options['Q']) * 0.05
# Initialise the global statistics
global_statistics = {
'Z' : Z, # see GPy models/bayesian_gplvm.py
'sf2' : numpy.array([[1.0]]), # see GPy kern/rbf.py
'alpha' : scipy.ones((1, options['Q'])), # see GPy kern/rbf.py
'beta' : numpy.array([[1.0]]) # see GPy likelihood/gaussian.py
}
else:
# Load global statistics from previous run
global_statistics = {}
for key in options['global_statistics_names']:
file_name = options['statistics'] + '/global_statistics_' + key + '_f.npy'
global_statistics[key] = map_reduce.load(file_name)
# Initialise bounds for optimisation
global_statistics_bounds = {
'Z' : [(None, None) for i in range(options['M'] * options['Q'])],
'sf2' : [(0, None)],
'alpha' : [(0, None) for i in range(options['Q'])],
'beta' : [(0, None)]
}
flat_global_statistics_bounds = []
for key, statistic in global_statistics_bounds.items():
flat_global_statistics_bounds = flat_global_statistics_bounds+statistic
options['flat_global_statistics_bounds'] = flat_global_statistics_bounds
return options, global_statistics
'''
Calculate the likelihood and derivatives by sending jobs to the nodes
'''
def likelihood_and_gradient(flat_array, iteration, step_size=0):
global options, map_reduce, time_acc
flat_array_transformed = numpy.array([sp.transform(b, x) for b, x in
zip(options['flat_global_statistics_bounds'], flat_array)])
global_statistics = rebuild_global_statistics(options, flat_array_transformed)
options['i'] = iteration
options['step_size'] = step_size
#print 'global_statistics'
#print global_statistics
# Clear unneeded files from previous iteration if we don't want to keep them.
clean(options)
    # Save into shared files so all nodes can access them
for key in global_statistics.keys():
file_name = options['statistics'] + '/global_statistics_' + key + '_' + str(options['i']) + '.npy'
map_reduce.save(file_name, global_statistics[key])
# Dispatch statistics Map-Reduce
map_reduce_start = time.time()
# Cache matrices that only need be calculated once
map_reduce.cache(options, global_statistics)
accumulated_statistics_files, statistics_mapper_time, statistics_reducer_time = map_reduce.statistics_MR(options)
map_reduce_end = time.time()
#print "Done! statistics Map-Reduce took ", int(end - start), " seconds"
# Calculate global statistics
calculate_global_statistics_start = time.time()
partial_derivatives, accumulated_statistics, partial_terms = calculate_global_statistics(options,
global_statistics, accumulated_statistics_files, map_reduce)
# Evaluate the gradient for 'Z', 'sf2', 'alpha', and 'beta'
gradient = calculate_global_derivatives(options, partial_derivatives,
accumulated_statistics, global_statistics, partial_terms)
#print "Done! global statistics took ", int(end - start), " seconds"
calculate_global_statistics_end = time.time()
gradient = flatten_global_statistics(options, gradient)
likelihood = partial_derivatives['F']
if not options['fixed_embeddings']:
# Dispatch embeddings Map-Reduce if we're not using fixed embeddings
embeddings_MR_start = time.time()
embeddings_MR_time = map_reduce.embeddings_MR(options)
embeddings_MR_end = time.time()
#print "Done! embeddings Map-Reduce took ", int(end - start), " seconds"
# Collect timing statistics
time_acc['time_acc_statistics_map_reduce'] += [map_reduce_end - map_reduce_start]
time_acc['time_acc_statistics_mapper'] += [statistics_mapper_time]
time_acc['time_acc_statistics_reducer'] += [statistics_reducer_time]
time_acc['time_acc_calculate_global_statistics'] += [calculate_global_statistics_end - calculate_global_statistics_start]
if not options['fixed_embeddings']:
time_acc['time_acc_embeddings_MR'] += [embeddings_MR_end - embeddings_MR_start]
time_acc['time_acc_embeddings_MR_mapper'] += [embeddings_MR_time]
gradient = numpy.array([g * sp.transform_grad(b, x) for b, x, g in
zip(options['flat_global_statistics_bounds'], flat_array, gradient)])
return -1 * likelihood, -1 * gradient
'''
Supporting functions to pass the parameters in and out of the optimiser, and to calculate global statistics
'''
def flatten_global_statistics(options, global_statistics):
flat_array = numpy.array([])
for key, statistic in global_statistics.items():
flat_array = numpy.concatenate((flat_array, statistic.flatten()))
return flat_array
def rebuild_global_statistics(options, flat_array):
global_statistics = {}
start = 0
for key, shape in options['global_statistics_names'].items():
size = shape[0] * shape[1]
global_statistics[key] = flat_array[start:start+size].reshape(shape)
start = start + size
return global_statistics
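# Hedged round-trip example (assumes numpy as imported in this module and that
# options['global_statistics_names'] maps each statistic name to its 2-D shape):
#
#   opts = {'global_statistics_names': {'Z': (2, 3)}}
#   flat = flatten_global_statistics(opts, {'Z': numpy.arange(6.).reshape(2, 3)})
#   rebuilt = rebuild_global_statistics(opts, flat)   # rebuilt['Z'].shape == (2, 3)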
def calculate_global_statistics(options, global_statistics, accumulated_statistics_files, map_reduce):
'''
Loads statistics into dictionaries and calculates global statistics such as F and partial derivatives of F
'''
# Load accumulated statistics
accumulated_statistics = {}
for (statistic, file_name) in accumulated_statistics_files:
accumulated_statistics[statistic] = map_reduce.load(file_name)
# Get F and partial derivatives for F
partial_terms = map_reduce.load_partial_terms(options, global_statistics)
# Load cached matrices
map_reduce.load_cache(options, partial_terms)
partial_terms.set_local_statistics(accumulated_statistics['sum_YYT'],
accumulated_statistics['sum_exp_K_mi_K_im'],
accumulated_statistics['sum_exp_K_miY'],
accumulated_statistics['sum_exp_K_ii'],
accumulated_statistics['sum_KL'])
partial_derivatives = {
'F' : partial_terms.logmarglik(),
'dF_dsum_exp_K_ii' : partial_terms.dF_dexp_K_ii(),
'dF_dsum_exp_K_miY' : partial_terms.dF_dexp_K_miY(),
'dF_dsum_exp_K_mi_K_im' : partial_terms.dF_dexp_K_mi_K_im(),
'dF_dKmm' : partial_terms.dF_dKmm()
}
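# Persist the partial derivatives to the shared statistics folder so that later steps
# (for example the embeddings Map-Reduce) can load them by iteration number.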
for key in partial_derivatives.keys():
file_name = options['statistics'] + '/partial_derivatives_' + key + '_' + str(options['i']) + '.npy'
map_reduce.save(file_name, partial_derivatives[key])
return partial_derivatives, accumulated_statistics, partial_terms
def calculate_global_derivatives(options, partial_derivatives, accumulated_statistics, global_statistics, partial_terms):
'''
Evaluate the gradient for 'Z', 'sf2', 'alpha', and 'beta'
'''
grad_Z = partial_terms.grad_Z(partial_derivatives['dF_dKmm'],
partial_terms.dKmm_dZ(),
partial_derivatives['dF_dsum_exp_K_miY'],
accumulated_statistics['sum_d_exp_K_miY_d_Z'],
partial_derivatives['dF_dsum_exp_K_mi_K_im'],
accumulated_statistics['sum_d_exp_K_mi_K_im_d_Z'])
grad_alpha = partial_terms.grad_alpha(partial_derivatives['dF_dKmm'],
partial_terms.dKmm_dalpha(),
partial_derivatives['dF_dsum_exp_K_miY'],
accumulated_statistics['sum_d_exp_K_miY_d_alpha'],
partial_derivatives['dF_dsum_exp_K_mi_K_im'],
accumulated_statistics['sum_d_exp_K_mi_K_im_d_alpha'])
grad_sf2 = partial_terms.grad_sf2(partial_derivatives['dF_dKmm'],
partial_terms.dKmm_dsf2(),
partial_derivatives['dF_dsum_exp_K_ii'],
accumulated_statistics['sum_d_exp_K_ii_d_sf2'],
partial_derivatives['dF_dsum_exp_K_miY'],
accumulated_statistics['sum_d_exp_K_miY_d_sf2'],
partial_derivatives['dF_dsum_exp_K_mi_K_im'],
accumulated_statistics['sum_d_exp_K_mi_K_im_d_sf2'])
gradient = {'Z' : grad_Z,
'sf2' : grad_sf2,
'alpha' : grad_alpha}
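# beta always gets a gradient entry; when it is held fixed the entry is zeroed, which
# keeps the flattened gradient aligned with the flattened parameter vector.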
if not options['fixed_beta']:
gradient['beta'] = partial_terms.grad_beta()
else:
gradient['beta'] = numpy.zeros((1,1))
#print 'gradient'
#print gradient
return gradient
''' Clean unneeded files '''
def clean(options):
# We assume that map_reduce.remove simply returns if the file does not exist. We also remove
# files left over from a previous call to this function that used the same iteration number.
if not options['keep'] and options['i'] != 'f':
for key in options['global_statistics_names']:
file_name = options['statistics'] + '/global_statistics_' + key + '_' + str(-1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/global_statistics_' + key + '_' + str(options['i'] - 1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/global_statistics_' + key + '_' + str(options['i']) + '.npy'
map_reduce.remove(file_name)
for key in options['accumulated_statistics_names']:
file_name = options['statistics'] + '/accumulated_statistics_' + key + '_' + str(-1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/accumulated_statistics_' + key + '_' + str(options['i'] - 1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/accumulated_statistics_' + key + '_' + str(options['i']) + '.npy'
map_reduce.remove(file_name)
for key in options['partial_derivatives_names']:
file_name = options['statistics'] + '/partial_derivatives_' + key + '_' + str(-1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/partial_derivatives_' + key + '_' + str(options['i'] - 1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/partial_derivatives_' + key + '_' + str(options['i']) + '.npy'
map_reduce.remove(file_name)
for key in options['cache_names']:
file_name = options['statistics'] + '/cache_' + key + '_' + str(-1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/cache_' + key + '_' + str(options['i'] - 1) + '.npy'
map_reduce.remove(file_name)
file_name = options['statistics'] + '/cache_' + key + '_' + str(options['i']) + '.npy'
map_reduce.remove(file_name)
'''
Parse command-line options and validate that the required folders exist.
'''
def parse_options():
parser = OptionParser("usage: \n%prog [options] --input <input folder> --embeddings <embeddings folder>\n%prog -h for help")
parser.add_option("-i", "--input", dest="input",
help="Folder containing files to be processed. One file will be processed per node. Files assumed to be in a comma-separated-value (CSV) format. (required)")
parser.add_option("-e", "--embeddings", dest="embeddings",
help="Existing folder to store embeddings in. One file will be created for each input file.")
parser.add_option("-p", "--parallel",
type="choice",
choices=["local", "Hadoop", "SGE"], default="local",
help="Which parallel architecture to use (local (default), Hadoop, SGE)"
)
parser.add_option("-T", "--iterations", dest="iterations",
help="Number of iterations to run; default value is 100", type="int", default="100")
parser.add_option("-s", "--statistics", dest="statistics",
help="Folder to store statistics files in (default is /tmp)", default="/tmp")
parser.add_option("-k", "--keep", action="store_true", dest="keep", default=False,
help="Whether to keep statistics files or to delete them")
parser.add_option("-l", "--load", action="store_true", dest="load", default=False,
help="Whether to load statistics and embeddings from previous run or initialise new ones")
parser.add_option("-t", "--tmp", dest="tmp",
help="Shared folder to store tmp files in (default is /scratch/tmp)", default="/scratch/tmp")
parser.add_option("--init",
type="choice",
choices=["PCA", "PPCA", "FA", "random"], default="PCA",
help="Which initialisation to use (PCA (default), PPCA (probabilistic PCA), FA (factor analysis), random)"
)
parser.add_option("--optimiser",
type="choice",
choices=["SCG_adapted", "GD"], default="SCG_adapted",
help="Which optimiser to use (SCG_adapted (adapted scaled gradient descent - default), GD (gradient descent))"
)
parser.add_option("--drop_out_fraction", type=float, dest="drop_out_fraction",
help="Fraction of nodes to drop out (default: 0)", default=0)
parser.add_option("--local-no-pool", action="store_true", dest="local_no_pool", default=False, help="When using local_MapReduce, do not do any parallelisation.")
# Sparse GPs specific options
SparseGPs_group = OptionGroup(parser, "Sparse GPs Options")
SparseGPs_group.add_option("-M", "--inducing_points", type=int, dest="M",
help="Number of inducing points (default: 10)", default="10")
SparseGPs_group.add_option("-Q", "--latent_dimensions", type=int, dest="Q",
help="Number of latent dimensions (default: 10)", default="10")
SparseGPs_group.add_option("-D", "--output_dimensions", type=int, dest="D",
help="Number of output dimensions given in Y (default value set to 10)", default="10")
SparseGPs_group.add_option("--fixed_embeddings", action="store_true", dest="fixed_embeddings",
help="If given, embeddings (X) are treated as fixed. Only makes sense when embeddings are given in the folder in advance", default=False)
SparseGPs_group.add_option("--fixed_beta", action="store_true", dest="fixed_beta",
help="If given, beta is treated as fixed.", default=False)
parser.add_option_group(SparseGPs_group)
# SGE specific options
SGE_group = OptionGroup(parser, "SGE Options")
SGE_group.add_option("--simplejson", dest="simplejson",
help="SGE simplejson location", default="/scratch/python/lib.linux-x86_64-2.5/")
parser.add_option_group(SGE_group)
# Hadoop specific options
Hadoop_group = OptionGroup(parser, "Hadoop Options")
Hadoop_group.add_option("--hadoop", dest="hadoop",
help="Hadoop folder", default="/usr/local/hadoop/bin/hadoop")
Hadoop_group.add_option("--jar", dest="jar",
help="Jar file for Hadoop streaming", default="/usr/local/hadoop/share/hadoop/tools/lib/hadoop-*streaming*.jar")
parser.add_option_group(Hadoop_group)
# Check that the options are correct and create the required folders
(options, args) = parser.parse_args()
options = vars(options)
if not options['input']:
parser.error('Input folder not given')
elif not os.path.exists(options['input']):
raise Exception('Input folder does not exist')
input_files_names = os.listdir(options['input'] + '/')
if len(input_files_names) == 0:
raise Exception('No input files!')
if not options['embeddings']:
parser.error('Embeddings folder not given')
elif not os.path.exists(options['embeddings']):
raise Exception('Folder to save embeddings in does not exist')
if not os.path.exists(options['statistics']):
raise Exception('Statistics folder ' + options['statistics'] + ' does not exist')
if not os.path.exists(options['tmp']):
raise Exception('TMP folder ' + options['tmp'] + ' does not exist')
if options['parallel'] == "SGE":
try:
subprocess.call(["qstat"])
except:
raise Exception("Cannot call SGE's qstat; please make sure the environment was set up correctly")
if options['parallel'] == "SGE" and not os.path.exists(options['simplejson']):
raise Exception('SGE simplejson ' + options['simplejson'] + ' does not exist')
return options
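# Example invocation (hypothetical script name and paths):
#   python run_gplvm.py --input ./inputs --embeddings ./embeddings -p local -T 50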
if __name__ == '__main__':
main()
|
|
import sys
import math
import paramiko
import util_uploader
class GetBSDData():
def __init__(self, ip, SSH_PORT, TIMEOUT, usr, pwd, USE_KEY_FILE, KEY_FILE, \
GET_SERIAL_INFO, GET_HARDWARE_INFO, GET_OS_DETAILS, \
GET_CPU_INFO, GET_MEMORY_INFO, IGNORE_DOMAIN, UPLOAD_IPV6, DEBUG):
self.machine_name = ip
self.port = int(SSH_PORT)
self.timeout = TIMEOUT
self.username = usr
self.password = pwd
self.USE_KEY_FILE = USE_KEY_FILE
self.KEY_FILE = KEY_FILE
self.GET_SERIAL_INFO = GET_SERIAL_INFO
self.GET_HARDWARE_INFO = GET_HARDWARE_INFO
self.GET_OS_DETAILS = GET_OS_DETAILS
self.GET_CPU_INFO = GET_CPU_INFO
self.GET_MEMORY_INFO = GET_MEMORY_INFO
self.IGNORE_DOMAIN = IGNORE_DOMAIN
self.UPLOAD_IPV6 = UPLOAD_IPV6
self.DEBUG = DEBUG
self.ssh = paramiko.SSHClient()
self.conn = None
self.sysData = {}
self.allData = []
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def main(self):
self.connect()
self.get_sys()
self.get_CPU()
self.get_RAM()
self.allData.append(self.sysData)
self.get_IP()
return self.allData
def connect(self):
try:
if not self.USE_KEY_FILE:
self.ssh.connect(str(self.machine_name), port=self.port, username=self.username, password=self.password, timeout=self.timeout)
else:
self.ssh.connect(str(self.machine_name), port=self.port, username=self.username, key_filename=self.KEY_FILE, timeout=self.timeout)
except paramiko.AuthenticationException:
print str(self.machine_name) + ': authentication failed'
return None
except Exception as err:
print str(self.machine_name) + ': ' + str(err)
return None
def get_CPU(self):
if self.GET_CPU_INFO:
stdin, stdout, stderr = self.ssh.exec_command("sysctl -n hw.model; sysctl -n hw.ncpu; sysctl -n hw.cpuspeed")
data_out = stdout.readlines()
data_err = stderr.readlines()
if not data_err:
cpumodel = data_out[0].strip()
cpucount = data_out[1].strip()
cpuspeed = data_out[2].strip()
self.sysData.update({'cpumodel':cpumodel})
self.sysData.update({'cpucount':cpucount})
self.sysData.update({'cpuspeed':cpuspeed})
else:
print data_err
def get_RAM(self):
if self.GET_MEMORY_INFO:
stdin, stdout, stderr = self.ssh.exec_command("sysctl -n hw.physmem")
data_out = stdout.readlines()
data_err = stderr.readlines()
if not data_err:
memory = int(data_out[0].strip()) /1024 /1024
self.sysData.update({'memory':memory})
else:
print 'Error: ', data_err
def get_name(self):
stdin, stdout, stderr = self.ssh.exec_command("/bin/hostname")
data_out = stdout.readlines()
data_err = stderr.readlines()
if not data_err:
full_name = data_out[0].strip()
if self.IGNORE_DOMAIN:
if '.' in full_name:
return full_name.split('.')[0]
else:
return full_name
else:
return full_name
else:
print 'Error: ', data_err
def get_IP(self):
addresses = {}
stdin, stdout, stderr = self.ssh.exec_command("ifconfig")
data_out = stdout.readlines()
data_err = stderr.readlines()
if not data_err:
nics = []
tmpv4 = {}
tmpv6 = {}
macs = {}
for rec in data_out:
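# Each 'flags=' line in BSD ifconfig output starts a new interface stanza; flush any
# partially collected IPv4/IPv6/MAC records for the previous interface before
# starting records for the new one.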
if 'flags=' in rec:
device = rec.split(':')[0]
if tmpv4 == {}:
tmpv4.update({'device':self.device_name})
tmpv4.update({'tag':device})
else:
nics.append(tmpv4)
tmpv4 = {}
tmpv4.update({'device':self.device_name})
tmpv4.update({'tag':device})
if tmpv6 == {}:
tmpv6.update({'device':self.device_name})
tmpv6.update({'tag':device})
else:
nics.append(tmpv6)
tmpv6 = {}
tmpv6.update({'device':self.device_name})
tmpv6.update({'tag':device})
if macs != {}:
nics.append(macs)
macs = {}
macs.update({'device':self.device_name})
macs.update({'port_name':device})
else:
if rec.strip().startswith('lladdr'):
mac = rec.split()[1].strip()
tmpv4.update({'macaddress':mac})
tmpv6.update({'macaddress':mac})
macs.update({'macaddress':mac})
if rec.strip().startswith('inet '):
ipv4 = rec.split()[1]
tmpv4.update({'ipaddress':ipv4})
if rec.strip().startswith('inet6'):
ipv6 = rec.split()[1]
if '%' in ipv6:
ipv6 = ipv6.split('%')[0]
tmpv6.update({'ipaddress':ipv6})
nics.append(tmpv4)
nics.append(tmpv6)
nics.append(macs)
for nic in nics:
if 'tag' in nic:
if nic['tag'].startswith('lo'):
pass
else:
if 'ipaddress' in nic or 'macaddress' in nic:
self.allData.append(nic)
elif 'port_name' in nic:
if nic['port_name'].startswith('lo'):
pass
else:
if 'ipaddress' in nic or 'macaddress' in nic:
self.allData.append(nic)
else:
print 'Error: ', data_err
def get_sys(self):
self.device_name = self.get_name()
stdin, stdout, stderr = self.ssh.exec_command("uname -rsv")
data_out = stdout.readlines()
data_err = stderr.readlines()
if not data_err:
data = ' '.join(data_out).split()
os = data[0].strip()
self.sysData.update({'os':os})
version = data[1].strip()
self.sysData.update({'osver':version})
kernel_version = data[2].strip()
self.sysData.update({'osverno':kernel_version})
self.sysData.update({'name':self.device_name})
else:
print 'Error: ', data_err
stdin, stdout, stderr = self.ssh.exec_command("sysctl -n hw.product; sysctl -n hw.vendor ; sysctl -n hw.uuid")
data_out = stdout.readlines()
data_err = stderr.readlines()
if not data_err:
vendor = data_out[0].strip()
uuid = data_out[2].strip()
self.sysData.update({'uuid':uuid})
mft = data_out[1].strip()
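# Well-known hypervisor vendor strings mark the host as virtual; anything else is
# assumed to be physical hardware.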
if mft.lower().strip() in ['vmware, inc.', 'bochs', 'kvm', 'qemu', 'microsoft corporation', 'xen', 'innotek gmbh']:
manufacturer = 'virtual'
self.sysData.update({'type':manufacturer})
else:
self.sysData.update({'type':'physical'})
self.sysData.update({'manufacturer':vendor})
else:
print 'Error: ', data_err
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tuskar.templates import heat
from tuskar.templates import namespace as ns_utils
from tuskar.templates import plan
class DeploymentPlanTests(unittest.TestCase):
def test_empty(self):
# Test
p = plan.DeploymentPlan(description='test-desc')
str(p) # should not error
# Verify
self.assertTrue(isinstance(p.master_template, heat.Template))
self.assertTrue(isinstance(p.environment, heat.Environment))
self.assertEqual('test-desc', p.master_template.description)
self.assertEqual(p.add_scaling, True)
def test_existing_pieces(self):
# Test
t = heat.Template()
e = heat.Environment()
p = plan.DeploymentPlan(master_template=t, environment=e)
# Verify
self.assertTrue(p.master_template is t)
self.assertTrue(p.environment is e)
def test_add_template_no_scaling(self):
# Test
p = plan.DeploymentPlan(add_scaling=False)
t = self._generate_template()
p.add_template('ns1', t, 'template-1.yaml')
# Verify Master Template Parameters
self.assertEqual(2, len(p.master_template.parameters))
for original, added in zip(t.parameters, p.master_template.parameters):
self.assertTrue(added is not original)
expected_name = ns_utils.apply_template_namespace('ns1',
original.name)
self.assertEqual(added.name, expected_name)
self.assertEqual(added.param_type, original.param_type)
# Verify Resource
self.assertEqual(1, len(p.master_template.resources))
added = p.master_template.resources[0]
expected_id = plan.generate_resource_id('ns1')
self.assertEqual(added.resource_id, expected_id)
expected_type = ns_utils.apply_resource_alias_namespace('ns1')
self.assertEqual(added.resource_type, expected_type)
for param, prop in zip(t.parameters, added.properties):
v = ns_utils.apply_template_namespace('ns1', param.name)
expected_value = {'get_param': [v]}
self.assertEqual(prop.value, expected_value)
# Verify Environment Parameters
self.assertEqual(2, len(p.environment.parameters))
for env_param, template_param in zip(p.environment.parameters,
t.parameters):
expected_name = (
ns_utils.apply_template_namespace('ns1', template_param.name))
self.assertEqual(env_param.name, expected_name)
self.assertEqual(env_param.value, '')
# Verify Resource Registry Entry
self.assertEqual(1, len(p.environment.registry_entries))
added = p.environment.registry_entries[0]
expected_alias = ns_utils.apply_resource_alias_namespace('ns1')
self.assertEqual(added.alias, expected_alias)
self.assertEqual(added.filename, 'template-1.yaml')
def test_add_template_with_default_parameter_value(self):
# Test
p = plan.DeploymentPlan()
t = heat.Template()
t.add_parameter(heat.Parameter('param-1', 'type-1', default='d1'))
t.add_parameter(heat.Parameter('param-2', 'type-2'))
t.add_parameter(heat.Parameter('param-3', 'type-3', default=0))
p.add_template('ns1', t, 'template-1.yaml')
# Verify
p1 = p.environment.parameters[0]
self.assertEqual(ns_utils.apply_template_namespace('ns1', 'param-1'),
p1.name)
self.assertEqual('d1', p1.value)
p2 = p.environment.parameters[1]
self.assertEqual(ns_utils.apply_template_namespace('ns1', 'param-2'),
p2.name)
self.assertEqual('', p2.value)
p3 = p.environment.parameters[2]
self.assertEqual(ns_utils.apply_template_namespace('ns1', 'param-3'),
p3.name)
self.assertEqual(0, p3.value)
def test_add_template_with_colliding_namespace(self):
# Test
p = plan.DeploymentPlan()
p.environment.add_parameter(
heat.EnvironmentParameter('ns1::param-1', 'value-1'))
t = heat.Template()
t.add_parameter(heat.Parameter('param-2', 'type-1'))
# Verify
self.assertRaises(ValueError,
p.add_template, 'ns1', t, 'template-1.yaml')
def test_add_scaling_with_scaling(self):
# Test
p = plan.DeploymentPlan(add_scaling=True)
t = self._generate_template()
p.add_template('ns1', t, 'template-1.yaml')
# Verify Master Template Count Parameters
self.assertEqual(4, len(p.master_template.parameters))
count_param = p.master_template.parameters[2]
expected_count_name = plan.generate_count_property_name('ns1')
self.assertEqual(count_param.name, expected_count_name)
self.assertEqual(count_param.param_type, 'number')
removal_param = p.master_template.parameters[3]
expected_removal_name = plan.generate_removal_policies_name('ns1')
self.assertEqual(removal_param.name, expected_removal_name)
self.assertEqual(removal_param.param_type, 'json')
self.assertEqual(1, len(count_param.constraints))
const = count_param.constraints[0]
self.assertTrue(isinstance(const, heat.ParameterConstraint))
self.assertEqual(const.constraint_type, 'range')
self.assertEqual(const.definition, {'min': '0'})
# Verify Resource Group Wrapper
self.assertEqual(1, len(p.master_template.resources))
group_res = p.master_template.resources[0]
group_id = plan.generate_group_id('ns1')
self.assertEqual(group_res.resource_id, group_id)
self.assertEqual(group_res.resource_type,
plan.HEAT_TYPE_RESOURCE_GROUP)
self.assertEqual(3, len(group_res.properties))
count_prop = group_res.properties[0]
self.assertEqual(count_prop.name, plan.PROPERTY_SCALING_COUNT)
self.assertEqual(count_prop.value,
{'get_param': [expected_count_name]})
removal_prop = group_res.properties[1]
self.assertEqual(removal_prop.name, plan.PROPERTY_REMOVAL_POLICIES)
self.assertEqual(removal_prop.value,
{'get_param': [expected_removal_name]})
def_prop = group_res.properties[2]
self.assertEqual(def_prop.name, plan.PROPERTY_RESOURCE_DEFINITION)
self.assertTrue(isinstance(def_prop.value, heat.Resource))
# Verify Environment Parameters
self.assertEqual(4, len(p.environment.parameters))
count_param = p.environment.parameters[2]
self.assertEqual(count_param.name, expected_count_name)
self.assertEqual(count_param.value, '1')
removal_param = p.environment.parameters[3]
self.assertEqual(removal_param.name, expected_removal_name)
self.assertEqual(removal_param.value, [])
def test_remove_template(self):
# Setup & Sanity Check
p = plan.DeploymentPlan(add_scaling=False)
t = self._generate_template()
p.add_template('ns1', t, 'template-1.yaml')
p.add_template('ns2', t, 'template-2.yaml')
self.assertEqual(4, len(p.master_template.parameters))
self.assertEqual(0, len(p.master_template.outputs))
self.assertEqual(2, len(p.master_template.resources))
self.assertEqual(4, len(p.environment.parameters))
self.assertEqual(2, len(p.environment.registry_entries))
# Test
p.remove_template('ns1')
# Verify
self.assertEqual(2, len(p.master_template.parameters))
self.assertEqual(0, len(p.master_template.outputs))
self.assertEqual(1, len(p.master_template.resources))
self.assertEqual(2, len(p.environment.parameters))
self.assertEqual(1, len(p.environment.registry_entries))
def test_set_value(self):
# Setup
p = plan.DeploymentPlan()
set_me = heat.EnvironmentParameter('p1', 'v1')
p.environment.add_parameter(set_me)
# Test
p.set_value('p1', 'v2')
# Verify
self.assertEqual(p.environment.find_parameter_by_name('p1').value,
'v2')
def test_set_value_missing_parameter(self):
# Setup
p = plan.DeploymentPlan()
# Test
self.assertRaises(ValueError, p.set_value, 'missing', 'irrelevant')
def _generate_template(self):
t = heat.Template()
t.add_parameter(heat.Parameter('param-1', 'type-1'))
t.add_parameter(heat.Parameter('param-2', 'type-2'))
t.add_output(heat.Output('out-1', 'value-1'))
t.add_output(heat.Output('out-2', 'value-2'))
return t
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.video.transcoder_v1.services.transcoder_service import pagers
from google.cloud.video.transcoder_v1.types import resources
from google.cloud.video.transcoder_v1.types import services
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import TranscoderServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import TranscoderServiceGrpcAsyncIOTransport
from .client import TranscoderServiceClient
class TranscoderServiceAsyncClient:
"""Using the Transcoder API, you can queue asynchronous jobs for
transcoding media into various output formats. Output formats
may include different streaming standards such as HTTP Live
Streaming (HLS) and Dynamic Adaptive Streaming over HTTP (DASH).
You can also customize jobs using advanced features such as
Digital Rights Management (DRM), audio equalization, content
concatenation, and digital ad-stitch ready content generation.
"""
_client: TranscoderServiceClient
DEFAULT_ENDPOINT = TranscoderServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = TranscoderServiceClient.DEFAULT_MTLS_ENDPOINT
job_path = staticmethod(TranscoderServiceClient.job_path)
parse_job_path = staticmethod(TranscoderServiceClient.parse_job_path)
job_template_path = staticmethod(TranscoderServiceClient.job_template_path)
parse_job_template_path = staticmethod(
TranscoderServiceClient.parse_job_template_path
)
common_billing_account_path = staticmethod(
TranscoderServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
TranscoderServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(TranscoderServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
TranscoderServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
TranscoderServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
TranscoderServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(TranscoderServiceClient.common_project_path)
parse_common_project_path = staticmethod(
TranscoderServiceClient.parse_common_project_path
)
common_location_path = staticmethod(TranscoderServiceClient.common_location_path)
parse_common_location_path = staticmethod(
TranscoderServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranscoderServiceAsyncClient: The constructed client.
"""
return TranscoderServiceClient.from_service_account_info.__func__(TranscoderServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranscoderServiceAsyncClient: The constructed client.
"""
return TranscoderServiceClient.from_service_account_file.__func__(TranscoderServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if a client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return TranscoderServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> TranscoderServiceTransport:
"""Returns the transport used by the client instance.
Returns:
TranscoderServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(TranscoderServiceClient).get_transport_class, type(TranscoderServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, TranscoderServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the transcoder service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.TranscoderServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = TranscoderServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_job(
self,
request: Union[services.CreateJobRequest, dict] = None,
*,
parent: str = None,
job: resources.Job = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Job:
r"""Creates a job in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_create_job():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
job = transcoder_v1.Job()
job.template_id = "template_id_value"
request = transcoder_v1.CreateJobRequest(
parent="parent_value",
job=job,
)
# Make the request
response = client.create_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.CreateJobRequest, dict]):
The request object. Request message for
`TranscoderService.CreateJob`.
parent (:class:`str`):
Required. The parent location to create and process this
job. Format: ``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job (:class:`google.cloud.video.transcoder_v1.types.Job`):
Required. Parameters for creating
transcoding job.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.Job:
Transcoding job resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.CreateJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job is not None:
request.job = job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_job,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_jobs(
self,
request: Union[services.ListJobsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsAsyncPager:
r"""Lists jobs in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_list_jobs():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.ListJobsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_jobs(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.ListJobsRequest, dict]):
The request object. Request message for
`TranscoderService.ListJobs`. The parent location from
which to retrieve the collection of jobs.
parent (:class:`str`):
Required. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.services.transcoder_service.pagers.ListJobsAsyncPager:
Response message for TranscoderService.ListJobs.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.ListJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_jobs,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListJobsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_job(
self,
request: Union[services.GetJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Job:
r"""Returns the job data.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_get_job():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.GetJobRequest(
name="name_value",
)
# Make the request
response = client.get_job(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.GetJobRequest, dict]):
The request object. Request message for
`TranscoderService.GetJob`.
name (:class:`str`):
Required. The name of the job to retrieve. Format:
``projects/{project}/locations/{location}/jobs/{job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.Job:
Transcoding job resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.GetJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_job,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_job(
self,
request: Union[services.DeleteJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_delete_job():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.DeleteJobRequest(
name="name_value",
)
# Make the request
client.delete_job(request=request)
Args:
request (Union[google.cloud.video.transcoder_v1.types.DeleteJobRequest, dict]):
The request object. Request message for
`TranscoderService.DeleteJob`.
name (:class:`str`):
Required. The name of the job to delete. Format:
``projects/{project}/locations/{location}/jobs/{job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.DeleteJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_job,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
async def create_job_template(
self,
request: Union[services.CreateJobTemplateRequest, dict] = None,
*,
parent: str = None,
job_template: resources.JobTemplate = None,
job_template_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.JobTemplate:
r"""Creates a job template in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_create_job_template():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.CreateJobTemplateRequest(
parent="parent_value",
job_template_id="job_template_id_value",
)
# Make the request
response = client.create_job_template(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.CreateJobTemplateRequest, dict]):
The request object. Request message for
`TranscoderService.CreateJobTemplate`.
parent (:class:`str`):
Required. The parent location to create this job
template. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_template (:class:`google.cloud.video.transcoder_v1.types.JobTemplate`):
Required. Parameters for creating job
template.
This corresponds to the ``job_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_template_id (:class:`str`):
Required. The ID to use for the job template, which will
become the final component of the job template's
resource name.
This value should be 4-63 characters, and valid
characters must match the regular expression
``[a-zA-Z][a-zA-Z0-9_-]*``.
This corresponds to the ``job_template_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.JobTemplate:
Transcoding job template resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job_template, job_template_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.CreateJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job_template is not None:
request.job_template = job_template
if job_template_id is not None:
request.job_template_id = job_template_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_job_template,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_job_templates(
self,
request: Union[services.ListJobTemplatesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobTemplatesAsyncPager:
r"""Lists job templates in the specified region.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_list_job_templates():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.ListJobTemplatesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_job_templates(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.ListJobTemplatesRequest, dict]):
The request object. Request message for
`TranscoderService.ListJobTemplates`.
parent (:class:`str`):
Required. The parent location from which to retrieve the
collection of job templates. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.services.transcoder_service.pagers.ListJobTemplatesAsyncPager:
Response message for TranscoderService.ListJobTemplates.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.ListJobTemplatesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_job_templates,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListJobTemplatesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_job_template(
self,
request: Union[services.GetJobTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.JobTemplate:
r"""Returns the job template data.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_get_job_template():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.GetJobTemplateRequest(
name="name_value",
)
# Make the request
response = client.get_job_template(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.video.transcoder_v1.types.GetJobTemplateRequest, dict]):
The request object. Request message for
`TranscoderService.GetJobTemplate`.
name (:class:`str`):
Required. The name of the job template to retrieve.
Format:
``projects/{project}/locations/{location}/jobTemplates/{job_template}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.video.transcoder_v1.types.JobTemplate:
Transcoding job template resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.GetJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_job_template,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_job_template(
self,
request: Union[services.DeleteJobTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job template.
.. code-block:: python
from google.cloud.video import transcoder_v1
def sample_delete_job_template():
# Create a client
client = transcoder_v1.TranscoderServiceClient()
# Initialize request argument(s)
request = transcoder_v1.DeleteJobTemplateRequest(
name="name_value",
)
# Make the request
client.delete_job_template(request=request)
Args:
request (Union[google.cloud.video.transcoder_v1.types.DeleteJobTemplateRequest, dict]):
The request object. Request message for
`TranscoderService.DeleteJobTemplate`.
name (:class:`str`):
Required. The name of the job template to delete.
``projects/{project}/locations/{location}/jobTemplates/{job_template}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = services.DeleteJobTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_job_template,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-video-transcoder",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TranscoderServiceAsyncClient",)
|
|
from bson import Code
import datetime
from django.conf import settings
from crits.core.mongo_tools import mongo_connector
from crits.emails.email import Email
from crits.samples.yarahit import YaraHit
from crits.targets.target import Target
from crits.targets.division import Division
def generate_yara_hits():
"""
Generate yara hits mapreduce.
"""
samples = mongo_connector(settings.COL_SAMPLES)
map_code = """
function() {
this.analysis.forEach(function(z) {
if ("results" in z && z.service_name == "yara") {
z.results.forEach(function(x) {
emit({engine: z.service_name, version: x.version, result: x.result} ,{count: 1});
})
}
})
}
"""
m = Code(map_code, {})
r = Code('function(k,v) { var count=0; v.forEach(function(v) { count += v["count"]; }); return {count: count}; }', {})
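# The map step emits {count: 1} once per yara result, keyed by (engine, version,
# rule result); the reduce step sums those counts per key.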
try:
yarahits = samples.inline_map_reduce(m, r,
query={'analysis.service_name': 'yara'})
except:
return
yarahits_col = mongo_connector(settings.COL_YARAHITS)
yarahits_col.drop()
sv = YaraHit._meta['latest_schema_version']
for hit in yarahits:
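# The trailing positional True enables upsert so previously unseen yara hit keys are
# inserted as well as updated (assumes the pymongo 2.x update() signature used
# elsewhere in CRITs).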
yarahits_col.update({'engine': hit["_id"]["engine"],
"version": hit["_id"]["version"],
"result": hit["_id"]["result"]},
{"$set": {"sample_count": hit["value"]["count"],
"schema_version": sv}},
True,
False)
def generate_sources():
"""
Generate sources mapreduce.
"""
samples = mongo_connector(settings.COL_SAMPLES)
m = Code('function() { this.source.forEach(function(z) {emit({name: z.name}, {count: 1});}) }', {})
r = Code('function(k,v) { var count=0; v.forEach(function(v) { count += v["count"]; }); return {count: count}; }', {})
try:
sources = samples.inline_map_reduce(m,r,
query={"source.name": {"$exists": 1}})
except:
return
source_access = mongo_connector(settings.COL_SOURCE_ACCESS)
for source in sources:
source_access.update({"name": source["_id"]["name"]},
{"$set": {"sample_count": source["value"]["count"]}})
def generate_filetypes():
"""
Generate filetypes mapreduce.
"""
samples = mongo_connector(settings.COL_SAMPLES)
m = Code('function() { emit({filetype: this.mimetype}, {count: 1}); }', {})
r = Code('function(k,v) { var count = 0; v.forEach(function(v) { count += v["count"]; }); return {count: count}; }', {})
try:
samples.map_reduce(m,r, settings.COL_FILETYPES)
except:
return
def generate_backdoors():
"""
Generate backdoors mapreduce.
"""
samples = mongo_connector(settings.COL_SAMPLES)
m = Code('function() { emit({name: this.backdoor.name}, {count: 1}); }', {})
r = Code('function(k,v) { var count = 0; v.forEach(function(v) { count += v["count"]; }); return {count: count}; }', {})
try:
backdoors = samples.inline_map_reduce(m,r,
query={"backdoor.name": {"$ne": "None"}})
except:
return
backdoor_details = mongo_connector(settings.COL_BACKDOOR_DETAILS)
for backdoor in backdoors:
backdoor_details.update({"name": backdoor["_id"]["name"]},
{"$set": {"sample_count": backdoor["value"]["count"]}})
def generate_exploits():
"""
Generate exploits mapreduce.
"""
samples = mongo_connector(settings.COL_SAMPLES)
m = Code('function() { this.exploit.forEach(function(z) {emit({cve: z.cve} ,{count: 1});}) }', {})
r = Code('function(k,v) { var count = 0; v.forEach(function(v) { count += v["count"]; }); return {count: count}; }', {})
try:
exploits = samples.inline_map_reduce(m,r,
query={"exploit.cve": {"$exists": 1}})
except:
return
exploit_details = mongo_connector(settings.COL_EXPLOIT_DETAILS)
for exploit in exploits:
exploit_details.update({"name": exploit["_id"]["cve"]},
{"$set": {"sample_count": exploit["value"]["count"]}})
def zero_campaign():
"""
Zero out the campaign counts before recalculating.
"""
return {
'indicator_count': 0,
'sample_count': 0,
'email_count': 0,
'domain_count': 0,
'event_count': 0,
'ip_count': 0,
'pcap_count': 0,
}
def update_results(collection, m, r, stat_query, field, campaign_stats):
"""
Update campaign results.
:param collection: The collection to get campaign results for.
:type collection: str
:param m: The map.
:type m: :class:`bson.Code`
:param r: The reduce.
:type r: :class:`bson.Code`
:param stat_query: The query to use in the mapreduce.
:type stat_query: dict
:param field: The field to update.
:type field: str
:param campaign_stats: The campaign stats.
:type campaign_stats: dict
:returns: dict
"""
if collection.find().count() > 0:
results = collection.inline_map_reduce(m,r, query=stat_query)
for result in results:
if result["_id"] != None:
if result["_id"] not in campaign_stats:
campaign_stats[result["_id"]] = zero_campaign()
campaign_stats[result["_id"]][field] = result["value"]["count"]
return campaign_stats
def generate_campaign_stats(source_name=None):
"""
Generate campaign stats.
:param source_name: Limit to a specific source.
:type source_name: None, str
"""
# build the query used in the mapreduces
stat_query = {}
stat_query["campaign.name"] = {"$exists": "true"}
if source_name:
stat_query["source.name"] = source_name
campaigns = mongo_connector(settings.COL_CAMPAIGNS)
domains = mongo_connector(settings.COL_DOMAINS)
emails = mongo_connector(settings.COL_EMAIL)
events = mongo_connector(settings.COL_EVENTS)
indicators = mongo_connector(settings.COL_INDICATORS)
ips = mongo_connector(settings.COL_IPS)
pcaps = mongo_connector(settings.COL_PCAPS)
samples = mongo_connector(settings.COL_SAMPLES)
# generate an initial campaign listing so we can make sure all campaigns get updated
campaign_listing = campaigns.find({}, {'name': 1})
# initialize each campaign to zeroed out stats
campaign_stats = {}
for campaign in campaign_listing:
campaign_stats[campaign["name"]] = zero_campaign()
mapcode = """
function() {
if ("campaign" in this) {
campaign_list = this.campaign; }
if (campaign_list.length > 0) {
campaign_list.forEach(function(c) {
emit(c.name, {count: 1}); }); }
}
"""
m = Code(mapcode, {})
r = Code('function(k,v) { var count = 0; v.forEach(function(v) { count += v["count"]; }); return {count: count}; }', {})
campaign_stats = update_results(domains, m, r, stat_query,
"domain_count", campaign_stats)
campaign_stats = update_results(emails, m, r, stat_query,
"email_count", campaign_stats)
campaign_stats = update_results(events, m, r, stat_query,
"event_count", campaign_stats)
campaign_stats = update_results(indicators, m, r, stat_query,
"indicator_count", campaign_stats)
campaign_stats = update_results(ips, m, r, stat_query,
"ip_count", campaign_stats)
campaign_stats = update_results(pcaps, m, r, stat_query,
"pcap_count", campaign_stats)
campaign_stats = update_results(samples, m, r, stat_query,
"sample_count", campaign_stats)
# update all of the campaigns here
for campaign in campaign_stats.keys():
campaigns.update({"name": campaign},
{"$set": campaign_stats[campaign]}, upsert=True)
def generate_counts():
"""
Generate dashboard counts.
"""
counts = mongo_connector(settings.COL_COUNTS)
samples = mongo_connector(settings.COL_SAMPLES)
emails = mongo_connector(settings.COL_EMAIL)
indicators = mongo_connector(settings.COL_INDICATORS)
domains = mongo_connector(settings.COL_DOMAINS)
pcaps = mongo_connector(settings.COL_PCAPS)
today = datetime.datetime.fromordinal(datetime.datetime.now().toordinal())
start = datetime.datetime.now()
last_seven = start - datetime.timedelta(7)
last_thirty = start - datetime.timedelta(30)
count = {}
count['Samples'] = samples.find().count()
count['Emails'] = emails.find().count()
count['Indicators'] = indicators.find().count()
count['PCAPs'] = pcaps.find().count()
count['Domains'] = domains.find().count()
count['Emails Today'] = emails.find({"source.instances.date": {"$gte": today}}).count()
count['Emails Last 7'] = emails.find({'source.instances.date': {'$gte': last_seven}}).count()
count['Emails Last 30'] = emails.find({'source.instances.date': {'$gte': last_thirty}}).count()
count['Indicators Today'] = indicators.find({"source.instances.date": {"$gte": today}}).count()
count['Indicators Last 7'] = indicators.find({"source.instances.date": {"$gte": last_seven}}).count()
count['Indicators Last 30'] = indicators.find({"source.instances.date": {"$gte": last_thirty}}).count()
counts.update({'name': "counts"}, {'$set': {'counts': count}}, upsert=True)
def target_user_stats():
"""
Generate targets from email To/CC fields, then generate divisions from
targets list.
No cleanup or logic is being done on the To/CC fields. If they are not
valid email addresses (user@domain), they do not get added as a target.
"""
mapcode = """
function () {
try {
this.to.forEach(function(z) {
emit(z.toLowerCase(), {count: 1});
});
} catch(err) {}
}
"""
reducecode = """
function(k,v) {
var count = 0;
v.forEach(function(v) {
count += v["count"];
});
return {count: count};
}
"""
m = Code(mapcode)
r = Code(reducecode)
results = Email.objects(to__exists=True).map_reduce(m, r, 'inline')
for result in results:
try:
targs = Target.objects(email_address__iexact=result.key)
if not targs:
targs = [Target()]
targs[0].email_address = result.key.strip().lower()
for targ in targs:
targ.email_count = result.value['count']
targ.save()
except:
pass
mapcode = """
function() {
if ("division" in this) {
emit(this.division, {count: this.email_count})
}
}
"""
m = Code(mapcode)
try:
results = Target.objects().map_reduce(m, r, 'inline')
for result in results:
div = Division.objects(division__iexact=result.key).first()
if not div:
div = Division()
div.division = result.key
div.email_count = result.value['count']
div.save()
except:
raise
def campaign_date_stats():
"""
Generate Campaign date stats.
"""
emails = mongo_connector(settings.COL_EMAIL)
mapcode = """
function () {
try {
if ("campaign" in this) {
stats = {};
if ("isodate" in this) {
var d = new Date(this.isodate);
stats[new Date(d.getFullYear(), d.getMonth()).getTime()] = 1;
}
else {
stats[new Date(this.source[0].instances[0].date.getFullYear(), this.source[0].instances[0].date.getMonth()).getTime()] = 1;
}
emit({campaign:this.campaign[0].name}, stats);
} }
catch (err) {}
}
"""
reducecode = """
function reduce(key, values) {
var out = {};
function merge(a, b) {
for (var k in b) {
if (!b.hasOwnProperty(k)) {
continue;
}
a[k] = (a[k] || 0) + b[k];
}
}
for (var i=0; i < values.length; i++) {
merge(out, values[i]);
}
return out;
}
"""
m = Code(mapcode, {})
r = Code(reducecode, {})
results = emails.inline_map_reduce(m, r)
stat_coll = mongo_connector(settings.COL_STATISTICS)
stats = {}
stats["results"] = []
for result in results:
stats["results"].append({
"campaign": result["_id"]["campaign"],
"value": result["value"]
})
stat_coll.update({'name': 'campaign_monthly'}, {"$set": stats},
upsert=True)
|
|
# Copyright 2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import re
from itertools import takewhile
from datetime import timedelta
from devlib.collector import (CollectorBase, CollectorOutput,
CollectorOutputEntry)
from devlib.target import KernelConfigTristate
from devlib.exception import TargetStableError
class KernelLogEntry(object):
"""
Entry of the kernel ring buffer.
:param facility: facility the entry comes from
:type facility: str
:param level: log level
:type level: str
:param timestamp: Timestamp of the entry
:type timestamp: datetime.timedelta
:param msg: Content of the entry
:type msg: str
"""
_TIMESTAMP_MSG_REGEX = re.compile(r'\[(.*?)\] (.*)')
_RAW_LEVEL_REGEX = re.compile(r'<([0-9]+)>(.*)')
_PRETTY_LEVEL_REGEX = re.compile(r'\s*([a-z]+)\s*:([a-z]+)\s*:\s*(.*)')
def __init__(self, facility, level, timestamp, msg):
self.facility = facility
self.level = level
self.timestamp = timestamp
self.msg = msg
@classmethod
def from_str(cls, line):
"""
Parses a "dmesg --decode" output line, formatted as following:
kern :err : [3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
Or the more basic output given by "dmesg -r":
<3>[3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
"""
def parse_raw_level(line):
match = cls._RAW_LEVEL_REGEX.match(line)
if not match:
raise ValueError('dmesg entry format not recognized: {}'.format(line))
level, remainder = match.groups()
levels = DmesgCollector.LOG_LEVELS
# BusyBox dmesg can output numbers that need to wrap around
level = levels[int(level) % len(levels)]
return level, remainder
def parse_pretty_level(line):
match = cls._PRETTY_LEVEL_REGEX.match(line)
facility, level, remainder = match.groups()
return facility, level, remainder
def parse_timestamp_msg(line):
match = cls._TIMESTAMP_MSG_REGEX.match(line)
timestamp, msg = match.groups()
timestamp = timedelta(seconds=float(timestamp.strip()))
return timestamp, msg
line = line.strip()
# If we can parse the raw prio directly, that is a basic line
try:
level, remainder = parse_raw_level(line)
facility = None
except ValueError:
facility, level, remainder = parse_pretty_level(line)
timestamp, msg = parse_timestamp_msg(remainder)
return cls(
facility=facility,
level=level,
timestamp=timestamp,
msg=msg.strip(),
)
@classmethod
def from_dmesg_output(cls, dmesg_out):
"""
Return a generator of :class:`KernelLogEntry` for each line of the
output of dmesg command.
.. note:: The same restrictions on the dmesg output format as for
:meth:`from_str` apply.
"""
for line in dmesg_out.splitlines():
if line.strip():
yield cls.from_str(line)
def __str__(self):
facility = self.facility + ': ' if self.facility else ''
return '{facility}{level}: [{timestamp}] {msg}'.format(
facility=facility,
level=self.level,
timestamp=self.timestamp.total_seconds(),
msg=self.msg,
)
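# A minimal usage sketch for the parser above. The helper name is illustrative;
# the sample line is the "dmesg -r" style example from the from_str() docstring,
# and the assertions mirror the documented mapping (priority 3 -> "err", no
# facility information in the raw format).
def _example_parse_dmesg_line():
    entry = KernelLogEntry.from_str(
        '<3>[3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16')
    assert entry.level == 'err'
    assert entry.facility is None
    return entry.timestamp.total_seconds(), entry.msg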
class DmesgCollector(CollectorBase):
"""
Dmesg output collector.
:param level: Minimum log level to enable. All levels that are more
critical will be collected as well.
:type level: str
:param facility: Facility to record, see dmesg --help for the list.
:type facility: str
.. warning:: If BusyBox dmesg is used, facility and level will be ignored,
and the parsed entries will also lack that information.
"""
# taken from "dmesg --help"
# This list needs to be ordered by priority
LOG_LEVELS = [
"emerg", # system is unusable
"alert", # action must be taken immediately
"crit", # critical conditions
"err", # error conditions
"warn", # warning conditions
"notice", # normal but significant condition
"info", # informational
"debug", # debug-level messages
]
def __init__(self, target, level=LOG_LEVELS[-1], facility='kern'):
super(DmesgCollector, self).__init__(target)
if not target.is_rooted:
raise TargetStableError('Cannot collect dmesg on non-rooted target')
self.output_path = None
if level not in self.LOG_LEVELS:
raise ValueError('level needs to be one of: {}'.format(
', '.join(self.LOG_LEVELS)
))
self.level = level
# Check if dmesg is the BusyBox one, or the one from util-linux in a
# recent version.
# Note: BusyBox dmesg does not support -h, but will still print the
# help with an exit code of 1
self.basic_dmesg = '--force-prefix' not in \
self.target.execute('dmesg -h', check_exit_code=False)
self.facility = facility
self.needs_root = bool(target.config.typed_config.get(
'CONFIG_SECURITY_DMESG_RESTRICT', KernelConfigTristate.NO))
self.reset()
@property
def entries(self):
return KernelLogEntry.from_dmesg_output(self.dmesg_out)
def reset(self):
self.dmesg_out = None
def start(self):
self.reset()
# Empty the dmesg ring buffer. This requires root in all cases
self.target.execute('dmesg -c', as_root=True)
def stop(self):
levels_list = list(takewhile(
lambda level: level != self.level,
self.LOG_LEVELS
))
levels_list.append(self.level)
if self.basic_dmesg:
cmd = 'dmesg -r'
else:
cmd = 'dmesg --facility={facility} --force-prefix --decode --level={levels}'.format(
levels=','.join(levels_list),
facility=self.facility,
)
self.dmesg_out = self.target.execute(cmd, as_root=self.needs_root)
def set_output(self, output_path):
self.output_path = output_path
def get_data(self):
if self.output_path is None:
raise RuntimeError("Output path was not set.")
with open(self.output_path, 'wt') as f:
f.write(self.dmesg_out + '\n')
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
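# Hedged end-to-end sketch of the collector API above (start/stop/set_output/
# get_data). The `target` argument is assumed to be an already-connected,
# rooted devlib target; the helper name and output path are illustrative.
def _example_collect_dmesg(target, output_path='dmesg.log'):
    collector = DmesgCollector(target, level='warn', facility='kern')
    collector.start()
    # ... run the workload of interest here ...
    collector.stop()
    collector.set_output(output_path)
    return collector.get_data()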
|
|
import os
import re
import sys
import time
import fcntl
import socket
import select
import signal
import subprocess
import paramiko
import weakref
from contextlib import contextmanager
from unix.processes import Processes as _Processes
from unix.path import Path as _Path, escape
from unix.remote import Remote as _Remote
from unix.users import Users as _Users
from unix.groups import Groups as _Groups
from paramiko.py3compat import u, b
if sys.version_info.major < 3:
from pipes import quote
else:
from shlex import quote
#
# Logs.
#
import logging
logger = logging.getLogger('unix')
logger.setLevel('NOTSET')
cli_handler = logging.StreamHandler()
cli_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(cli_handler)
#
# Utils functions.
#
def instances(host):
return list(reversed([elt.__name__.replace('Host', '')
for elt in host.__class__.mro()[:-2]]))
def ishost(host, value):
return True if value in instances(host) else False
def isvalid(host):
if instances(host)[0] not in ('Local', 'Remote'):
raise ValueError("this is not a 'Local' or a 'Remote' host")
#
# Constants
#
# Available controls with their default values.
_CONTROLS = {'options_place': 'before',
'locale': 'en_US.utf-8',
'decode': 'utf-8',
'envs': {},
'timeout': 0,
'escape_args': True,
'shell': None,
'su': None}
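# Hedged sketch of temporarily overriding these controls through the
# Host.set_controls() context manager defined further down. The helper name is
# illustrative and `host` is assumed to be a Local or Remote instance.
def _example_with_controls(host):
    with host.set_controls(timeout=10, locale='C', escape_args=False):
        return host.execute('uname', s=True)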
# Errors.
_HOST_CLASS_ERR = ("don't use 'Host' class directly, use 'Local' or "
"'Remote' class instead.")
_NOT_CONNECTED_ERR = 'you are not connected'
_IP_ERR = 'unable to get an IPv4 or an IPv6 address.'
# Regular expression for matching IPv4 address.
_IPV4 = re.compile(r'^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$')
# Regular expression for matching IPv6 address.
_IPV6 = re.compile(r'^[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:'
'[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:'
'[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}$')
#
# Exceptions.
#
class UnixError(Exception):
pass
class TimeoutError(Exception):
pass
class timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
if self.seconds != 0:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
if self.seconds != 0:
signal.alarm(0)
#
# Abstract class for managing a host.
#
class Host(object):
"""Class that implement commands that are commons to local or remote
host."""
def __init__(self):
self.return_code = -1
for control, value in _CONTROLS.items():
setattr(self, '_%s' % control, value)
@property
def path(self):
return _Path(weakref.ref(self)())
@property
def remote(self):
return _Remote(weakref.ref(self)())
@property
def users(self):
return _Users(weakref.ref(self)())
@property
def groups(self):
return _Groups(weakref.ref(self)())
@property
def processes(self):
return _Processes(weakref.ref(self)())
@property
def controls(self):
return {control: getattr(self, '_%s' % control) for control in _CONTROLS}
def get_control(self, control):
if control not in _CONTROLS:
raise UnixError("invalid control '%s'" % control)
return getattr(self, '_%s' % control)
def set_control(self, control, value):
if control not in _CONTROLS:
raise UnixError("invalid control '%s'" % control)
setattr(self, '_%s' % control, value)
@contextmanager
def set_controls(self, **controls):
cur_controls = dict(self.controls)
try:
for control, value in controls.items():
self.set_control(control, value)
yield None
finally:
for control, value in cur_controls.items():
self.set_control(control, value)
def _format_command(self, cmd, args, options):
command = []
args = [quote(arg) if self._escape_args else arg for arg in args]
# Get environment variables (from the 'locale' and 'envs' controls).
envs = ({var: self._locale for var in ('LC_ALL', 'LANGUAGE', 'LANG')}
if self._locale
else {})
envs.update(self._envs)
# For the csh shell, environment variables must be declared with the 'env' keyword.
if envs and (self._shell or self.default_shell) == 'csh':
command.append('env')
command.extend('%s=%s' % (var, value) for var, value in sorted(envs.items()))
# Add command to execute.
command.append(cmd)
# Get special options.
stdin = options.pop('STDIN', None)
stdout = options.pop('STDOUT', None)
stderr = options.pop('STDERR', None)
# If the 'options_place' control is 'after', options go after the arguments, so add the arguments first.
if self._options_place == 'after':
command.extend([str(arg) for arg in args])
# Add options.
for option, value in options.items():
option = ('-%s' % option
if len(option) == 1
else '--%s' % option.replace('_', '-'))
if not isinstance(value, (list, tuple, set)):
value = [value]
command.extend(
option if isinstance(val, bool) else '{:s} {:s}'.format(option, val)
for val in value
if val)
# Add arguments now if 'options_place' control is set to 'before' (the default).
if self._options_place == 'before':
command.extend(args)
if stdin:
command.append(' < %s' % stdin)
if stdout:
command.append(' >> {:s}'.format(stdout[1:])
if stdout.startswith('+')
else ' > {:s}'.format(stdout))
if stderr:
command.append(' 2>> {:s}'.format(stderr[1:])
if stderr.startswith('+')
else ' 2> {:s}'.format(stderr))
command = ' '.join(map(str, command))
if self._shell:
command = '%s -c %s' % (self._shell, quote(command))
if self._su:
command = 'su - %s -c %s' % (self._su, quote(command))
logger.debug('[execute] %s' % command)
return command
def _manage_encoding(self, output):
return u(output, self._decode) if self._decode else b(output)
def execute(self):
raise NotImplementedError(_HOST_CLASS_ERR)
@property
def type(self):
"""Property that return the type of the operating system by executing
``uname -s`` command."""
return self.execute('uname', s=True)[1].splitlines()[0].lower()
@property
def arch(self):
"""Property that return the architecture of the operating system by
executing ``uname -m`` command."""
return self.execute('uname', m=True)[1].splitlines()[0]
@property
def hostname(self):
return self.execute('hostname')[1].splitlines()[0]
def list(self, path, **opts):
status, stdout, stderr = self.execute('ls', escape(path), **opts)
if not status:
raise OSError(stderr)
return stdout
def listdir(self, path, hidden=False):
"""List files in a directory.
.. note::
As the exception raised differs between the local ``os.listdir(path)``
function and the remote ``sftp.listdir(path)`` function, this method
uses the ``ls`` command to list the directory and raises **IOError** if
**path** does not exist or **OSError** on any other unexpected error.
"""
if not self.path.exists(path):
raise IOError("'%s' not exists" % path)
if not self.path.isdir(path):
raise IOError("'%s' is not a directory" % path)
# 'ls -1' lists one file per line.
opts = {'1': True}
if hidden:
opts.update(a=True)
return [filename
for filename in self.list(path, **opts).splitlines()
if filename not in ('.', '..')]
def touch(self, *paths, **options):
paths = [escape(path) for path in paths]
return self.execute('touch', *paths, **options)
def mkdir(self, *paths, **options):
"""Create a directory. *args and **options contains options that can be
passed to the command. **options can contain an additionnal key
*INTERACTIVE* that will be pass to ``execute`` function."""
paths = [escape(path) for path in paths]
return self.execute('mkdir', *paths, **options)
def copy(self, *paths, **options):
"""Copy **src** file or directory to **dst**. *paths and **options
contains options that can be passed to the command. **options can
contain an additionnal key *INTERACTIVE* that will be pass to
``execute`` function."""
paths = [escape(path) for path in paths]
return self.execute('cp', *paths, **options)
def move(self, *paths, **options):
paths = [escape(path) for path in paths]
return self.execute('mv', *paths, **options)
def remove(self, *paths, **options):
paths = [escape(path) for path in paths]
return self.execute('rm', *paths, **options)
def chmod(self, permissions, *paths, **options):
paths = [escape(path) for path in paths]
return self.execute('chmod', permissions, *paths, **options)
def chown(self, owner, *paths, **options):
paths = [escape(path) for path in paths]
return self.execute('chown', owner, *paths, **options)
def chgrp(self, group, *paths, **options):
paths = [escape(path) for path in paths]
return self.execute('chgrp', group, *paths, **options)
def which(self, command, **options):
try:
return self.execute('which', command, **options)[1].splitlines()[0]
except IndexError:
raise UnixError("which: unable to find command '%s'" % command)
def read(self, filepath):
with self.open(filepath) as fhandler:
return fhandler.read().decode()
def write(self, filepath, content):
with self.open(filepath, 'w') as fhandler:
fhandler.write(content)
def mount(self, device, mount_point, **options):
mount_point = escape(mount_point)
return self.execute('mount', device, mount_point, **options)
def umount(self, mount_point, **options):
mount_point = escape(mount_point)
return self.execute('umount', mount_point, **options)
@contextmanager
def mountfs(self, device, mount_point, **options):
try:
self.mount(device, mount_point, **options)
yield None
finally:
self.umount(mount_point)
def replace(self, filepath, pattern, replacement, backup=None):
with self.open(filepath) as fhandler:
new_content = re.sub(pattern, replacement, fhandler.read().decode())
if backup:
if not self.copy(filepath, '%s.%s' % (filepath, backup)):
return [False, '', 'unable to backup file']
with self.open(filepath, 'w') as fhandler:
fhandler.write(new_content)
return [True, '', '']
#
# Class for managing localhost (subprocess).
#
class Local(Host):
"""Implementing specifics functions of localhost."""
def __init__(self):
Host.__init__(self)
# Get the default shell (without using any environment variables, as
# shells differ in how they manage them).
with self.set_controls(locale='', envs={}):
self.default_shell = self.execute('echo $0')[1].strip()
@staticmethod
def clone(host):
new_host = Local()
new_host.__dict__.update(return_code=host.return_code)
new_host.__dict__.update(host.controls)
new_host.__dict__.update(default_shell=host.default_shell)
return new_host
@property
def username(self):
return self.users.username(os.getuid())
def is_connected(self):
pass
def execute(self, command, *args, **options):
"""Function that execute a command using english utf8 locale. The output
is a list of three elements: a boolean representing the status of the
command (True if return code equal to 0), the standard output (stdout)
and the error output (stderr). If **INTERACTIVE**, the command is
executed interactively (printing output in real time and waiting for
inputs) and stdout and stderr are empty. The return code of the last
command is put in *return_code* attribut."""
command = self._format_command(command, args, options)
with timeout(self._timeout):
try:
obj = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = obj.communicate()
self.return_code = obj.returncode
return [True if self.return_code == 0 else False,
self._manage_encoding(stdout),
self._manage_encoding(stderr)]
except OSError as err:
return [False,
self._manage_encoding(''),
self._manage_encoding(str(err))]
def interactive(self, command, *args, **options):
"""
"""
command = self._format_command(command, args, options)
self.return_code = subprocess.call(command,
shell=True,
stderr=subprocess.STDOUT)
def iter(self, command, *args, **options):
"""
"""
command = self._format_command(command, args, options)
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
# Prevent read* methods on the stdout and stderr buffers from waiting for
# new data (i.e. set the stdout and stderr files to non-blocking mode).
stdout_flags = fcntl.fcntl(process.stdout, fcntl.F_GETFL)
fcntl.fcntl(process.stdout, fcntl.F_SETFL, stdout_flags | os.O_NDELAY)
stderr_flags = fcntl.fcntl(process.stderr, fcntl.F_GETFL)
fcntl.fcntl(process.stderr, fcntl.F_SETFL, stderr_flags | os.O_NDELAY)
while process.poll() is None:
ready = select.select([process.stdout, process.stderr], [], [])
if process.stdout in ready[0]:
for line in process.stdout.read().splitlines():
yield (u'stdout', self._manage_encoding(line))
if process.stderr in ready[0]:
for line in process.stderr.read().splitlines():
yield (u'stderr', self._manage_encoding(line))
self.return_code = process.returncode
yield (u'status', True if self.return_code == 0 else False)
def open(self, filepath, mode='r'):
# For compatibility with the SFTPClient object, the file is always opened
# in binary mode.
if 'b' not in mode:
mode += 'b'
return open(filepath, mode)
def tail(self, filepath, delta=1):
prev_size = os.stat(filepath).st_size
while 1:
cur_size = os.stat(filepath).st_size
# File has been rotated.
if cur_size < prev_size:
with self.open(filepath) as fhandler:
for line in fhandler.read().splitlines():
yield line
else:
with self.open(filepath) as fhandler:
fhandler.seek(prev_size, 0)
for line in fhandler.read().splitlines():
yield line
prev_size = cur_size
time.sleep(delta)
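# Hedged usage sketch of Local: execute() returns the [status, stdout, stderr]
# list described in its docstring, while iter() streams ('stdout'/'stderr',
# line) tuples followed by a ('status', bool) tuple. The helper name, command
# and arguments are illustrative.
def _example_local_usage():
    local = Local()
    status, stdout, stderr = local.execute('ls', '/tmp', l=True)
    # iter() streams output instead of buffering it.
    lines = [line for stream, line in local.iter('ls', '/tmp')
             if stream == 'stdout']
    return status, stdout, stderr, lines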
#
# Context Manager for connecting to a remote host.
#
class connect(object):
def __init__(self, host, force_ssh=False, **kwargs):
self.hostname = host
self.options = kwargs
self.force_ssh = force_ssh
def __enter__(self):
if self.hostname == 'localhost' and not self.force_ssh:
self._host = Local()
else:
self._host = Remote()
self._host.connect(self.hostname, **self.options)
return self._host
def __exit__(self, type, value, traceback):
if self.hostname != 'localhost':
self._host.disconnect()
del self._host
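# Hedged sketch of the connect() context manager defined above: 'localhost'
# yields a Local host, anything else a Remote one. The hostname and command
# are illustrative.
def _example_connect(hostname='localhost'):
    with connect(hostname) as host:
        return host.execute('uname', s=True)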
#
# Class for managing a remote host with SSH (paramiko).
#
class Remote(Host):
def __init__(self):
Host.__init__(self)
self.forward_agent = True
self.ip = None
self.ipv4 = None
self.ipv6 = None
self.fqdn = None
self.username = None
self._conn = None
@staticmethod
def clone(host):
"""Static method for enforcing clone of this object."""
new_host = Remote()
new_host.__dict__.update(return_code=host.return_code)
new_host.__dict__.update(host.controls)
attrs = ('ip', 'ipv4', 'ipv6', 'fqdn', 'username', 'default_shell')
new_host.__dict__.update({attr: getattr(host, attr) for attr in attrs})
if hasattr(host, '_conn'):
new_host.__dict__.update(_conn=host._conn)
return new_host
def __ipv4(self):
try:
return socket.getaddrinfo(self.fqdn, 22, 2, 1, 6)[0][4][0]
except socket.gaierror:
return None
def __ipv6(self):
try:
return socket.getaddrinfo(self.fqdn, 22, 10, 1, 6)[0][4][0]
except socket.gaierror:
return None
def __fqdn(self):
try:
if self.ipv4:
return socket.gethostbyaddr(self.ipv4)[0]
elif self.ipv6:
return socket.gethostbyaddr(self.ipv6)[0]
else:
return None
except socket.herror:
return None
def connect(self, host, **kwargs):
keepalive = kwargs.pop('keepalive', 0)
self.forward_agent = kwargs.pop('forward_agent', True)
self.username = kwargs.pop('username', 'root')
if _IPV4.match(host):
self.ipv4 = host
self.fqdn = self.__fqdn()
self.ipv6 = self.__ipv6()
elif _IPV6.match(host):
self.ipv6 = host
self.ipv4 = self.__ipv4()
self.fqdn = self.__fqdn()
else:
self.fqdn = host
self.ipv4 = self.__ipv4()
self.ipv6 = self.__ipv6()
self.fqdn = self.__fqdn()
if not self.ipv4 and not self.ipv6:
raise UnixError(_IP_ERR)
self.ip = (self.ipv6 if self.ipv6 and kwargs.pop('ipv6', False)
else self.ipv4)
params = {'username': self.username}
for param, value in kwargs.items():
params[param] = value
self._conn = paramiko.SSHClient()
try:
self._conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._conn.connect(self.ip, **params)
except Exception as err:
raise UnixError(err)
# Add keepalive on connection.
self._conn.get_transport().set_keepalive(keepalive)
# Optimizations for file transfer
# (see https://github.com/paramiko/paramiko/issues/175)
# From 6Mb/s to 12Mb/s => still very slow (scp = 40Mb/s)!
self._conn.get_transport().window_size = 2147483647
self._conn.get_transport().packetizer.REKEY_BYTES = pow(2, 40)
self._conn.get_transport().packetizer.REKEY_PACKETS = pow(2, 40)
# Get the default shell (without using any environment variables, as
# shells differ in how they manage them).
with self.set_controls(locale='', envs={}):
self.default_shell = self.execute('echo $0')[1].strip()
def disconnect(self):
self._conn.close()
def is_connected(self):
if self._conn is None or not self._conn.get_transport():
raise UnixError(_NOT_CONNECTED_ERR)
@contextmanager
def _get_chan(self, get_pty=False):
self.is_connected()
chan = self._conn.get_transport().open_session()
try:
if get_pty:
chan.get_pty()
yield chan
finally:
chan.close()
@contextmanager
def _forward_agent(self, chan):
forward = (paramiko.agent.AgentRequestHandler(chan)
if self.forward_agent
else None)
try:
yield forward
finally:
if forward:
forward.close()
def execute(self, command, *args, **options):
with self._get_chan(options.pop('get_pty', False)) as chan:
with self._forward_agent(chan):
with timeout(self._timeout):
command = self._format_command(command, args, options)
chan.exec_command(command)
self.return_code = chan.recv_exit_status()
stdout = chan.makefile('rb', -1).read()
stderr = chan.makefile_stderr('rb', -1).read()
return [True if self.return_code == 0 else False,
self._manage_encoding(stdout),
self._manage_encoding(stderr)]
def interactive(self, command, *args, **options):
import termios
import tty
with self._get_chan(options.pop('get_pty', False)) as chan:
with self._forward_agent(chan):
with timeout(self._timeout):
command = self._format_command(command, args, options)
chan.get_pty()
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
chan.exec_command(command)
while True:
rlist = select.select([chan, sys.stdin], [], [])[0]
if chan in rlist:
try:
data = self._manage_encoding(chan.recv(1024))
if len(data) == 0:
break
sys.stdout.write(data)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in rlist:
data = sys.stdin.read(1)
if len(data) == 0:
break
chan.send(data)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
def iter(self, command, *args, **options):
with self._get_chan(options.pop('get_pty', False)) as chan:
with self._forward_agent(chan):
with timeout(self._timeout):
# Non-blocking mode.
chan.settimeout(0.0)
command = self._format_command(command, args, options)
chan.exec_command(command)
end = False
while not end:
try:
stdout = chan.recv(1024)
for line in stdout.splitlines():
yield (u('stdout'), self._manage_encoding(line))
if not stdout:
end = True
except socket.timeout:
pass
try:
for line in chan.recv_stderr(1024).splitlines():
yield (u('stderr'), self._manage_encoding(line))
except socket.timeout:
pass
self.return_code = chan.recv_exit_status()
yield ('status', True if self.return_code == 0 else False)
def open(self, filepath, mode='r'):
self.is_connected()
sftp = paramiko.SFTPClient.from_transport(self._conn.get_transport())
# The file is always opened in binary mode: the 'readline' function
# decodes the line if binary mode is not specified, so force binary mode
# and let the client program decode the lines.
if 'b' not in mode:
mode += 'b'
return sftp.open(filepath, mode)
def tail(self, filepath, delta=1):
sftp = paramiko.SFTPClient.from_transport(self._conn.get_transport())
prev_size = sftp.stat(filepath).st_size
while 1:
with timeout(self._timeout):
cur_size = sftp.stat(filepath).st_size
# File has been rotated.
if cur_size < prev_size:
with self.open(filepath) as fhandler:
for line in fhandler.read().splitlines():
yield line.decode()
else:
with self.open(filepath) as fhandler:
fhandler.seek(prev_size, 0)
for line in fhandler.read().splitlines():
yield line.decode()
prev_size = cur_size
time.sleep(delta)
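# Hedged sketch of direct Remote usage. The helper name and hostname are
# illustrative, and the connection relies on the default paramiko
# authentication (SSH agent/keys) set up in connect() above.
def _example_remote_usage(hostname):
    host = Remote()
    host.connect(hostname, username='root')
    try:
        return host.execute('hostname')
    finally:
        host.disconnect()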
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source for reading from VCF files (version 4.x).
The 4.2 spec is available at https://samtools.github.io/hts-specs/VCFv4.2.pdf.
"""
from __future__ import absolute_import
import logging
import sys
import traceback
import warnings
from builtins import next
from builtins import object
from collections import namedtuple
from future.utils import iteritems
from past.builtins import long
from past.builtins import unicode
from apache_beam.coders import coders
from apache_beam.io import filebasedsource
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.textio import _TextSource as TextSource
from apache_beam.transforms import PTransform
if sys.version_info[0] < 3:
import vcf
else:
warnings.warn("VCF IO will support Python 3 after migration to Nucleus, "
"see: BEAM-5628.")
__all__ = ['ReadFromVcf', 'Variant', 'VariantCall', 'VariantInfo',
'MalformedVcfRecord']
# Stores data about variant INFO fields. The type of 'data' is specified in the
# VCF headers. 'field_count' is a string that specifies the number of fields
# that the data type contains. Its value can either be a number representing a
# constant number of fields, `None` indicating that the value is not set
# (equivalent to '.' in the VCF file) or one of:
# - 'A': one value per alternate allele.
# - 'G': one value for each possible genotype.
# - 'R': one value for each possible allele (including the reference).
VariantInfo = namedtuple('VariantInfo', ['data', 'field_count'])
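# Hedged illustration of the field_count convention described above: an INFO
# field carrying one value per alternate allele would use 'A'. The name and
# values below are made up for illustration.
_EXAMPLE_PER_ALT_INFO = VariantInfo(data=[0.5], field_count='A')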
# Stores data about failed VCF record reads. `line` is the text line that
# caused the failed read and `file_name` is the name of the file that the read
# failed in.
MalformedVcfRecord = namedtuple('MalformedVcfRecord', ['file_name', 'line'])
MISSING_FIELD_VALUE = '.' # Indicates field is missing in VCF record.
PASS_FILTER = 'PASS' # Indicates that all filters have been passed.
END_INFO_KEY = 'END' # The info key that explicitly specifies end of a record.
GENOTYPE_FORMAT_KEY = 'GT' # The genotype format key in a call.
PHASESET_FORMAT_KEY = 'PS' # The phaseset format key.
DEFAULT_PHASESET_VALUE = '*' # Default phaseset value if call is phased, but
# no 'PS' is present.
MISSING_GENOTYPE_VALUE = -1 # Genotype to use when '.' is used in GT field.
class Variant(object):
"""A class to store info about a genomic variant.
Each object corresponds to a single record in a VCF file.
"""
__hash__ = None
def __init__(self,
reference_name=None,
start=None,
end=None,
reference_bases=None,
alternate_bases=None,
names=None,
quality=None,
filters=None,
info=None,
calls=None):
"""Initialize the :class:`Variant` object.
Args:
reference_name (str): The reference on which this variant occurs
(such as `chr20` or `X`).
start (int): The position at which this variant occurs (0-based).
Corresponds to the first base of the string of reference bases.
end (int): The end position (0-based) of this variant. Corresponds to the
first base after the last base in the reference allele.
reference_bases (str): The reference bases for this variant.
alternate_bases (List[str]): The bases that appear instead of the
reference bases.
names (List[str]): Names for the variant, for example a RefSNP ID.
quality (float): Phred-scaled quality score (-10log10 prob(call is wrong)).
Higher values imply better quality.
filters (List[str]): A list of filters (normally quality filters) this
variant has failed. `PASS` indicates this variant has passed all
filters.
info (dict): A map of additional variant information. The key is specified
in the VCF record and the value is of type ``VariantInfo``.
calls (list of :class:`VariantCall`): The variant calls for this variant.
Each one represents the determination of genotype with respect to this
variant.
"""
self.reference_name = reference_name
self.start = start
self.end = end
self.reference_bases = reference_bases
self.alternate_bases = alternate_bases or []
self.names = names or []
self.quality = quality
self.filters = filters or []
self.info = info or {}
self.calls = calls or []
def __eq__(self, other):
return (isinstance(other, Variant) and
vars(self) == vars(other))
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __repr__(self):
return ', '.join(
[str(s) for s in [self.reference_name,
self.start,
self.end,
self.reference_bases,
self.alternate_bases,
self.names,
self.quality,
self.filters,
self.info,
self.calls]])
def __lt__(self, other):
if not isinstance(other, Variant):
return NotImplemented
# Elements should first be sorted by reference_name, start, end.
# Ordering of other members is not important, but must be
# deterministic.
if self.reference_name != other.reference_name:
return self.reference_name < other.reference_name
elif self.start != other.start:
return self.start < other.start
elif self.end != other.end:
return self.end < other.end
self_vars = vars(self)
other_vars = vars(other)
for key in sorted(self_vars):
if self_vars[key] != other_vars[key]:
return self_vars[key] < other_vars[key]
return False
def __le__(self, other):
if not isinstance(other, Variant):
return NotImplemented
return self < other or self == other
def __gt__(self, other):
if not isinstance(other, Variant):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Variant):
return NotImplemented
return other <= self
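# Hedged construction sketch for the Variant data model documented above; the
# helper name and every field value are illustrative, not taken from a real
# VCF record.
def _example_variant():
    return Variant(
        reference_name='20', start=1233, end=1234,
        reference_bases='C', alternate_bases=['T'],
        names=['rs123'], quality=50.0, filters=[PASS_FILTER],
        info={'AF': VariantInfo(data=[0.5], field_count='A')})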
class VariantCall(object):
"""A class to store info about a variant call.
A call represents the determination of genotype with respect to a particular
variant. It may include associated information such as quality and phasing.
"""
__hash__ = None
def __init__(self, name=None, genotype=None, phaseset=None, info=None):
"""Initialize the :class:`VariantCall` object.
Args:
name (str): The name of the call.
genotype (List[int]): The genotype of this variant call as specified by
the VCF schema. The values are either `0` representing the reference,
or a 1-based index into alternate bases. Ordering is only important if
`phaseset` is present. If a genotype is not called (that is, a `.` is
present in the GT string), -1 is used.
phaseset (str): If this field is present, this variant call's genotype
ordering implies the phase of the bases and is consistent with any other
variant calls in the same reference sequence which have the same
phaseset value. If the genotype data was phased but no phase set was
specified, this field will be set to `*`.
info (dict): A map of additional variant call information. The key is
specified in the VCF record and the type of the value is specified by
the VCF header FORMAT.
"""
self.name = name
self.genotype = genotype or []
self.phaseset = phaseset
self.info = info or {}
def __eq__(self, other):
return ((self.name, self.genotype, self.phaseset, self.info) ==
(other.name, other.genotype, other.phaseset, other.info))
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __repr__(self):
return ', '.join(
[str(s) for s in [self.name, self.genotype, self.phaseset, self.info]])
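# Hedged sketch of the genotype/phaseset conventions documented above: a phased
# heterozygous call with no explicit PS value falls back to the '*' default
# phaseset. The helper name and values are illustrative.
def _example_variant_call():
    return VariantCall(name='SAMPLE1', genotype=[0, 1],
                       phaseset=DEFAULT_PHASESET_VALUE)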
class _VcfSource(filebasedsource.FileBasedSource):
"""A source for reading VCF files.
Parses VCF files (version 4) using PyVCF library. If file_pattern specifies
multiple files, then the header from each file is used separately to parse
the content. However, the output will be a uniform PCollection of
:class:`Variant` objects.
"""
DEFAULT_VCF_READ_BUFFER_SIZE = 65536 # 64kB
def __init__(self,
file_pattern,
compression_type=CompressionTypes.AUTO,
buffer_size=DEFAULT_VCF_READ_BUFFER_SIZE,
validate=True,
allow_malformed_records=False):
super(_VcfSource, self).__init__(file_pattern,
compression_type=compression_type,
validate=validate)
self._header_lines_per_file = {}
self._compression_type = compression_type
self._buffer_size = buffer_size
self._allow_malformed_records = allow_malformed_records
def read_records(self, file_name, range_tracker):
record_iterator = _VcfSource._VcfRecordIterator(
file_name,
range_tracker,
self._pattern,
self._compression_type,
self._allow_malformed_records,
buffer_size=self._buffer_size,
skip_header_lines=0)
# Convert iterator to generator to abstract behavior
for line in record_iterator:
yield line
class _VcfRecordIterator(object):
"""An Iterator for processing a single VCF file."""
def __init__(self,
file_name,
range_tracker,
file_pattern,
compression_type,
allow_malformed_records,
**kwargs):
self._header_lines = []
self._last_record = None
self._file_name = file_name
self._allow_malformed_records = allow_malformed_records
text_source = TextSource(
file_pattern,
0, # min_bundle_size
compression_type,
True, # strip_trailing_newlines
coders.StrUtf8Coder(), # coder
validate=False,
header_processor_fns=(lambda x: x.startswith('#'),
self._store_header_lines),
**kwargs)
self._text_lines = text_source.read_records(self._file_name,
range_tracker)
try:
self._vcf_reader = vcf.Reader(fsock=self._create_generator())
except SyntaxError as e:
# Throw the exception inside the generator to ensure file is properly
# closed (it's opened inside TextSource.read_records).
self._text_lines.throw(
ValueError('An exception was raised when reading header from VCF '
'file %s: %s' % (self._file_name,
traceback.format_exc(e))))
def _store_header_lines(self, header_lines):
self._header_lines = header_lines
def _create_generator(self):
header_processed = False
for text_line in self._text_lines:
if not header_processed and self._header_lines:
for header in self._header_lines:
self._last_record = header
yield self._last_record
header_processed = True
# PyVCF has explicit str() calls when parsing INFO fields, which fails
# with UTF-8 decoded strings. Encode the line back to UTF-8.
self._last_record = text_line.encode('utf-8')
yield self._last_record
def __iter__(self):
return self
# pylint: disable=next-method-defined
def next(self):
return self.__next__()
def __next__(self):
try:
record = next(self._vcf_reader)
return self._convert_to_variant_record(record, self._vcf_reader.infos,
self._vcf_reader.formats)
except (LookupError, ValueError) as e:
if self._allow_malformed_records:
logging.warning(
'An exception was raised when reading record from VCF file '
'%s. Invalid record was %s: %s',
self._file_name, self._last_record, traceback.format_exc(e))
return MalformedVcfRecord(self._file_name, self._last_record)
# Throw the exception inside the generator to ensure file is properly
# closed (it's opened inside TextSource.read_records).
self._text_lines.throw(
ValueError('An exception was raised when reading record from VCF '
'file %s. Invalid record was %s: %s' % (
self._file_name,
self._last_record,
traceback.format_exc(e))))
def _convert_to_variant_record(self, record, infos, formats):
"""Converts the PyVCF record to a :class:`Variant` object.
Args:
record (:class:`~vcf.model._Record`): An object containing info about a
variant.
infos (dict): The PyVCF dict storing INFO extracted from the VCF header.
The key is the info key and the value is :class:`~vcf.parser._Info`.
formats (dict): The PyVCF dict storing FORMAT extracted from the VCF
header. The key is the FORMAT key and the value is
:class:`~vcf.parser._Format`.
Returns:
A :class:`Variant` object from the given record.
"""
variant = Variant()
variant.reference_name = record.CHROM
variant.start = record.start
variant.end = record.end
variant.reference_bases = (
record.REF if record.REF != MISSING_FIELD_VALUE else None)
# ALT fields are classes in PyVCF (e.g. Substitution), so we need to
# convert them to their string representations.
variant.alternate_bases.extend(
[str(r) for r in record.ALT if r] if record.ALT else [])
variant.names.extend(record.ID.split(';') if record.ID else [])
variant.quality = record.QUAL
# PyVCF uses None for '.' and an empty list for 'PASS'.
if record.FILTER is not None:
variant.filters.extend(
record.FILTER if record.FILTER else [PASS_FILTER])
for k, v in iteritems(record.INFO):
# Special case: END info value specifies end of the record, so adjust
# variant.end and do not include it as part of variant.info.
if k == END_INFO_KEY:
variant.end = v
continue
field_count = None
if k in infos:
field_count = self._get_field_count_as_string(infos[k].num)
variant.info[k] = VariantInfo(data=v, field_count=field_count)
for sample in record.samples:
call = VariantCall()
call.name = sample.sample
for allele in sample.gt_alleles or [MISSING_GENOTYPE_VALUE]:
if allele is None:
allele = MISSING_GENOTYPE_VALUE
call.genotype.append(int(allele))
phaseset_from_format = (getattr(sample.data, PHASESET_FORMAT_KEY)
if PHASESET_FORMAT_KEY in sample.data._fields
else None)
# Note: Call is considered phased if it contains the 'PS' key regardless
# of whether it uses '|'.
if phaseset_from_format or sample.phased:
call.phaseset = (str(phaseset_from_format) if phaseset_from_format
else DEFAULT_PHASESET_VALUE)
for field in sample.data._fields:
# Genotype and phaseset (if present) are already included.
if field in (GENOTYPE_FORMAT_KEY, PHASESET_FORMAT_KEY):
continue
data = getattr(sample.data, field)
# Convert single values to a list for cases where the number of fields
# is unknown. This is to ensure consistent types across all records.
# Note: this is already done for INFO fields in PyVCF.
if (field in formats and
formats[field].num is None and
isinstance(data, (int, float, long, str, unicode, bool))):
data = [data]
call.info[field] = data
variant.calls.append(call)
return variant
def _get_field_count_as_string(self, field_count):
"""Returns the string representation of field_count from PyVCF.
PyVCF converts field counts to an integer with some predefined constants
as specified in the vcf.parser.field_counts dict (e.g. 'A' is -1). This
method converts them back to their string representation to avoid having
direct dependency on the arbitrary PyVCF constants.
Args:
field_count (int): An integer representing the number of fields in INFO
as specified by PyVCF.
Returns:
A string representation of field_count (e.g. '-1' becomes 'A').
Raises:
ValueError: if the field_count is not valid.
"""
if field_count is None:
return None
elif field_count >= 0:
return str(field_count)
field_count_to_string = {v: k for k, v in vcf.parser.field_counts.items()}
if field_count in field_count_to_string:
return field_count_to_string[field_count]
else:
raise ValueError('Invalid value for field_count: %d' % field_count)
class ReadFromVcf(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading VCF
files.
Parses VCF files (version 4) using PyVCF library. If file_pattern specifies
multiple files, then the header from each file is used separately to parse
the content. However, the output will be a PCollection of
:class:`Variant` (or :class:`MalformedVcfRecord` for failed reads) objects.
"""
def __init__(
self,
file_pattern=None,
compression_type=CompressionTypes.AUTO,
validate=True,
allow_malformed_records=False,
**kwargs):
"""Initialize the :class:`ReadFromVcf` transform.
Args:
file_pattern (str): The file path to read from either as a single file or
a glob pattern.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
allow_malformed_records (bool): determines if failed VCF
record reads will be tolerated. Failed record reads will result in a
:class:`MalformedVcfRecord` being returned from the read of the record
rather than a :class:`Variant`.
"""
super(ReadFromVcf, self).__init__(**kwargs)
self._source = _VcfSource(
file_pattern,
compression_type,
validate=validate,
allow_malformed_records=allow_malformed_records)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
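# Hedged pipeline sketch for the transform above; the helper name is
# illustrative and the pipeline object and file pattern are assumed to be
# supplied by the caller.
def _example_read_vcf(pipeline, file_pattern):
    return pipeline | 'ReadVcf' >> ReadFromVcf(file_pattern)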
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
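# Shape walkthrough (illustrative; assumes the 24x24 crops produced by the
# standard CIFAR-10 input pipeline): conv1/pool1 -> [batch, 12, 12, 64],
# conv2/pool2 -> [batch, 6, 6, 64], reshape -> [batch, 2304],
# local3 -> [batch, 384], local4 -> [batch, 192],
# softmax_linear -> [batch, NUM_CLASSES].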
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name +' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
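# Note (illustrative): ExponentialMovingAverage(0.9) maintains, for each loss l,
# a shadow value updated as shadow = 0.9 * shadow + 0.1 * l on every apply(),
# i.e. roughly an average over the last ~10 batches. The raw and averaged
# values are what the two scalar summaries above record.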
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
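# Learning-rate schedule (illustrative): with staircase=True the decay is
# applied in discrete steps, so
#   lr = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** (global_step // decay_steps)
# where decay_steps corresponds to NUM_EPOCHS_PER_DECAY epochs of training.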
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
|
# coding=utf-8
"""
sqldiff.py - Prints the (approximated) difference between models and database
TODO:
- better support for relations
- better support for constraints (mainly postgresql?)
- support for table spaces with postgresql
 - when a table is not managed (meta.managed == False), only do a one-way
   sqldiff: show differences from db -> model, but not the other way around,
   since the table is not managed.
KNOWN ISSUES:
 - MySQL has by far the most problems with introspection. Please be
   careful when using MySQL with sqldiff.
- Booleans are reported back as Integers, so there's no way to know if
there was a real change.
 - Varchar sizes are reported back without unicode support, so the reported
   size may differ from the real length of the varchar.
- Some of the 'fixes' to counter these problems might create false
positives or false negatives.
"""
import sys
import django
import six
from django.core.management import CommandError, sql as _sql
from django.core.management.color import no_style
from django.db import connection, transaction
from django.db.models.fields import AutoField, IntegerField
from django_extensions.compat import get_app_models
from django_extensions.management.utils import signalcommand
from django_extensions.compat import CompatibilityBaseCommand as BaseCommand
try:
from django.core.management.base import OutputWrapper
HAS_OUTPUTWRAPPER = True
except ImportError:
HAS_OUTPUTWRAPPER = False
ORDERING_FIELD = IntegerField('_order', null=True)
def flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
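# Illustrative behaviour of flatten() (not part of the original module): nested
# lists/tuples are spliced into the outer sequence, and the outer type is kept.
#
#   flatten([1, [2, (3, 4)], 5])  -> [1, 2, 3, 4, 5]
#   flatten((1, [2, [], [3]]))    -> (1, 2, 3)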
def all_local_fields(meta):
all_fields = []
if meta.proxy:
for parent in meta.parents:
all_fields.extend(all_local_fields(parent._meta))
else:
for f in meta.local_fields:
col_type = f.db_type(connection=connection)
if col_type is None:
continue
all_fields.append(f)
return all_fields
class SQLDiff(object):
DATA_TYPES_REVERSE_OVERRIDE = {}
IGNORE_MISSING_TABLES = [
"django_migrations",
"south_migrationhistory",
]
DIFF_TYPES = [
'error',
'comment',
'table-missing-in-db',
'table-missing-in-model',
'field-missing-in-db',
'field-missing-in-model',
'fkey-missing-in-db',
'fkey-missing-in-model',
'index-missing-in-db',
'index-missing-in-model',
'unique-missing-in-db',
'unique-missing-in-model',
'field-type-differ',
'field-parameter-differ',
'notnull-differ',
]
DIFF_TEXTS = {
'error': 'error: %(0)s',
'comment': 'comment: %(0)s',
'table-missing-in-db': "table '%(0)s' missing in database",
'table-missing-in-model': "table '%(0)s' missing in models",
'field-missing-in-db': "field '%(1)s' defined in model but missing in database",
'field-missing-in-model': "field '%(1)s' defined in database but missing in model",
'fkey-missing-in-db': "field '%(1)s' FOREIGN KEY defined in model but missing in database",
'fkey-missing-in-model': "field '%(1)s' FOREIGN KEY defined in database but missing in model",
'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database",
'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model",
'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database",
'unique-missing-in-model': "field '%(1)s' UNIQUE defined in database schema but missing in model",
'field-type-differ': "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'",
'field-parameter-differ': "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'",
'notnull-differ': "field '%(1)s' null constraint should be '%(2)s' in the database",
}
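    # Illustrative example: print_diff_text() fills the positional placeholders
    # from the diff args by (stringified) index, so for a hypothetical diff
    # ('field-missing-in-db', ('example_table', 'age')) the formatted text
    # (before styling) is "field 'age' defined in model but missing in database".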
SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[2:])))
SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1])))
SQL_FKEY_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s %s (%s)%s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[4:])), style.SQL_KEYWORD('REFERENCES'), style.SQL_TABLE(qn(args[2])), style.SQL_FIELD(qn(args[3])), connection.ops.deferrable_sql())
SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[3]))
# FIXME: need to lookup index name instead of just appending _idx to table + fieldname
SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a))))
SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1])))
# FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname
SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2])))))
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('MODIFY'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))
SQL_ERROR = lambda self, style, qn, args: style.NOTICE('-- Error: %s' % style.ERROR(args[0]))
SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0]))
SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0])
SQL_TABLE_MISSING_IN_MODEL = lambda self, style, qn, args: style.NOTICE('-- Model missing for table: %s' % args[0])
can_detect_notnull_differ = False
can_detect_unsigned_differ = False
unsigned_suffix = None
def __init__(self, app_models, options):
self.has_differences = None
self.app_models = app_models
self.options = options
self.dense = options.get('dense_output', False)
try:
self.introspection = connection.introspection
except AttributeError:
from django.db import get_introspection_module
self.introspection = get_introspection_module()
self.cursor = connection.cursor()
self.django_tables = self.get_django_tables(options.get('only_existing', True))
self.db_tables = self.introspection.get_table_list(self.cursor)
if django.VERSION[:2] >= (1, 8):
# TODO: We are losing information about tables which are views here
self.db_tables = [table_info.name for table_info in self.db_tables]
self.differences = []
self.unknown_db_fields = {}
self.new_db_fields = set()
self.null = {}
self.unsigned = set()
self.DIFF_SQL = {
'error': self.SQL_ERROR,
'comment': self.SQL_COMMENT,
'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB,
'table-missing-in-model': self.SQL_TABLE_MISSING_IN_MODEL,
'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB,
'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
'fkey-missing-in-db': self.SQL_FKEY_MISSING_IN_DB,
'fkey-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB,
'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL,
'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB,
'unique-missing-in-model': self.SQL_UNIQUE_MISSING_IN_MODEL,
'field-type-differ': self.SQL_FIELD_TYPE_DIFFER,
'field-parameter-differ': self.SQL_FIELD_PARAMETER_DIFFER,
'notnull-differ': self.SQL_NOTNULL_DIFFER,
}
if self.can_detect_notnull_differ:
self.load_null()
if self.can_detect_unsigned_differ:
self.load_unsigned()
def load_null(self):
raise NotImplementedError("load_null functions must be implemented if diff backend has 'can_detect_notnull_differ' set to True")
def load_unsigned(self):
raise NotImplementedError("load_unsigned function must be implemented if diff backend has 'can_detect_unsigned_differ' set to True")
def add_app_model_marker(self, app_label, model_name):
self.differences.append((app_label, model_name, []))
def add_difference(self, diff_type, *args):
assert diff_type in self.DIFF_TYPES, 'Unknown difference type'
self.differences[-1][-1].append((diff_type, args))
def get_django_tables(self, only_existing):
try:
django_tables = self.introspection.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
try:
django_tables = _sql.django_table_names(only_existing=only_existing)
except AttributeError:
# backwards compatibility for before svn r7568
django_tables = _sql.django_table_list(only_existing=only_existing)
return django_tables
def sql_to_dict(self, query, param):
""" sql_to_dict(query, param) -> list of dicts
code from snippet at http://www.djangosnippets.org/snippets/1383/
"""
cursor = connection.cursor()
cursor.execute(query, param)
fieldnames = [name[0] for name in cursor.description]
result = []
for row in cursor.fetchall():
rowset = []
for field in zip(fieldnames, row):
rowset.append(field)
result.append(dict(rowset))
return result
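    # Illustrative usage (assuming a hypothetical table "example" with columns
    # id and name):
    #   self.sql_to_dict("SELECT id, name FROM example WHERE id = %s", [1])
    #   -> [{'id': 1, 'name': 'first'}]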
def get_field_model_type(self, field):
return field.db_type(connection=connection)
def get_field_db_type(self, description, field=None, table_name=None):
from django.db import models
# DB-API cursor.description
# (name, type_code, display_size, internal_size, precision, scale, null_ok) = description
type_code = description[1]
if type_code in self.DATA_TYPES_REVERSE_OVERRIDE:
reverse_type = self.DATA_TYPES_REVERSE_OVERRIDE[type_code]
else:
try:
try:
reverse_type = self.introspection.data_types_reverse[type_code]
except AttributeError:
# backwards compatibility for before introspection refactoring (r8296)
reverse_type = self.introspection.DATA_TYPES_REVERSE.get(type_code)
except KeyError:
reverse_type = self.get_field_db_type_lookup(type_code)
if not reverse_type:
# type_code not found in data_types_reverse map
key = (self.differences[-1][:2], description[:2])
if key not in self.unknown_db_fields:
self.unknown_db_fields[key] = 1
self.add_difference('comment', "Unknown database type for field '%s' (%s)" % (description[0], type_code))
return None
kwargs = {}
if type_code == 16946 and field and getattr(field, 'geom_type', None) == 'POINT':
reverse_type = 'django.contrib.gis.db.models.fields.PointField'
if isinstance(reverse_type, tuple):
kwargs.update(reverse_type[1])
reverse_type = reverse_type[0]
if reverse_type == "CharField" and description[3]:
kwargs['max_length'] = description[3]
if reverse_type == "DecimalField":
kwargs['max_digits'] = description[4]
kwargs['decimal_places'] = description[5] and abs(description[5]) or description[5]
if description[6]:
kwargs['blank'] = True
if reverse_type not in ('TextField', 'CharField'):
kwargs['null'] = True
if field and getattr(field, 'geography', False):
kwargs['geography'] = True
if '.' in reverse_type:
from django_extensions.compat import importlib
module_path, package_name = reverse_type.rsplit('.', 1)
module = importlib.import_module(module_path)
field_db_type = getattr(module, package_name)(**kwargs).db_type(connection=connection)
else:
field_db_type = getattr(models, reverse_type)(**kwargs).db_type(connection=connection)
tablespace = field.db_tablespace
if not tablespace:
tablespace = "public"
if (tablespace, table_name, field.column) in self.unsigned:
field_db_type = '%s %s' % (field_db_type, self.unsigned_suffix)
return field_db_type
def get_field_db_type_lookup(self, type_code):
return None
def get_field_db_nullable(self, field, table_name):
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
attname = field.db_column or field.attname
return self.null.get((tablespace, table_name, attname), 'fixme')
def strip_parameters(self, field_type):
if field_type and field_type != 'double precision':
return field_type.split(" ")[0].split("(")[0].lower()
return field_type
def find_unique_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
for field in all_local_fields(meta):
if field.unique and meta.managed:
attname = field.db_column or field.attname
db_field_unique = table_indexes.get(attname, {}).get('unique')
if not db_field_unique and table_constraints:
                    db_field_unique = any(constraint['unique'] for constraint_name, constraint in six.iteritems(table_constraints) if [attname] == constraint['columns'])
if attname in table_indexes and db_field_unique:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
def find_unique_missing_in_model(self, meta, table_indexes, table_constraints, table_name):
# TODO: Postgresql does not list unique_togethers in table_indexes
# MySQL does
fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)])
for att_name, att_opts in six.iteritems(table_indexes):
db_field_unique = att_opts['unique']
if not db_field_unique and table_constraints:
                db_field_unique = any(constraint['unique'] for constraint_name, constraint in six.iteritems(table_constraints) if att_name in constraint['columns'])
if db_field_unique and att_name in fields and not fields[att_name]:
if att_name in flatten(meta.unique_together):
continue
self.add_difference('unique-missing-in-model', table_name, att_name)
def find_index_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
for field in all_local_fields(meta):
if field.db_index:
attname = field.db_column or field.attname
if attname not in table_indexes:
self.add_difference('index-missing-in-db', table_name, attname, '', '')
db_type = field.db_type(connection=connection)
if db_type.startswith('varchar'):
self.add_difference('index-missing-in-db', table_name, attname, 'like', ' varchar_pattern_ops')
if db_type.startswith('text'):
self.add_difference('index-missing-in-db', table_name, attname, 'like', ' text_pattern_ops')
def find_index_missing_in_model(self, meta, table_indexes, table_constraints, table_name):
fields = dict([(field.name, field) for field in all_local_fields(meta)])
for att_name, att_opts in six.iteritems(table_indexes):
if att_name in fields:
field = fields[att_name]
db_field_unique = att_opts['unique']
if not db_field_unique and table_constraints:
                    db_field_unique = any(constraint['unique'] for constraint_name, constraint in six.iteritems(table_constraints) if att_name in constraint['columns'])
if field.db_index:
continue
if getattr(field, 'spatial_index', False):
continue
if att_opts['primary_key'] and field.primary_key:
continue
if db_field_unique and field.unique:
continue
if db_field_unique and att_name in flatten(meta.unique_together):
continue
self.add_difference('index-missing-in-model', table_name, att_name)
db_type = field.db_type(connection=connection)
if db_type.startswith('varchar') or db_type.startswith('text'):
self.add_difference('index-missing-in-model', table_name, att_name, 'like')
def find_field_missing_in_model(self, fieldmap, table_description, table_name):
for row in table_description:
if row[0] not in fieldmap:
self.add_difference('field-missing-in-model', table_name, row[0])
def find_field_missing_in_db(self, fieldmap, table_description, table_name):
db_fields = [row[0] for row in table_description]
for field_name, field in six.iteritems(fieldmap):
if field_name not in db_fields:
field_output = []
if field.rel:
field_output.extend([field.rel.to._meta.db_table, field.rel.to._meta.get_field(field.rel.field_name).column])
op = 'fkey-missing-in-db'
else:
op = 'field-missing-in-db'
field_output.append(field.db_type(connection=connection))
if not field.null:
field_output.append('NOT NULL')
self.add_difference(op, table_name, field_name, *field_output)
self.new_db_fields.add((table_name, field_name))
def find_field_type_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field, table_name)
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not self.strip_parameters(db_type) == self.strip_parameters(model_type):
self.add_difference('field-type-differ', table_name, field.name, model_type, db_type)
def find_field_parameter_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field, table_name)
if not self.strip_parameters(model_type) == self.strip_parameters(db_type):
continue
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if django.VERSION[:2] >= (1, 7):
# Django >=1.7
model_check = field.db_parameters(connection=connection)['check']
if ' CHECK' in db_type:
db_type, db_check = db_type.split(" CHECK", 1)
db_check = db_check.strip().lstrip("(").rstrip(")")
else:
db_check = None
if not model_type == db_type and not model_check == db_check:
self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
else:
# Django <1.7
if not model_type == db_type:
self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
def find_field_notnull_differ(self, meta, table_description, table_name):
if not self.can_detect_notnull_differ:
return
for field in all_local_fields(meta):
attname = field.db_column or field.attname
if (table_name, attname) in self.new_db_fields:
continue
null = self.get_field_db_nullable(field, table_name)
if field.null != null:
action = field.null and 'DROP' or 'SET'
self.add_difference('notnull-differ', table_name, attname, action)
def get_constraints(self, cursor, table_name, introspection):
return {}
def find_differences(self):
if self.options['all_applications']:
self.add_app_model_marker(None, None)
for table in self.db_tables:
if table not in self.django_tables and table not in self.IGNORE_MISSING_TABLES:
self.add_difference('table-missing-in-model', table)
cur_app_label = None
for app_model in self.app_models:
meta = app_model._meta
table_name = meta.db_table
app_label = meta.app_label
if cur_app_label != app_label:
# Marker indicating start of difference scan for this table_name
self.add_app_model_marker(app_label, app_model.__name__)
if table_name not in self.db_tables:
# Table is missing from database
self.add_difference('table-missing-in-db', table_name)
continue
table_indexes = self.introspection.get_indexes(self.cursor, table_name)
if hasattr(self.introspection, 'get_constraints'):
table_constraints = self.introspection.get_constraints(self.cursor, table_name)
else:
table_constraints = self.get_constraints(self.cursor, table_name, self.introspection)
fieldmap = dict([(field.db_column or field.get_attname(), field) for field in all_local_fields(meta)])
# add ordering field if model uses order_with_respect_to
if meta.order_with_respect_to:
fieldmap['_order'] = ORDERING_FIELD
try:
table_description = self.introspection.get_table_description(self.cursor, table_name)
except Exception as e:
self.add_difference('error', 'unable to introspect table: %s' % str(e).strip())
transaction.rollback() # reset transaction
continue
# Fields which are defined in database but not in model
# 1) find: 'unique-missing-in-model'
self.find_unique_missing_in_model(meta, table_indexes, table_constraints, table_name)
# 2) find: 'index-missing-in-model'
self.find_index_missing_in_model(meta, table_indexes, table_constraints, table_name)
# 3) find: 'field-missing-in-model'
self.find_field_missing_in_model(fieldmap, table_description, table_name)
# Fields which are defined in models but not in database
# 4) find: 'field-missing-in-db'
self.find_field_missing_in_db(fieldmap, table_description, table_name)
# 5) find: 'unique-missing-in-db'
self.find_unique_missing_in_db(meta, table_indexes, table_constraints, table_name)
# 6) find: 'index-missing-in-db'
self.find_index_missing_in_db(meta, table_indexes, table_constraints, table_name)
# Fields which have a different type or parameters
# 7) find: 'type-differs'
self.find_field_type_differ(meta, table_description, table_name)
# 8) find: 'type-parameter-differs'
self.find_field_parameter_differ(meta, table_description, table_name)
# 9) find: 'field-notnull'
self.find_field_notnull_differ(meta, table_description, table_name)
self.has_differences = max([len(diffs) for _app_label, _model_name, diffs in self.differences])
def print_diff(self, style=no_style()):
""" print differences to stdout """
if self.options.get('sql', True):
self.print_diff_sql(style)
else:
self.print_diff_text(style)
def print_diff_text(self, style):
if not self.can_detect_notnull_differ:
print(style.NOTICE("# Detecting notnull changes not implemented for this database backend"))
print("")
if not self.can_detect_unsigned_differ:
print(style.NOTICE("# Detecting unsigned changes not implemented for this database backend"))
print("")
cur_app_label = None
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and app_label and cur_app_label != app_label:
print("%s %s" % (style.NOTICE("+ Application:"), style.SQL_TABLE(app_label)))
cur_app_label = app_label
if not self.dense and model_name:
print("%s %s" % (style.NOTICE("|-+ Differences for model:"), style.SQL_TABLE(model_name)))
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_TEXTS[diff_type] % dict((str(i), style.SQL_TABLE(e)) for i, e in enumerate(diff_args))
text = "'".join(i % 2 == 0 and style.ERROR(e) or e for i, e in enumerate(text.split("'")))
if not self.dense:
print("%s %s" % (style.NOTICE("|--+"), text))
else:
if app_label:
print("%s %s %s %s %s" % (style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text))
else:
print(text)
def print_diff_sql(self, style):
if not self.can_detect_notnull_differ:
print(style.NOTICE("-- Detecting notnull changes not implemented for this database backend"))
print("")
cur_app_label = None
qn = connection.ops.quote_name
if not self.has_differences:
if not self.dense:
print(style.SQL_KEYWORD("-- No differences"))
else:
print(style.SQL_KEYWORD("BEGIN;"))
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
print(style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label)))
cur_app_label = app_label
if not self.dense and model_name:
print(style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name)))
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_SQL[diff_type](style, qn, diff_args)
if self.dense:
text = text.replace("\n\t", " ")
print(text)
print(style.SQL_KEYWORD("COMMIT;"))
class GenericSQLDiff(SQLDiff):
can_detect_notnull_differ = False
class MySQLDiff(SQLDiff):
can_detect_notnull_differ = True
can_detect_unsigned_differ = True
unsigned_suffix = 'UNSIGNED'
def __init__(self, app_models, options):
super(MySQLDiff, self).__init__(app_models, options)
self.auto_increment = set()
self.load_auto_increment()
if not getattr(connection.features, 'can_introspect_small_integer_field', False):
from MySQLdb.constants import FIELD_TYPE
# Django version < 1.8 does not support MySQL small integer introspection, adding override.
self.DATA_TYPES_REVERSE_OVERRIDE[FIELD_TYPE.SHORT] = 'SmallIntegerField'
def load_null(self):
tablespace = 'public'
for table_name in self.db_tables:
result = self.sql_to_dict("""
SELECT column_name, is_nullable
FROM information_schema.columns
WHERE table_schema = DATABASE()
AND table_name = %s""", [table_name])
for table_info in result:
key = (tablespace, table_name, table_info['column_name'])
self.null[key] = table_info['is_nullable'] == 'YES'
def load_unsigned(self):
tablespace = 'public'
for table_name in self.db_tables:
result = self.sql_to_dict("""
SELECT column_name
FROM information_schema.columns
WHERE table_schema = DATABASE()
AND table_name = %s
AND column_type LIKE '%%unsigned'""", [table_name])
for table_info in result:
key = (tablespace, table_name, table_info['column_name'])
self.unsigned.add(key)
def load_auto_increment(self):
for table_name in self.db_tables:
result = self.sql_to_dict("""
SELECT column_name
FROM information_schema.columns
WHERE table_schema = DATABASE()
AND table_name = %s
AND extra = 'auto_increment'""", [table_name])
for table_info in result:
key = (table_name, table_info['column_name'])
self.auto_increment.add(key)
    # The MySQL-specific workarounds interact with each other: fixing one
    # introspection bug tends to introduce another issue. Keep in mind that
    # sqldiff is currently quite unreliable for MySQL.
def get_field_db_type(self, description, field=None, table_name=None):
from MySQLdb.constants import FIELD_TYPE
db_type = super(MySQLDiff, self).get_field_db_type(description, field, table_name)
if not db_type:
return
if field:
            # Like SQLite, MySQL does not reliably distinguish char from varchar.
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
            # MySQL reports booleans as 'tinyint(1)' and introspection turns that
            # into an integer; convert it back to its proper type, since a bool
            # is a bool and nothing else.
if db_type == 'integer' and description[1] == FIELD_TYPE.TINY and description[4] == 1:
db_type = 'bool'
if (table_name, field.column) in self.auto_increment:
db_type += ' AUTO_INCREMENT'
return db_type
class SqliteSQLDiff(SQLDiff):
can_detect_notnull_differ = True
def load_null(self):
for table_name in self.db_tables:
# sqlite does not support tablespaces
tablespace = "public"
# index, column_name, column_type, nullable, default_value
# see: http://www.sqlite.org/pragma.html#pragma_table_info
for table_info in self.sql_to_dict("PRAGMA table_info(%s);" % table_name, []):
key = (tablespace, table_name, table_info['name'])
self.null[key] = not table_info['notnull']
    # Unique does not seem to be implied for primary keys on SQLite.
    # If this holds for other databases as well, it might be useful to move
    # this into the superclass's find_unique_missing_in_db method.
def find_unique_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
for field in all_local_fields(meta):
if field.unique:
attname = field.db_column or field.attname
if attname in table_indexes and table_indexes[attname]['unique']:
continue
if attname in table_indexes and table_indexes[attname]['primary_key']:
continue
self.add_difference('unique-missing-in-db', table_name, attname)
# Finding Indexes by using the get_indexes dictionary doesn't seem to work
# for sqlite.
def find_index_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
pass
def find_index_missing_in_model(self, meta, table_indexes, table_constraints, table_name):
pass
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(SqliteSQLDiff, self).get_field_db_type(description, field, table_name)
if not db_type:
return
if field:
field_type = self.get_field_model_type(field)
# Fix char/varchar inconsistencies
if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
db_type = db_type.lstrip("var")
return db_type
class PostgresqlSQLDiff(SQLDiff):
can_detect_notnull_differ = True
can_detect_unsigned_differ = True
DATA_TYPES_REVERSE_OVERRIDE = {
1042: 'CharField',
# postgis types (TODO: support is very incomplete)
17506: 'django.contrib.gis.db.models.fields.PointField',
16392: 'django.contrib.gis.db.models.fields.PointField',
55902: 'django.contrib.gis.db.models.fields.MultiPolygonField',
16946: 'django.contrib.gis.db.models.fields.MultiPolygonField'
}
DATA_TYPES_REVERSE_NAME = {
'hstore': 'django_hstore.hstore.DictionaryField',
}
# Hopefully in the future we can add constraint checking and other more
# advanced checks based on this database.
SQL_LOAD_CONSTRAINTS = """
SELECT nspname, relname, conname, attname, pg_get_constraintdef(pg_constraint.oid)
FROM pg_constraint
INNER JOIN pg_attribute ON pg_constraint.conrelid = pg_attribute.attrelid AND pg_attribute.attnum = any(pg_constraint.conkey)
INNER JOIN pg_class ON conrelid=pg_class.oid
INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace
ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname;
"""
SQL_LOAD_NULL = """
SELECT nspname, relname, attname, attnotnull
FROM pg_attribute
INNER JOIN pg_class ON attrelid=pg_class.oid
INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace;
"""
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER COLUMN'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))
def __init__(self, app_models, options):
super(PostgresqlSQLDiff, self).__init__(app_models, options)
self.check_constraints = {}
self.load_constraints()
def load_null(self):
for dct in self.sql_to_dict(self.SQL_LOAD_NULL, []):
key = (dct['nspname'], dct['relname'], dct['attname'])
self.null[key] = not dct['attnotnull']
def load_unsigned(self):
# PostgreSQL does not support unsigned, so no columns are
# unsigned. Nothing to do.
pass
def load_constraints(self):
for dct in self.sql_to_dict(self.SQL_LOAD_CONSTRAINTS, []):
key = (dct['nspname'], dct['relname'], dct['attname'])
if 'CHECK' in dct['pg_get_constraintdef']:
self.check_constraints[key] = dct
def get_constraints(self, cursor, table_name, introspection):
""" backport of django's introspection.get_constraints(...) """
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text FROM information_schema.constraint_column_usage WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
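    # Illustrative shape of the returned mapping (names and values below are
    # hypothetical):
    #   {'example_pkey': {'columns': ['id'], 'primary_key': True, 'unique': True,
    #                     'foreign_key': None, 'check': False, 'index': False}}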
def get_field_db_type(self, description, field=None, table_name=None):
db_type = super(PostgresqlSQLDiff, self).get_field_db_type(description, field, table_name)
if not db_type:
return
if field:
if field.primary_key and isinstance(field, AutoField):
if db_type == 'integer':
db_type = 'serial'
elif db_type == 'bigint':
db_type = 'bigserial'
if table_name:
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
attname = field.db_column or field.attname
check_constraint = self.check_constraints.get((tablespace, table_name, attname), {}).get('pg_get_constraintdef', None)
if check_constraint:
check_constraint = check_constraint.replace("((", "(")
check_constraint = check_constraint.replace("))", ")")
check_constraint = '("'.join([')' in e and '" '.join(p.strip('"') for p in e.split(" ", 1)) or e for e in check_constraint.split("(")])
                    # TODO: there might be more than one constraint in the definition?
db_type += ' ' + check_constraint
return db_type
def get_field_db_type_lookup(self, type_code):
try:
name = self.sql_to_dict("SELECT typname FROM pg_type WHERE typelem=%s;", [type_code])[0]['typname']
return self.DATA_TYPES_REVERSE_NAME.get(name.strip('_'))
except (IndexError, KeyError):
pass
"""
def find_field_type_differ(self, meta, table_description, table_name):
def callback(field, description, model_type, db_type):
if field.primary_key and db_type=='integer':
db_type = 'serial'
return model_type, db_type
super(PostgresqlSQLDiff, self).find_field_type_differ(meta, table_description, table_name, callback)
"""
DATABASE_SQLDIFF_CLASSES = {
'postgis': PostgresqlSQLDiff,
'postgresql_psycopg2': PostgresqlSQLDiff,
'postgresql': PostgresqlSQLDiff,
'mysql': MySQLDiff,
'sqlite3': SqliteSQLDiff,
'oracle': GenericSQLDiff
}
class Command(BaseCommand):
help = """Prints the (approximated) difference between models and fields in the database for the given app name(s).
It indicates how columns in the database are different from the sql that would
be generated by Django. This command is not a database migration tool. (Though
it can certainly help) It's purpose is to show the current differences as a way
to check/debug ur models compared to the real database tables and columns."""
output_transaction = False
args = '<appname appname ...>'
def add_arguments(self, parser):
parser.add_argument(
'--all-applications', '-a', action='store_true',
dest='all_applications',
help="Automaticly include all application from INSTALLED_APPS.")
parser.add_argument(
'--not-only-existing', '-e', action='store_false',
dest='only_existing',
help="Check all tables that exist in the database, not only "
"tables that should exist based on models.")
parser.add_argument(
'--dense-output', '-d', action='store_true', dest='dense_output',
help="Shows the output in dense format, normally output is "
"spreaded over multiple lines.")
parser.add_argument(
'--output_text', '-t', action='store_false', dest='sql',
default=True,
help="Outputs the differences as descriptive text instead of SQL")
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.exit_code = 1
@signalcommand
def handle(self, *app_labels, **options):
from django.conf import settings
engine = None
if hasattr(settings, 'DATABASES'):
engine = settings.DATABASES['default']['ENGINE']
else:
engine = settings.DATABASE_ENGINE
if engine == 'dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set DATABASE_ENGINE.
raise CommandError("""Django doesn't know which syntax to use for your SQL statements,
because you haven't specified the DATABASE_ENGINE setting.
Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.""")
if options.get('all_applications', False):
app_models = get_app_models()
else:
if not app_labels:
raise CommandError('Enter at least one appname.')
app_models = get_app_models(app_labels)
if not app_models:
            raise CommandError('Unable to execute sqldiff: no models found.')
if not engine:
engine = connection.__module__.split('.')[-2]
if '.' in engine:
engine = engine.split('.')[-1]
cls = DATABASE_SQLDIFF_CLASSES.get(engine, GenericSQLDiff)
sqldiff_instance = cls(app_models, options)
sqldiff_instance.find_differences()
if not sqldiff_instance.has_differences:
self.exit_code = 0
sqldiff_instance.print_diff(self.style)
def execute(self, *args, **options):
try:
super(Command, self).execute(*args, **options)
except CommandError as e:
if options.get('traceback', False):
raise
# self.stderr is not guaranteed to be set here
stderr = getattr(self, 'stderr', None)
if not stderr:
if HAS_OUTPUTWRAPPER:
stderr = OutputWrapper(sys.stderr, self.style.ERROR)
else:
stderr = sys.stderr
stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(2)
def run_from_argv(self, argv):
super(Command, self).run_from_argv(argv)
sys.exit(self.exit_code)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for DepthToSpace op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class DepthToSpaceTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs):
input_nhwc = math_ops.to_float(inputs)
with self.test_session(use_gpu=False):
# test NHWC (default) on CPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.eval(), outputs)
if test.is_gpu_available():
with self.test_session(use_gpu=True):
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# test NCHW on GPU
input_nchw = test_util.NHWCToNCHW(input_nhwc)
output_nchw = array_ops.depth_to_space(
input_nchw, block_size, data_format="NCHW")
output_nhwc = test_util.NCHWToNHWC(output_nchw)
self.assertAllEqual(output_nhwc.eval(), outputs)
def testBasic(self):
x_np = [[[[1, 2, 3, 4]]]]
block_size = 2
x_out = [[[[1], [2]], [[3], [4]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testBlockSize2(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 2
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i],
[5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
def batch_output_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
# Tests for different width and height.
def testNonSquare(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
[[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]],
[[5, 50], [6, 60]],
[[7, 70], [8, 80]],
[[9, 90], [10, 100]],
[[11, 110], [12, 120]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testBlockSize4FlatInput(self):
x_np = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
block_size = 4
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedLarger(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40],
[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
self._testOne(x_np, block_size, x_out)
  # Error handling:
  # Tests a block size that is too large for the input depth; this should
  # raise an exception.
def testBlockSizeTooLarge(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 4
    # Raises an exception, since the depth is only 4 and needs to be
    # divisible by block_size ** 2 = 16.
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
# Test when the block size is 0.
def testBlockSize0(self):
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
# Test when the block size is 1. The block size should be > 1.
def testBlockSizeOne(self):
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
def testBlockSizeLargerThanInput(self):
# The block size is too large for this input.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleDepth(self):
# The depth is not divisible by the square of the block size.
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 3
with self.assertRaises(ValueError):
      _ = array_ops.depth_to_space(x_np, block_size)
def testUnknownShape(self):
t = array_ops.depth_to_space(array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
def depthToSpaceUsingTranspose(self, tensor, block_size, data_format):
block_size_sq = block_size * block_size
if data_format == "NHWC":
b, ih, iw, ic = tensor.shape.as_list()
assert ic % block_size_sq == 0, (ic, block_size_sq)
ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
tensor = array_ops.reshape(tensor,
[b, ih, iw, block_size, block_size, oc])
tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
elif data_format == "NCHW":
b, ic, ih, iw = tensor.shape.as_list()
assert ic % block_size_sq == 0, (ic, block_size_sq)
ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq
tensor = array_ops.reshape(tensor,
[b, block_size, block_size, oc, ih, iw])
tensor = array_ops.transpose(tensor, [0, 3, 4, 1, 5, 2])
tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
return tensor
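  # Worked example (illustrative; mirrors testBasic above): an NHWC input of
  # shape [1, 1, 1, 4] holding [1, 2, 3, 4] with block_size=2 is reshaped to
  # [1, 1, 1, 2, 2, 1], the block dimensions are interleaved with the spatial
  # dimensions by the transpose, and the final reshape yields the
  # [1, 2, 2, 1] output [[[[1], [2]], [[3], [4]]]].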
def compareToTranspose(self, data_format, batch_size, in_height, in_width,
out_channels, block_size, use_gpu):
if use_gpu and not test.is_gpu_available():
print("gpu not available")
return
dtype = dtypes.float32
in_channels = out_channels * block_size * block_size
if data_format == "NHWC":
input_shape = [batch_size, in_height, in_width, in_channels]
elif data_format == "NCHW":
input_shape = [batch_size, in_channels, in_height, in_width]
else:
assert False, "unsupported format"
# Initialize the input tensor with ascending whole numbers.
total_size = 1
for dim_size in input_shape:
total_size *= dim_size
x = [f for f in range(total_size)]
inputs = constant_op.constant(x, shape=input_shape, dtype=dtype)
expected = self.depthToSpaceUsingTranspose(inputs, block_size, data_format)
actual = array_ops.depth_to_space(
inputs, block_size, data_format=data_format)
with self.test_session(use_gpu=use_gpu) as sess:
actual_vals, expected_vals = sess.run([actual, expected])
self.assertTrue(np.array_equal(actual_vals, expected_vals))
def testAgainstTranspose(self):
self.compareToTranspose("NHWC", 3, 2, 3, 1, 2, False)
self.compareToTranspose("NHWC", 3, 2, 3, 2, 2, False)
self.compareToTranspose("NHWC", 3, 2, 3, 1, 2, True)
self.compareToTranspose("NHWC", 3, 2, 3, 2, 2, True)
self.compareToTranspose("NCHW", 3, 2, 3, 1, 2, True)
self.compareToTranspose("NCHW", 3, 2, 3, 2, 2, True)
self.compareToTranspose("NCHW", 3, 2, 3, 1, 3, True)
self.compareToTranspose("NCHW", 3, 2, 3, 2, 3, True)
self.compareToTranspose("NCHW", 5, 7, 11, 3, 2, True)
self.compareToTranspose("NCHW", 3, 200, 300, 32, 2, True)
class DepthToSpaceGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.depth_to_space(tf_x, block_size)
epsilon = 1e-2
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for depth_to_space of x which is a four dimensional
# tensor of shape [b, h, w, d * block_size * block_size].
def _compare(self, b, h, w, d, block_size):
block_size_sq = block_size * block_size
x = np.random.normal(
0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(
[b, h, w, d * block_size_sq])
self._checkGrad(x, block_size)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor whose size is the product of the dimensions.
def testSmall(self):
block_size = 2
self._compare(3, 2, 5, 3, block_size)
def testSmall2(self):
block_size = 3
self._compare(1, 2, 3, 2, block_size)
if __name__ == "__main__":
test.main()
|
|
import pygame
import time
import socket
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
sendData = True
TCP_IP = "192.168.0.10"
TCP_PORT = 5005
# This is a simple class that will help us print to the screen.
# It has nothing to do with the joysticks, it just outputs the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 16)
def printf(self, screen, textString):
textBitmap = self.font.render(textString, True, BLACK)
screen.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
# Set the width and height of the screen [width,height]
size = [500, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Joystick Testing")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
# Setup socket
if sendData:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
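# A minimal receiving end for this stream might look like the sketch below
# (illustrative only; not part of this script). Each frame sent is a
# comma-separated list of "<joystick_index>~<value>" entries, e.g. "1~0.0"
# for an axis or "0~1" for a button state.
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((TCP_IP, TCP_PORT))
#   server.listen(1)
#   conn, addr = server.accept()
#   frame = conn.recv(1024)
#   entries = [e.split("~") for e in frame.split(",") if "~" in e]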
# -------- Main Program Loop -----------
while not done:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
#if event.type == pygame.JOYBUTTONDOWN:
# print ("Joystick button pressed.")
#if event.type == pygame.JOYBUTTONUP:
# print ("Joystick button released.")
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
textPrint.reset()
# Get count of joysticks
joystick_count = pygame.joystick.get_count()
textPrint.printf(screen, "Number of joysticks: {}".format(joystick_count) )
textPrint.indent()
    # Running count of joysticks for the loop below
count = 0
randomVariable = 0
delimeters = 11
axesData = ""
comma = True
# For each joystick:
for i in range(joystick_count):
# Increase joystick number
count += 1
joystick = pygame.joystick.Joystick(i)
joystick.init()
textPrint.printf(screen, "Joystick {}".format(i) )
textPrint.indent()
# Get the name from the OS for the controller/joystick
name = joystick.get_name()
textPrint.printf(screen, "Joystick name: {}".format(name) )
joystickString = "Joystick Name: " + str(name)
        # Usually axes run in pairs: up/down for one, and left/right for
        # the other.
axes = joystick.get_numaxes()
textPrint.printf(screen, "Number of axes: {}".format(axes) )
axesCountString = "Number of Axes: " + str(axes)
textPrint.indent()
#axesString = "Axes Values:\n\t" UNCOMMENT THIS FOR SENDING
axesString = "<"
#axesString += str(i) + ","
count2 = 0
#for i in range( axes ): UNCOMMENT THIS LATER
# count2 += 1
# axis = joystick.get_axis( i )
# textPrint.printf(screen, "Axis {} value: {:>6.3f}".format(i, axis) )
# axesString += "\tAxis " + str(count2) + ": " + str(round(axis, 4))
listAxes1 = [0,1,2,4,5]
listAxes2 = [1,2,5]
listButtons2 = [0,1,4,5]
if i == 1:
for j in listAxes1: #change range sometime
axis = joystick.get_axis(j)
axesData += str(i) + "~" + str(round(axis, 4)) #changed from axesString
textPrint.printf(screen, "Axis {} value: {:>6.3f}".format(j, axis) )
if randomVariable < delimeters: #number of entries - 1 for < number
axesData += ","
randomVariable += 1
else:
for j in listAxes2:
axis = joystick.get_axis(j)
axesData += str(i) + "~" + str(round(axis, 4))
textPrint.printf(screen, "Axis {} value: {:>6.3f}".format(j, axis) )
if randomVariable < delimeters:
axesData += ","
randomVariable += 1
for j in listButtons2:
button = joystick.get_button(j)
axesData += str(i) + "~" + str(button)
if randomVariable < delimeters:
axesData += ","
randomVariable += 1
textPrint.printf(screen, "Button {:>2} value: {}".format(i,button) )
#axesString += ">"
textPrint.unindent()
buttons = joystick.get_numbuttons()
textPrint.printf(screen, "Number of buttons: {}".format(buttons) )
buttonCountString = "Number of Buttons: " + str(buttons)
textPrint.indent()
buttonString = "Button Values:\n\t"
buttonOptimized = ""
#for i in range( buttons ):
# button = joystick.get_button( i )
# textPrint.printf(screen, "Button {:>2} value: {}".format(i,button) )
# buttonString += "\t" + str(button)
# buttonOptimized = str(button)
#textPrint.unindent()
# Hat switch. All or nothing for direction, not like joysticks.
# Value comes back in an array.
hats = joystick.get_numhats()
textPrint.printf(screen, "Number of hats: {}".format(hats) )
hatCountString = "Number of Hats: " + str(hats)
textPrint.indent()
hatString = "Hat Values:\n\t"
count3 = 0
for i in range( hats ):
count3 += 1
hat = joystick.get_hat( i )
textPrint.printf(screen, "Hat {} value: {}".format(i, str(hat)) )
hatString += "\tHat " + str(count3) + ": " + str(hat)
textPrint.unindent()
textPrint.unindent()
#if comma:
# axesData += ","
# comma = False
# s.send("Joystick " + str(count) + ":\n\t" + joystickString + "\n\n\t" + axesCountString + "\n\t" + axesString + "\n\n\t" + buttonCountString + "\n\t" + buttonString + "\n\n\t" + hatCountString + "\n\t" + hatString + "\n\n\n")
# s.send(buttonOptimized)
#s.send(axesString)
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
#axesData += ">"
    print(axesData + "\n")
if sendData:
s.send(axesData)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
    # Limit to 50 frames per second
clock.tick(50)
# l = s.recv(1024)
# print l
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
|
|
# -*- coding: utf-8 -*-
"""Test cluster store."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal as ae
from numpy.testing import assert_allclose as ac
from ...utils._types import Bunch
from ...utils.array import _spikes_per_cluster
from ..store import (MemoryStore,
DiskStore,
ClusterStore,
VariableSizeItem,
FixedSizeItem,
)
#------------------------------------------------------------------------------
# Test data stores
#------------------------------------------------------------------------------
def test_memory_store():
ms = MemoryStore()
assert ms.load(2) == {}
assert ms.load(3).get('key', None) is None
assert ms.load(3) == {}
assert ms.load(3, ['key']) == {'key': None}
assert ms.load(3) == {}
assert ms.cluster_ids == []
ms.store(3, key='a')
assert ms.load(3) == {'key': 'a'}
assert ms.load(3, ['key']) == {'key': 'a'}
assert ms.load(3, 'key') == 'a'
assert ms.cluster_ids == [3]
ms.store(3, key_bis='b')
assert ms.load(3) == {'key': 'a', 'key_bis': 'b'}
assert ms.load(3, ['key']) == {'key': 'a'}
assert ms.load(3, ['key_bis']) == {'key_bis': 'b'}
assert ms.load(3, ['key', 'key_bis']) == {'key': 'a', 'key_bis': 'b'}
assert ms.load(3, 'key_bis') == 'b'
assert ms.cluster_ids == [3]
ms.erase([2, 3])
assert ms.load(3) == {}
assert ms.load(3, ['key']) == {'key': None}
assert ms.cluster_ids == []
def test_disk_store(tempdir):
dtype = np.float32
sha = (2, 4)
shb = (3, 5)
a = np.random.rand(*sha).astype(dtype)
b = np.random.rand(*shb).astype(dtype)
def _assert_equal(d_0, d_1):
"""Test the equality of two dictionaries containing NumPy arrays."""
assert sorted(d_0.keys()) == sorted(d_1.keys())
for key in d_0.keys():
ac(d_0[key], d_1[key])
ds = DiskStore(tempdir)
ds.register_file_extensions(['key', 'key_bis'])
assert ds.cluster_ids == []
ds.store(3, key=a)
_assert_equal(ds.load(3,
['key'],
dtype=dtype,
shape=sha,
),
{'key': a})
loaded = ds.load(3, 'key', dtype=dtype, shape=sha)
ac(loaded, a)
# Loading a non-existing key returns None.
assert ds.load(3, 'key_bis') is None
assert ds.cluster_ids == [3]
ds.store(3, key_bis=b)
_assert_equal(ds.load(3, ['key'], dtype=dtype, shape=sha), {'key': a})
_assert_equal(ds.load(3, ['key_bis'],
dtype=dtype,
shape=shb,
),
{'key_bis': b})
_assert_equal(ds.load(3,
['key', 'key_bis'],
dtype=dtype,
),
{'key': a.ravel(), 'key_bis': b.ravel()})
ac(ds.load(3, 'key_bis', dtype=dtype, shape=shb), b)
assert ds.cluster_ids == [3]
ds.erase([2, 3])
assert ds.load(3, ['key']) == {'key': None}
assert ds.cluster_ids == []
# Test load/save file.
ds.save_file('test', {'a': a})
ds = DiskStore(tempdir)
data = ds.load_file('test')
ae(data['a'], a)
assert ds.load_file('test2') is None
def test_cluster_store_1(tempdir):
# We define some data and a model.
n_spikes = 100
n_clusters = 10
spike_ids = np.arange(n_spikes)
spike_clusters = np.random.randint(size=n_spikes,
low=0, high=n_clusters)
spikes_per_cluster = _spikes_per_cluster(spike_ids, spike_clusters)
model = {'spike_clusters': spike_clusters}
# We initialize the ClusterStore.
cs = ClusterStore(model=model,
path=tempdir,
spikes_per_cluster=spikes_per_cluster,
)
# We create a n_spikes item to be stored in memory,
# and we define how to generate it for a given cluster.
class MyItem(FixedSizeItem):
name = 'my item'
fields = ['n_spikes']
def store(self, cluster):
spikes = self.spikes_per_cluster[cluster]
self.memory_store.store(cluster, n_spikes=len(spikes))
def load(self, cluster, name):
return self.memory_store.load(cluster, name)
def on_cluster(self, up):
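            # On a merge, store the new cluster's spike count as the sum of
            # the merged clusters' counts; any other update falls back to
            # the default handling.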
if up.description == 'merge':
n = sum(len(up.old_spikes_per_cluster[cl])
for cl in up.deleted)
self.memory_store.store(up.added[0], n_spikes=n)
else:
super(MyItem, self).on_cluster(up)
item = cs.register_item(MyItem)
item.progress_reporter.set_progress_message("Progress {progress}.\n")
item.progress_reporter.set_complete_message("Finished.\n")
# Now we generate the store.
cs.generate()
# We check that the n_spikes field has successfully been created.
for cluster in sorted(spikes_per_cluster):
assert cs.n_spikes(cluster) == len(spikes_per_cluster[cluster])
# Merge.
spc = spikes_per_cluster.copy()
spikes = np.sort(np.concatenate([spc[0], spc[1]]))
spc[20] = spikes
del spc[0]
del spc[1]
up = Bunch(description='merge',
added=[20],
deleted=[0, 1],
spike_ids=spikes,
new_spikes_per_cluster=spc,
old_spikes_per_cluster=spikes_per_cluster,)
cs.items['my item'].on_cluster(up)
# Check the list of clusters in the store.
ae(cs.memory_store.cluster_ids, list(range(0, n_clusters)) + [20])
ae(cs.disk_store.cluster_ids, [])
assert cs.n_spikes(20) == len(spikes)
# Recreate the cluster store.
cs = ClusterStore(model=model,
spikes_per_cluster=spikes_per_cluster,
path=tempdir,
)
cs.register_item(MyItem)
cs.generate()
ae(cs.memory_store.cluster_ids, list(range(n_clusters)))
ae(cs.disk_store.cluster_ids, [])
def test_cluster_store_multi():
"""This tests the cluster store when a store item has several fields."""
cs = ClusterStore(spikes_per_cluster={0: [0, 2], 1: [1, 3, 4]})
class MyItem(FixedSizeItem):
name = 'my item'
fields = ['d', 'm']
def store(self, cluster):
spikes = self.spikes_per_cluster[cluster]
self.memory_store.store(cluster, d=len(spikes), m=len(spikes) ** 2)
def load(self, cluster, name):
return self.memory_store.load(cluster, name)
cs.register_item(MyItem)
cs.generate()
assert cs.memory_store.load(0, ['d', 'm']) == {'d': 2, 'm': 4}
assert cs.d(0) == 2
assert cs.m(0) == 4
assert cs.memory_store.load(1, ['d', 'm']) == {'d': 3, 'm': 9}
assert cs.d(1) == 3
assert cs.m(1) == 9
def test_cluster_store_load(tempdir):
# We define some data and a model.
n_spikes = 100
n_clusters = 10
spike_ids = np.arange(n_spikes)
spike_clusters = np.random.randint(size=n_spikes,
low=0, high=n_clusters)
spikes_per_cluster = _spikes_per_cluster(spike_ids, spike_clusters)
model = {'spike_clusters': spike_clusters}
# We initialize the ClusterStore.
cs = ClusterStore(model=model,
spikes_per_cluster=spikes_per_cluster,
path=tempdir,
)
# We create a n_spikes item to be stored in memory,
# and we define how to generate it for a given cluster.
class MyItem(VariableSizeItem):
name = 'my item'
fields = ['spikes_square']
def store(self, cluster):
spikes = spikes_per_cluster[cluster]
data = (spikes ** 2).astype(np.int32)
self.disk_store.store(cluster, spikes_square=data)
def load(self, cluster, name):
return self.disk_store.load(cluster, name, np.int32)
def load_spikes(self, spikes, name):
return (spikes ** 2).astype(np.int32)
cs.register_item(MyItem)
cs.generate()
# All spikes in cluster 1.
cluster = 1
spikes = spikes_per_cluster[cluster]
ae(cs.load('spikes_square', clusters=[cluster]), spikes ** 2)
# Some spikes in several clusters.
clusters = [2, 3, 5]
spikes = np.concatenate([spikes_per_cluster[cl][::3]
for cl in clusters])
ae(cs.load('spikes_square', spikes=spikes), np.unique(spikes) ** 2)
# Empty selection.
assert len(cs.load('spikes_square', clusters=[])) == 0
assert len(cs.load('spikes_square', spikes=[])) == 0
def test_cluster_store_management(tempdir):
# We define some data and a model.
n_spikes = 100
n_clusters = 10
spike_ids = np.arange(n_spikes)
spike_clusters = np.random.randint(size=n_spikes,
low=0, high=n_clusters)
spikes_per_cluster = _spikes_per_cluster(spike_ids, spike_clusters)
model = Bunch({'spike_clusters': spike_clusters,
'cluster_ids': np.arange(n_clusters),
})
# We initialize the ClusterStore.
cs = ClusterStore(model=model,
spikes_per_cluster=spikes_per_cluster,
path=tempdir,
)
# We create a n_spikes item to be stored in memory,
# and we define how to generate it for a given cluster.
class MyItem(VariableSizeItem):
name = 'my item'
fields = ['spikes_square']
def store(self, cluster):
spikes = self.spikes_per_cluster[cluster]
if not self.is_consistent(cluster, spikes):
data = (spikes ** 2).astype(np.int32)
self.disk_store.store(cluster, spikes_square=data)
def is_consistent(self, cluster, spikes):
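            # The stored item is consistent when the on-disk array exists,
            # has one entry per spike, and equals the squares of the
            # cluster's current spikes.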
data = self.disk_store.load(cluster,
'spikes_square',
dtype=np.int32,
)
if data is None:
return False
if len(data) != len(spikes):
return False
expected = (spikes ** 2).astype(np.int32)
return np.all(data == expected)
cs.register_item(MyItem)
cs.update_spikes_per_cluster(spikes_per_cluster)
def _check_to_generate(cs, clusters):
item = cs.items['my item']
ae(item.to_generate(), clusters)
ae(item.to_generate(None), clusters)
ae(item.to_generate('default'), clusters)
ae(item.to_generate('force'), np.arange(n_clusters))
ae(item.to_generate('read-only'), [])
# Check the list of clusters to generate.
_check_to_generate(cs, np.arange(n_clusters))
# Generate the store.
cs.generate()
# Check the status.
assert 'True' in cs.status
# We re-initialize the ClusterStore.
cs = ClusterStore(model=model,
spikes_per_cluster=spikes_per_cluster,
path=tempdir,
)
cs.register_item(MyItem)
cs.update_spikes_per_cluster(spikes_per_cluster)
# Check the list of clusters to generate.
_check_to_generate(cs, [])
cs.display_status()
# We erase a file.
path = op.join(cs.path, '1.spikes_square')
os.remove(path)
# Check the list of clusters to generate.
_check_to_generate(cs, [1])
assert '9' in cs.status
assert 'False' in cs.status
cs.generate()
# Check the status.
assert 'True' in cs.status
    # Now, we make new assignments.
spike_clusters = np.random.randint(size=n_spikes,
low=n_clusters, high=n_clusters + 5)
spikes_per_cluster = _spikes_per_cluster(spike_ids, spike_clusters)
cs.update_spikes_per_cluster(spikes_per_cluster)
# All files are now old and should be removed by clean().
assert not cs.is_consistent()
item = cs.items['my item']
ae(item.to_generate(), np.arange(n_clusters, n_clusters + 5))
ae(cs.cluster_ids, np.arange(n_clusters, n_clusters + 5))
ae(cs.old_clusters, np.arange(n_clusters))
cs.clean()
ae(cs.cluster_ids, np.arange(n_clusters, n_clusters + 5))
ae(cs.old_clusters, [])
ae(item.to_generate(), np.arange(n_clusters, n_clusters + 5))
assert not cs.is_consistent()
cs.generate()
assert cs.is_consistent()
ae(cs.cluster_ids, np.arange(n_clusters, n_clusters + 5))
ae(cs.old_clusters, [])
ae(item.to_generate(), [])
|
|
"""
Provides functionality to interact with lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light/
"""
import logging
import os
import csv
from homeassistant.components import (
group, discovery, wemo, wink, isy994, zwave, insteon_hub, mysensors)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE,
ATTR_ENTITY_ID)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
import homeassistant.util as util
import homeassistant.util.color as color_util
DOMAIN = "light"
SCAN_INTERVAL = 30
GROUP_NAME_ALL_LIGHTS = 'all lights'
ENTITY_ID_ALL_LIGHTS = group.ENTITY_ID_FORMAT.format('all_lights')
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Integer that represents transition time in seconds to make change.
ATTR_TRANSITION = "transition"
# Lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_XY_COLOR = "xy_color"
ATTR_COLOR_TEMP = "color_temp"
# int with value 0 .. 255 representing brightness of the light.
ATTR_BRIGHTNESS = "brightness"
# String representing a profile (built-in ones or external defined).
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG.
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# Apply an effect to the light, can be EFFECT_COLORLOOP.
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_RANDOM = "random"
EFFECT_WHITE = "white"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Maps discovered services to their platforms.
DISCOVERY_PLATFORMS = {
wemo.DISCOVER_LIGHTS: 'wemo',
wink.DISCOVER_LIGHTS: 'wink',
insteon_hub.DISCOVER_LIGHTS: 'insteon_hub',
isy994.DISCOVER_LIGHTS: 'isy994',
discovery.SERVICE_HUE: 'hue',
zwave.DISCOVER_LIGHTS: 'zwave',
mysensors.DISCOVER_LIGHTS: 'mysensors',
}
PROP_TO_ATTR = {
'brightness': ATTR_BRIGHTNESS,
'color_temp': ATTR_COLOR_TEMP,
'rgb_color': ATTR_RGB_COLOR,
'xy_color': ATTR_XY_COLOR,
}
_LOGGER = logging.getLogger(__name__)
def is_on(hass, entity_id=None):
"""Return if the lights are on based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_LIGHTS
return hass.states.is_state(entity_id, STATE_ON)
# pylint: disable=too-many-arguments
def turn_on(hass, entity_id=None, transition=None, brightness=None,
rgb_color=None, xy_color=None, color_temp=None, profile=None,
flash=None, effect=None):
"""Turn all or specified light on."""
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_PROFILE, profile),
(ATTR_TRANSITION, transition),
(ATTR_BRIGHTNESS, brightness),
(ATTR_RGB_COLOR, rgb_color),
(ATTR_XY_COLOR, xy_color),
(ATTR_COLOR_TEMP, color_temp),
(ATTR_FLASH, flash),
(ATTR_EFFECT, effect),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
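# Example call (hypothetical entity id):
#     turn_on(hass, 'light.kitchen', brightness=200, rgb_color=[255, 0, 0])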
def turn_off(hass, entity_id=None, transition=None):
"""Turn all or specified light off."""
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_TRANSITION, transition),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
def toggle(hass, entity_id=None, transition=None):
"""Toggle all or specified light."""
data = {
key: value for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_TRANSITION, transition),
] if value is not None
}
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
def setup(hass, config):
"""Expose light control via statemachine and services."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, DISCOVERY_PLATFORMS,
GROUP_NAME_ALL_LIGHTS)
component.setup(config)
# Load built-in profiles and custom profiles
profile_paths = [os.path.join(os.path.dirname(__file__),
LIGHT_PROFILES_FILE),
hass.config.path(LIGHT_PROFILES_FILE)]
profiles = {}
for profile_path in profile_paths:
if not os.path.isfile(profile_path):
continue
with open(profile_path) as inp:
reader = csv.reader(inp)
# Skip the header
next(reader, None)
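            # Each remaining row is expected to be:
            #   profile_id, color_x, color_y, brightness
            # (e.g. "relax,0.51,0.41,144" -- illustrative values only)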
try:
for profile_id, color_x, color_y, brightness in reader:
profiles[profile_id] = (float(color_x), float(color_y),
int(brightness))
except ValueError:
# ValueError if not 4 values per row
                # ValueError if conversion to float/int failed
_LOGGER.error(
"Error parsing light profiles from %s", profile_path)
return False
def handle_light_service(service):
"""Hande a turn light on or off service call."""
# Get and validate data
dat = service.data
# Convert the entity ids to valid light ids
target_lights = component.extract_from_service(service)
params = {}
transition = util.convert(dat.get(ATTR_TRANSITION), int)
if transition is not None:
params[ATTR_TRANSITION] = transition
service_fun = None
if service.service == SERVICE_TURN_OFF:
service_fun = 'turn_off'
elif service.service == SERVICE_TOGGLE:
service_fun = 'toggle'
if service_fun:
for light in target_lights:
getattr(light, service_fun)(**params)
for light in target_lights:
if light.should_poll:
light.update_ha_state(True)
return
# Processing extra data for turn light on request.
# We process the profile first so that we get the desired
# behavior that extra service data attributes overwrite
# profile values.
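        # (e.g. an explicit brightness in the service data replaces the
        # brightness that came from the profile.)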
profile = profiles.get(dat.get(ATTR_PROFILE))
if profile:
*params[ATTR_XY_COLOR], params[ATTR_BRIGHTNESS] = profile
if ATTR_BRIGHTNESS in dat:
# We pass in the old value as the default parameter if parsing
# of the new one goes wrong.
params[ATTR_BRIGHTNESS] = util.convert(
dat.get(ATTR_BRIGHTNESS), int, params.get(ATTR_BRIGHTNESS))
if ATTR_XY_COLOR in dat:
try:
# xy_color should be a list containing 2 floats.
xycolor = dat.get(ATTR_XY_COLOR)
# Without this check, a xycolor with value '99' would work.
if not isinstance(xycolor, str):
params[ATTR_XY_COLOR] = [float(val) for val in xycolor]
except (TypeError, ValueError):
# TypeError if xy_color is not iterable
# ValueError if value could not be converted to float
pass
if ATTR_COLOR_TEMP in dat:
            # color_temp should be an int representing a mired value
colortemp = dat.get(ATTR_COLOR_TEMP)
# Without this check, a ctcolor with value '99' would work
            # These values are based on Philips Hue, may need adjustment later
if isinstance(colortemp, int) and 154 <= colortemp <= 500:
params[ATTR_COLOR_TEMP] = colortemp
if ATTR_RGB_COLOR in dat:
try:
# rgb_color should be a list containing 3 ints
rgb_color = dat.get(ATTR_RGB_COLOR)
if len(rgb_color) == 3:
params[ATTR_RGB_COLOR] = [int(val) for val in rgb_color]
except (TypeError, ValueError):
# TypeError if rgb_color is not iterable
# ValueError if not all values can be converted to int
pass
if dat.get(ATTR_FLASH) in (FLASH_SHORT, FLASH_LONG):
params[ATTR_FLASH] = dat[ATTR_FLASH]
if dat.get(ATTR_EFFECT) in (EFFECT_COLORLOOP, EFFECT_WHITE,
EFFECT_RANDOM):
params[ATTR_EFFECT] = dat[ATTR_EFFECT]
for light in target_lights:
light.turn_on(**params)
for light in target_lights:
if light.should_poll:
light.update_ha_state(True)
# Listen for light on and light off service calls.
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_TURN_ON, handle_light_service,
descriptions.get(SERVICE_TURN_ON))
hass.services.register(DOMAIN, SERVICE_TURN_OFF, handle_light_service,
descriptions.get(SERVICE_TURN_OFF))
hass.services.register(DOMAIN, SERVICE_TOGGLE, handle_light_service,
descriptions.get(SERVICE_TOGGLE))
return True
class Light(ToggleEntity):
"""Representation of a light."""
# pylint: disable=no-self-use
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return None
@property
def xy_color(self):
"""Return the XY color value [float, float]."""
return None
@property
def rgb_color(self):
"""Return the RGB color value [int, int, int]."""
return None
@property
def color_temp(self):
"""Return the CT color value in mirads."""
return None
@property
def state_attributes(self):
"""Return optional state attributes."""
data = {}
if self.is_on:
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value:
data[attr] = value
if ATTR_RGB_COLOR not in data and ATTR_XY_COLOR in data and \
ATTR_BRIGHTNESS in data:
data[ATTR_RGB_COLOR] = color_util.color_xy_brightness_to_RGB(
data[ATTR_XY_COLOR][0], data[ATTR_XY_COLOR][1],
data[ATTR_BRIGHTNESS])
return data
|
|
import xarray as xr
import numpy as np
from numpy.testing import assert_allclose
from xyzpy.gen.case_runner import (
case_runner,
case_runner_to_ds,
case_runner_to_df,
find_missing_cases,
)
from . import (
foo3_scalar,
foo3_float_bool,
foo2_array,
foo2_array_array,
foo2_zarray1_zarray2,
)
# --------------------------------------------------------------------------- #
# CASE_RUNNER tests #
# --------------------------------------------------------------------------- #
class TestCaseRunner:
def test_seq(self):
cases = ((1, 10, 100),
(2, 20, 200),
(3, 30, 300))
xs = case_runner(foo3_scalar, ('a', 'b', 'c'), cases, verbosity=0)
assert xs == (111, 222, 333)
def test_progbar(self):
cases = ((1, 10, 100),
(2, 20, 200),
(3, 30, 300))
xs = case_runner(foo3_scalar, ('a', 'b', 'c'), cases, verbosity=2)
assert xs == (111, 222, 333)
def test_constants(self):
cases = ((1,),
(2,),
(3,))
xs = case_runner(foo3_scalar, ('a', 'b', 'c'), cases,
constants={'b': 10, 'c': 100})
assert xs == (111, 112, 113)
def test_parallel(self):
cases = ((1, 10, 100),
(2, 20, 200),
(3, 30, 300))
xs = case_runner(foo3_scalar, ('a', 'b', 'c'), cases, num_workers=1)
assert xs == (111, 222, 333)
def test_split(self):
cases = ((1, 10, 100),
(2, 20, 200),
(3, 30, 300))
a, b = case_runner(foo3_float_bool, ('a', 'b', 'c'), cases, split=True)
assert a == (111, 222, 333)
assert b == (False, True, False)
def test_single_args(self):
cases = (1, 2, 3)
xs = case_runner(foo3_scalar, 'a', cases,
constants={'b': 10, 'c': 100})
assert xs == (111, 112, 113)
class TestCaseRunnerToDS:
def test_single(self):
cases = [(1, 20, 300),
(3, 20, 100)]
ds = case_runner_to_ds(foo3_scalar, ['a', 'b', 'c'], cases=cases,
var_names='sum')
assert_allclose(ds['a'].data, [1, 3])
assert_allclose(ds['b'].data, [20])
assert_allclose(ds['c'].data, [100, 300])
assert ds['sum'].loc[{'a': 1, 'b': 20, 'c': 300}].data == 321
assert ds['sum'].loc[{'a': 3, 'b': 20, 'c': 100}].data == 123
assert ds['sum'].loc[{'a': 1, 'b': 20, 'c': 100}].isnull()
assert ds['sum'].loc[{'a': 3, 'b': 20, 'c': 300}].isnull()
def test_single_dict_cases(self):
cases = [{'a': 1, 'b': 20, 'c': 300}, {'a': 3, 'b': 20, 'c': 100}]
ds = case_runner_to_ds(foo3_scalar, None, cases=cases, var_names='sum')
assert_allclose(ds['a'].data, [1, 3])
assert_allclose(ds['b'].data, [20])
assert_allclose(ds['c'].data, [100, 300])
assert ds['sum'].loc[{'a': 1, 'b': 20, 'c': 300}].data == 321
assert ds['sum'].loc[{'a': 3, 'b': 20, 'c': 100}].data == 123
assert ds['sum'].loc[{'a': 1, 'b': 20, 'c': 100}].isnull()
assert ds['sum'].loc[{'a': 3, 'b': 20, 'c': 300}].isnull()
def test_multires(self):
cases = [(1, 20, 300),
(3, 20, 100)]
ds = case_runner_to_ds(foo3_float_bool,
fn_args=['a', 'b', 'c'],
cases=cases,
var_names=['sum', 'a_even'])
assert_allclose(ds['a'].data, [1, 3])
assert_allclose(ds['b'].data, [20])
assert_allclose(ds['c'].data, [100, 300])
assert ds['sum'].loc[{'a': 1, 'b': 20, 'c': 300}].data == 321
assert ds['sum'].loc[{'a': 3, 'b': 20, 'c': 100}].data == 123
assert ds['sum'].loc[{'a': 1, 'b': 20, 'c': 100}].isnull()
assert ds['sum'].loc[{'a': 3, 'b': 20, 'c': 300}].isnull()
assert ds['a_even'].data.dtype == object
assert bool(ds['a_even'].sel(a=1, b=20, c=300).data) is False
assert bool(ds['a_even'].sel(a=3, b=20, c=100).data) is False
assert ds['a_even'].loc[{'a': 1, 'b': 20, 'c': 100}].isnull()
assert ds['a_even'].loc[{'a': 3, 'b': 20, 'c': 300}].isnull()
def test_array_return(self):
ds = case_runner_to_ds(fn=foo2_array, fn_args=['a', 'b'],
cases=[(2, 30), (4, 50)],
var_names='x',
var_dims=['time'],
var_coords={'time': np.arange(10) / 10})
assert ds.x.data.dtype == float
assert ds.x.sel(a=2, b=50, time=0.7).isnull()
assert ds.x.sel(a=4, b=50, time=0.3).data == 54.3
def test_multi_array_return(self):
ds = case_runner_to_ds(fn=foo2_array_array, fn_args=['a', 'b'],
cases=[(2, 30), (4, 50)],
var_names=['x', 'y'],
var_dims={('x', 'y'): 'time'},
var_coords={'time': ['a', 'b', 'c', 'd', 'e']})
assert ds['time'].data.dtype != object
assert_allclose(ds['x'].sel(a=4, b=50).data,
[50, 54, 58, 62, 66])
assert_allclose(ds['y'].sel(a=4, b=50).data,
[50, 46, 42, 38, 34])
def test_align_and_fillna_int(self):
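        # The two datasets cover complementary (a, b) combinations, so each
        # has NaNs exactly where the other has data; after an outer align,
        # fillna should leave no missing values.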
ds1 = case_runner_to_ds(foo2_array_array, fn_args=['a', 'b'],
cases=[(1, 10), (2, 20)],
var_names=['x', 'y'],
var_dims={('x', 'y'): 'time'},
var_coords={'time':
['a', 'b', 'c', 'd', 'e']})
ds2 = case_runner_to_ds(foo2_array_array, fn_args=['a', 'b'],
cases=[(2, 10), (1, 20)],
var_names=['x', 'y'],
var_dims={('x', 'y'): 'time'},
var_coords={'time':
['a', 'b', 'c', 'd', 'e']})
assert not np.logical_not(ds1['x'].isnull()).all()
assert not np.logical_not(ds1['y'].isnull()).all()
assert not np.logical_not(ds2['x'].isnull()).all()
assert not np.logical_not(ds2['y'].isnull()).all()
ds1, ds2 = xr.align(ds1, ds2, join='outer')
fds = ds1.fillna(ds2)
assert np.logical_not(fds['x'].isnull()).all()
assert np.logical_not(fds['y'].isnull()).all()
def test_align_and_fillna_complex(self):
ds1 = case_runner_to_ds(foo2_zarray1_zarray2, fn_args=['a', 'b'],
cases=[(1j, 10), (2j, 20)],
var_names=['x', 'y'],
var_dims={('x', 'y'): 'time'},
var_coords={'time':
['a', 'b', 'c', 'd', 'e']})
ds2 = case_runner_to_ds(foo2_zarray1_zarray2, fn_args=['a', 'b'],
cases=[(2j, 10), (1j, 20)],
var_names=['x', 'y'],
var_dims={('x', 'y'): 'time'},
var_coords={'time':
['a', 'b', 'c', 'd', 'e']})
assert not np.logical_not(np.isnan(ds1['x'].data)).all()
assert not np.logical_not(np.isnan(ds1['y'].data)).all()
assert not np.logical_not(np.isnan(ds2['x'].data)).all()
assert not np.logical_not(np.isnan(ds2['y'].data)).all()
assert all(t == complex for t in (ds1.x.dtype, ds2.x.dtype,
ds1.y.dtype, ds2.y.dtype))
assert ds1.y.dtype == complex
assert ds2.y.dtype == complex
ds1, ds2 = xr.align(ds1, ds2, join='outer')
fds = ds1.fillna(ds2)
assert np.logical_not(np.isnan(fds['x'].data)).all()
assert np.logical_not(np.isnan(fds['y'].data)).all()
class TestCaseRunnerToDF:
def test_single_arg(self):
df = case_runner_to_df(
fn=foo3_scalar,
fn_args=['a', 'b', 'c'],
cases=[(1, 10, 100), (2, 20, 200)],
var_names='x',
)
assert len(df) == 2
assert df.columns.tolist() == ['a', 'b', 'c', 'x']
assert 'int' in df.x.dtype.name
# --------------------------------------------------------------------------- #
# Finding and filling missing data #
# --------------------------------------------------------------------------- #
class TestFindMissingCases:
def test_simple(self):
ds = xr.Dataset(coords={'a': [1, 2, 3], 'b': [40, 50]})
ds['x'] = (('a', 'b'), np.array([[0.1, np.nan],
[np.nan, 0.2],
[np.nan, np.nan]]))
# Target cases and settings
t_cases = ((1, 50), (2, 40), (3, 40), (3, 50))
t_configs = tuple(dict(zip(['a', 'b'], t_case)) for t_case in t_cases)
# Missing cases and settings
m_args, m_cases = find_missing_cases(ds)
m_configs = tuple(dict(zip(m_args, m_case)) for m_case in m_cases)
# Assert same set of coordinates
assert all(t_config in m_configs for t_config in t_configs)
assert all(m_config in t_configs for m_config in m_configs)
def test_multires(self):
ds = xr.Dataset(coords={'a': [1, 2, 3], 'b': [40, 50]})
ds['x'] = (('a', 'b'), np.array([[0.1, np.nan],
[np.nan, 0.2],
[np.nan, np.nan]]))
ds['y'] = (('a', 'b'), np.array([['a', None],
[None, 'b'],
[None, None]]))
# Target cases and settings
t_cases = ((1, 50), (2, 40), (3, 40), (3, 50))
t_configs = tuple(dict(zip(['a', 'b'], t_case)) for t_case in t_cases)
# Missing cases and settings
m_args, m_cases = find_missing_cases(ds)
m_configs = tuple(dict(zip(m_args, m_case)) for m_case in m_cases)
# Assert same set of coordinates
assert set(m_args) == {'a', 'b'}
assert all(t_config in m_configs for t_config in t_configs)
assert all(m_config in t_configs for m_config in m_configs)
def test_var_dims_leave(self):
ds = xr.Dataset(coords={'a': [1, 2, 3], 'b': [40, 50],
't': [0.1, 0.2, 0.3]})
ds['x'] = (('a', 'b'), np.array([[0.1, np.nan],
[np.nan, 0.2],
[np.nan, np.nan]]))
ds['y'] = (('a', 'b', 't'), np.array([[[0.2] * 3, [np.nan] * 3],
[[np.nan] * 3, [0.4] * 3],
[[np.nan] * 3, [np.nan] * 3]]))
# Target cases and settings
t_cases = ((1, 50), (2, 40), (3, 40), (3, 50))
t_configs = tuple(dict(zip(['a', 'b'], t_case)) for t_case in t_cases)
# Missing cases and settings
m_args, m_cases = find_missing_cases(ds, ignore_dims='t')
m_configs = tuple(dict(zip(m_args, m_case)) for m_case in m_cases)
# Assert same set of coordinates
assert set(m_args) == {'a', 'b'}
assert all(t_config in m_configs for t_config in t_configs)
assert all(m_config in t_configs for m_config in m_configs)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import fixtures
import netaddr
import sys
import traceback
from nova.compute import manager
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
from nova.virt import event as virtevent
from nova.virt import fake
LOG = logging.getLogger(__name__)
def catch_notimplementederror(f):
"""Decorator to simplify catching drivers raising NotImplementedError
If a particular call makes a driver raise NotImplementedError, we
log it so that we can extract this information afterwards to
automatically generate a hypervisor/feature support matrix."""
def wrapped_func(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except NotImplementedError:
frame = traceback.extract_tb(sys.exc_info()[2])[-1]
LOG.error('%(driver)s does not implement %(method)s' % {
'driver': type(self.connection),
'method': frame[2]})
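    # Keep the wrapped function's name and docstring so test runners still
    # report the original test identity.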
wrapped_func.__name__ = f.__name__
wrapped_func.__doc__ = f.__doc__
return wrapped_func
class _FakeDriverBackendTestCase(object):
def _setup_fakelibvirt(self):
# So that the _supports_direct_io does the test based
# on the current working directory, instead of the
# default instances_path which doesn't exist
self.flags(instances_path='')
# Put fakelibvirt in place
if 'libvirt' in sys.modules:
self.saved_libvirt = sys.modules['libvirt']
else:
self.saved_libvirt = None
import nova.tests.fake_imagebackend as fake_imagebackend
import nova.tests.fake_libvirt_utils as fake_libvirt_utils
import nova.tests.fakelibvirt as fakelibvirt
sys.modules['libvirt'] = fakelibvirt
import nova.virt.libvirt.driver
import nova.virt.libvirt.firewall
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.imagebackend',
fake_imagebackend))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.firewall.libvirt',
fakelibvirt))
self.flags(rescue_image_id="2",
rescue_kernel_id="3",
rescue_ramdisk_id=None,
libvirt_snapshots_directory='./')
def fake_extend(image, size):
pass
def fake_migrateToURI(*a):
pass
def fake_make_drive(_self, _path):
pass
def fake_get_instance_disk_info(_self, instance, xml=None):
return '[]'
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(nova.virt.libvirt.driver.disk,
'extend', fake_extend)
# Like the existing fakelibvirt.migrateToURI, do nothing,
# but don't fail for these tests.
self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
'migrateToURI', fake_migrateToURI)
# We can't actually make a config drive v2 because ensure_tree has
# been faked out
self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
'make_drive', fake_make_drive)
def _teardown_fakelibvirt(self):
# Restore libvirt
if self.saved_libvirt:
sys.modules['libvirt'] = self.saved_libvirt
def setUp(self):
super(_FakeDriverBackendTestCase, self).setUp()
# TODO(sdague): it would be nice to do this in a way that only
        # the relevant backends were replaced for tests, though this
# should not harm anything by doing it for all backends
fake_image.stub_out_image_service(self.stubs)
self._setup_fakelibvirt()
def tearDown(self):
fake_image.FakeImageService_reset()
self._teardown_fakelibvirt()
super(_FakeDriverBackendTestCase, self).tearDown()
class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
"""Test that ComputeManager can successfully load both
old style and new style drivers and end up with the correct
final class"""
# if your driver supports being tested in a fake way, it can go here
#
# both long form and short form drivers are supported
new_drivers = {
'nova.virt.fake.FakeDriver': 'FakeDriver',
'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
'fake.FakeDriver': 'FakeDriver',
'libvirt.LibvirtDriver': 'LibvirtDriver'
}
def test_load_new_drivers(self):
for cls, driver in self.new_drivers.iteritems():
self.flags(compute_driver=cls)
# NOTE(sdague) the try block is to make it easier to debug a
# failure by knowing which driver broke
try:
cm = manager.ComputeManager()
except Exception as e:
self.fail("Couldn't load driver %s - %s" % (cls, e))
self.assertEqual(cm.driver.__class__.__name__, driver,
"Could't load driver %s" % cls)
def test_fail_to_load_new_drivers(self):
self.flags(compute_driver='nova.virt.amiga')
def _fake_exit(error):
raise test.TestingException()
self.stubs.Set(sys, 'exit', _fake_exit)
self.assertRaises(test.TestingException, manager.ComputeManager)
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.connection = importutils.import_object(self.driver_module,
fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
def _get_running_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
image_info = test_utils.get_test_image_info(None, instance_ref)
self.connection.spawn(self.ctxt, instance_ref, image_info,
[], 'herp', network_info=network_info)
return instance_ref, network_info
@catch_notimplementederror
def test_init_host(self):
self.connection.init_host('myhostname')
@catch_notimplementederror
def test_list_instances(self):
self.connection.list_instances()
@catch_notimplementederror
def test_spawn(self):
instance_ref, network_info = self._get_running_instance()
domains = self.connection.list_instances()
self.assertIn(instance_ref['name'], domains)
num_instances = self.connection.get_num_instances()
self.assertEqual(1, num_instances)
@catch_notimplementederror
def test_snapshot_not_running(self):
instance_ref = test_utils.get_test_instance()
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.snapshot,
self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_reboot(self):
reboot_type = "SOFT"
instance_ref, network_info = self._get_running_instance()
self.connection.reboot(self.ctxt, instance_ref, network_info,
reboot_type)
@catch_notimplementederror
def test_get_host_ip_addr(self):
host_ip = self.connection.get_host_ip_addr()
# Will raise an exception if it's not a valid IP at all
ip = netaddr.IPAddress(host_ip)
# For now, assume IPv4.
self.assertEquals(ip.version, 4)
@catch_notimplementederror
def test_set_admin_password(self):
instance_ref, network_info = self._get_running_instance()
self.connection.set_admin_password(instance_ref, 'p4ssw0rd')
@catch_notimplementederror
def test_inject_file(self):
instance_ref, network_info = self._get_running_instance()
self.connection.inject_file(instance_ref,
base64.b64encode('/testfile'),
base64.b64encode('testcontents'))
@catch_notimplementederror
def test_resume_state_on_host_boot(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
network_info)
@catch_notimplementederror
def test_rescue(self):
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
@catch_notimplementederror
def test_unrescue_unrescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_unrescue_rescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_poll_rebooting_instances(self):
instances = [self._get_running_instance()]
self.connection.poll_rebooting_instances(10, instances)
@catch_notimplementederror
def test_migrate_disk_and_power_off(self):
instance_ref, network_info = self._get_running_instance()
instance_type_ref = test_utils.get_test_instance_type()
self.connection.migrate_disk_and_power_off(
self.ctxt, instance_ref, 'dest_host', instance_type_ref,
network_info)
@catch_notimplementederror
def test_power_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
@catch_notimplementederror
def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(instance_ref)
@catch_notimplementederror
def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(instance_ref)
@catch_notimplementederror
def test_soft_delete(self):
instance_ref, network_info = self._get_running_instance()
self.connection.soft_delete(instance_ref)
@catch_notimplementederror
def test_restore_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_restore_soft_deleted(self):
instance_ref, network_info = self._get_running_instance()
self.connection.soft_delete(instance_ref)
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_pause(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
@catch_notimplementederror
def test_unpause_unpaused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_unpause_paused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_suspend(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
@catch_notimplementederror
def test_resume_unsuspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume(instance_ref, network_info)
@catch_notimplementederror
def test_resume_suspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
self.connection.resume(instance_ref, network_info)
@catch_notimplementederror
def test_destroy_instance_nonexistent(self):
fake_instance = {'id': 42, 'name': 'I just made this up!',
'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'}
network_info = test_utils.get_test_network_info()
self.connection.destroy(fake_instance, network_info)
@catch_notimplementederror
def test_destroy_instance(self):
instance_ref, network_info = self._get_running_instance()
self.assertIn(instance_ref['name'],
self.connection.list_instances())
self.connection.destroy(instance_ref, network_info)
self.assertNotIn(instance_ref['name'],
self.connection.list_instances())
@catch_notimplementederror
def test_get_volume_connector(self):
result = self.connection.get_volume_connector({'id': 'fake'})
self.assertTrue('ip' in result)
self.assertTrue('initiator' in result)
self.assertTrue('host' in result)
@catch_notimplementederror
def test_attach_detach_volume(self):
instance_ref, network_info = self._get_running_instance()
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
@catch_notimplementederror
def test_attach_detach_different_power_states(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
self.connection.power_on(instance_ref)
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
'/dev/sda')
@catch_notimplementederror
def test_get_info(self):
instance_ref, network_info = self._get_running_instance()
info = self.connection.get_info(instance_ref)
self.assertIn('state', info)
self.assertIn('max_mem', info)
self.assertIn('mem', info)
self.assertIn('num_cpu', info)
self.assertIn('cpu_time', info)
@catch_notimplementederror
def test_get_info_for_unknown_instance(self):
self.assertRaises(exception.NotFound,
self.connection.get_info,
{'name': 'I just made this name up'})
@catch_notimplementederror
def test_get_diagnostics(self):
instance_ref, network_info = self._get_running_instance()
self.connection.get_diagnostics(instance_ref)
@catch_notimplementederror
def test_block_stats(self):
instance_ref, network_info = self._get_running_instance()
stats = self.connection.block_stats(instance_ref['name'], 'someid')
self.assertEquals(len(stats), 5)
@catch_notimplementederror
def test_interface_stats(self):
instance_ref, network_info = self._get_running_instance()
stats = self.connection.interface_stats(instance_ref['name'], 'someid')
self.assertEquals(len(stats), 8)
@catch_notimplementederror
def test_get_console_output(self):
fake_libvirt_utils.files['dummy.log'] = ''
instance_ref, network_info = self._get_running_instance()
console_output = self.connection.get_console_output(instance_ref)
self.assertTrue(isinstance(console_output, basestring))
@catch_notimplementederror
def test_get_vnc_console(self):
instance_ref, network_info = self._get_running_instance()
vnc_console = self.connection.get_vnc_console(instance_ref)
self.assertIn('internal_access_path', vnc_console)
self.assertIn('host', vnc_console)
self.assertIn('port', vnc_console)
@catch_notimplementederror
def test_get_spice_console(self):
instance_ref, network_info = self._get_running_instance()
spice_console = self.connection.get_spice_console(instance_ref)
self.assertIn('internal_access_path', spice_console)
self.assertIn('host', spice_console)
self.assertIn('port', spice_console)
self.assertIn('tlsPort', spice_console)
@catch_notimplementederror
def test_get_console_pool_info(self):
instance_ref, network_info = self._get_running_instance()
console_pool = self.connection.get_console_pool_info(instance_ref)
self.assertIn('address', console_pool)
self.assertIn('username', console_pool)
self.assertIn('password', console_pool)
@catch_notimplementederror
def test_refresh_security_group_rules(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_security_group_rules(1)
@catch_notimplementederror
def test_refresh_security_group_members(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_security_group_members(1)
@catch_notimplementederror
def test_refresh_provider_fw_rules(self):
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_provider_fw_rules()
@catch_notimplementederror
def test_ensure_filtering_for_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
self.connection.ensure_filtering_rules_for_instance(instance_ref,
network_info)
@catch_notimplementederror
def test_unfilter_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
self.connection.unfilter_instance(instance_ref, network_info)
@catch_notimplementederror
def test_live_migration(self):
instance_ref, network_info = self._get_running_instance()
self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
lambda *a: None, lambda *a: None)
@catch_notimplementederror
def _check_host_status_fields(self, host_status):
self.assertIn('disk_total', host_status)
self.assertIn('disk_used', host_status)
self.assertIn('host_memory_total', host_status)
self.assertIn('host_memory_free', host_status)
@catch_notimplementederror
def test_get_host_stats(self):
host_status = self.connection.get_host_stats()
self._check_host_status_fields(host_status)
@catch_notimplementederror
def test_set_host_enabled(self):
self.connection.set_host_enabled('a useless argument?', True)
@catch_notimplementederror
def test_get_host_uptime(self):
self.connection.get_host_uptime('a useless argument?')
@catch_notimplementederror
def test_host_power_action_reboot(self):
self.connection.host_power_action('a useless argument?', 'reboot')
@catch_notimplementederror
def test_host_power_action_shutdown(self):
self.connection.host_power_action('a useless argument?', 'shutdown')
@catch_notimplementederror
def test_host_power_action_startup(self):
self.connection.host_power_action('a useless argument?', 'startup')
@catch_notimplementederror
def test_add_to_aggregate(self):
self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
@catch_notimplementederror
def test_remove_from_aggregate(self):
self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
def test_events(self):
got_events = []
def handler(event):
got_events.append(event)
self.connection.register_event_listener(handler)
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
event2 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_PAUSED)
self.connection.emit_event(event1)
self.connection.emit_event(event2)
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_RESUMED)
event4 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STOPPED)
self.connection.emit_event(event3)
self.connection.emit_event(event4)
want_events = [event1, event2, event3, event4]
self.assertEqual(want_events, got_events)
def test_event_bad_object(self):
# Passing in something which does not inherit
# from virtevent.Event
def handler(event):
pass
self.connection.register_event_listener(handler)
badevent = {
"foo": "bar"
}
self.assertRaises(ValueError,
self.connection.emit_event,
badevent)
def test_event_bad_callback(self):
# Check that if a callback raises an exception,
# it does not propagate back out of the
# 'emit_event' call
def handler(event):
raise Exception("Hit Me!")
self.connection.register_event_listener(handler)
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
self.connection.emit_event(event1)
class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = "nova.virt.driver.ComputeDriver"
super(AbstractDriverTestCase, self).setUp()
class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
super(FakeConnectionTestCase, self).setUp()
class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
# Point _VirtDriverTestCase at the right module
self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
super(LibvirtConnTestCase, self).setUp()
def test_force_hard_reboot(self):
self.flags(libvirt_wait_soft_reboot_seconds=0)
self.test_reboot()
def test_migrate_disk_and_power_off(self):
# there is lack of fake stuff to execute this method. so pass.
self.skipTest("Test nothing, but this method"
" needed to override superclass.")
|
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import time
from testtools import testcase
from barbican.tests import utils
from functionaltests.api import base
from functionaltests.api.v1.behaviors import container_behaviors
from functionaltests.api.v1.behaviors import order_behaviors
from functionaltests.api.v1.behaviors import secret_behaviors
from functionaltests.api.v1.models import order_models
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
def get_default_order_create_data():
return {'type': 'key',
"meta": {
"name": "barbican functional test secret name",
"algorithm": "aes",
"bit_length": 256,
"mode": "cbc",
}
}
# Any field with None will be created in the model with None as the value
# but will be omitted in the final request (via the requests package)
# to the server.
#
# Given that fact, order_create_nones_data is effectively an empty json request
# to the server.
def get_default_order_create_all_none_data():
return {
'type': None,
"meta": {
"name": None,
"algorithm": None,
"bit_length": None,
"mode": None,
}
}
def get_default_order_create_asymmetric_data():
return {
'type': 'asymmetric',
"meta": {
"name": "barbican functional test asymmetric secret name",
"algorithm": "rsa",
"bit_length": 2048,
"mode": "cbc",
}
}
@utils.parameterized_test_case
class OrdersTestCase(base.TestCase):
def setUp(self):
super(OrdersTestCase, self).setUp()
self.behaviors = order_behaviors.OrderBehaviors(self.client)
self.container_behaviors = container_behaviors.ContainerBehaviors(
self.client)
self.secret_behaviors = secret_behaviors.SecretBehaviors(self.client)
self.create_default_data = get_default_order_create_data()
self.create_all_none_data = get_default_order_create_all_none_data()
self.asymmetric_data = get_default_order_create_asymmetric_data()
def tearDown(self):
self.behaviors.delete_all_created_orders()
super(OrdersTestCase, self).tearDown()
def wait_for_order(self, order_resp, order_ref):
# Make sure we have an active order
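        # (poll roughly once per second, giving up after four attempts)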
time_count = 1
while order_resp.model.status != "ACTIVE" and time_count <= 4:
time.sleep(1)
time_count += 1
order_resp = self.behaviors.get_order(order_ref)
@testcase.attr('positive')
def test_order_create_w_out_name(self):
"""Create an order without the name attribute."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.name = None
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@testcase.attr('positive')
def test_order_create_w_empty_name(self):
"""Create an order the name attribute an empty string."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.name = ""
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@testcase.attr('positive')
def test_orders_create_check_empty_name(self):
"""Create order with empty meta name.
The resulting secret name should be a UUID.
"""
# first create an order with defaults
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['name'] = ""
create_resp, order_ref = self.behaviors.create_order(test_model)
# verify that the order was created successfully
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
# given the order href, retrieve the order
order_resp = self.behaviors.get_order(order_ref)
# verify that the get was successful
self.assertEqual(order_resp.status_code, 200)
self.assertTrue(order_resp.model.status == "ACTIVE" or
order_resp.model.status == "PENDING")
# PENDING orders may take a moment to be processed by the workers
# when running tests with queue enabled
self.wait_for_order(order_resp, order_ref)
# verify the new secret's name matches the name in the secret ref
# in the newly created order.
secret_resp = self.secret_behaviors.get_secret_metadata(
order_resp.model.secret_ref)
self.assertEqual(secret_resp.status_code, 200)
self.assertEqual(secret_resp.model.name, test_model.meta['name'])
@testcase.attr('positive')
def test_order_and_secret_metadata_same(self):
"""Checks that metadata from secret GET and order GET are the same.
Covers checking that secret metadata from a get on the order and
secret metadata from a get on the secret are the same. Assumes
that the order status will be active and not pending.
"""
test_model = order_models.OrderModel(**self.create_default_data)
resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(resp.status_code, 202)
order_resp = self.behaviors.get_order(order_ref)
self.assertEqual(order_resp.status_code, 200)
# PENDING orders may take a moment to be processed by the workers
# when running tests with queue enabled
self.wait_for_order(order_resp, order_ref)
secret_ref = order_resp.model.secret_ref
secret_resp = self.secret_behaviors.get_secret_metadata(secret_ref)
self.assertEqual(order_resp.model.meta['name'],
secret_resp.model.name,
'Names were not the same')
self.assertEqual(order_resp.model.meta['algorithm'],
secret_resp.model.algorithm,
'Algorithms were not the same')
self.assertEqual(order_resp.model.meta['bit_length'],
secret_resp.model.bit_length,
'Bit lengths were not the same')
self.assertEqual(order_resp.model.meta['expiration'],
secret_resp.model.expiration,
'Expirations were not the same')
self.assertEqual(order_resp.model.meta['mode'],
secret_resp.model.mode,
'Modes were not the same')
@testcase.attr('negative')
def test_order_get_order_that_doesnt_exist(self):
"""Covers case of getting a non-existent order."""
# try to get a non-existent order
order_resp = self.behaviors.get_order("a ref that does not exist")
# verify that the order get failed
self.assertEqual(order_resp.status_code, 404)
@testcase.attr('negative')
def test_order_create_w_invalid_content_type(self):
"""Covers creating order with invalid content-type header."""
test_model = order_models.OrderModel(**self.create_default_data)
extra_headers = {"Content-Type": "crypto/boom"}
create_resp, order_ref = self.behaviors.create_order(
test_model, extra_headers=extra_headers)
self.assertEqual(create_resp.status_code, 415)
self.assertIsNone(order_ref)
@testcase.attr('negative')
def test_order_create_all_none(self):
"""Covers order creation with empty JSON."""
test_model = order_models.OrderModel(**self.create_all_none_data)
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
self.assertIsNone(order_ref)
@testcase.attr('negative')
def test_order_create_empty_entries(self):
"""Covers order creation with empty JSON."""
test_model = order_models.OrderModel(**self.create_all_none_data)
test_model.meta['name'] = ""
test_model.meta['algorithm'] = ""
test_model.meta['mode'] = ""
test_model.meta['bit_length'] = ""
test_model.meta['payload_content_type'] = ""
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
self.assertIsNone(order_ref)
@testcase.attr('negative')
def test_order_create_oversized_strings(self):
"""Covers order creation with empty JSON."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['name'] = base.TestCase.oversized_field
test_model.meta['algorithm'] = base.TestCase.oversized_field
test_model.meta['mode'] = base.TestCase.oversized_field
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
self.assertIsNone(order_ref)
@testcase.attr('negative')
def test_order_create_error_message_on_invalid_order_create(self):
"""Related Launchpad issue: 1269594."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['payload'] = "blarg!"
resp, order_ref = self.behaviors.create_order(test_model)
# Make sure we actually get a message back
error_msg = json.loads(resp.content).get('title')
self.assertEqual(resp.status_code, 400)
self.assertIsNotNone(error_msg)
self.assertNotEqual(error_msg, 'None')
@utils.parameterized_dataset({
'8': [8],
'64': [64],
'128': [128],
'192': [192],
'256': [256],
'1024': [1024],
'2048': [2048],
'4096': [4096]
})
@testcase.attr('positive')
def test_order_create_valid_bit_length(self, bit_length):
"""Covers creating orders with various valid bit lengths."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['bit_length'] = bit_length
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@utils.parameterized_dataset({
'negative_maxint': [-sys.maxint],
'negative_7': [-7],
'negative_1': [-1],
'0': [0],
'1': [1],
'7': [7],
'129': [129],
'none': [None],
'empty': [''],
'space': [' '],
'over_signed_small_int': [32768]
})
@testcase.attr('negative')
def test_order_create_invalid_bit_length(self, bit_length):
"""Covers creating orders with various invalid bit lengths."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['bit_length'] = bit_length
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
@utils.parameterized_dataset({
'array': [['array']],
'int': [123],
'oversized_payload': [str(base.TestCase.oversized_payload)],
'standard_payload': ['standard payload'],
'empty': ['']
})
@testcase.attr('negative')
def test_order_create_invalid_payload(self, payload):
"""Covers creating orders with various invalid payloads."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['payload'] = payload
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
@utils.parameterized_dataset({
'alphanumeric': ['1f34ds'],
'len_255': [base.TestCase.max_sized_field],
'uuid': ['54262d9d-4bc7-4821-8df0-dc2ca8e112bb'],
'punctuation': ['~!@#$%^&*()_+`-={}[]|:;<>,.?'],
'empty': [""]
})
@testcase.attr('positive')
def test_order_create_valid_name(self, name):
"""Covers creating orders with various valid names."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['name'] = name
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@utils.parameterized_dataset({
'int': [123]
})
@testcase.attr('negative')
def test_order_create_invalid_name(self, name):
"""Covers creating orders with various invalid names."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['name'] = name
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
@utils.parameterized_dataset({
'cbc': ['cbc']
})
@testcase.attr('positive')
def test_order_create_valid_mode(self, mode):
"""Covers creating orders with various valid modes."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['mode'] = mode
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@utils.parameterized_dataset({
'int': [123]
})
@testcase.attr('negative')
def test_order_create_invalid_mode(self, mode):
"""Covers creating orders with various invalid modes."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['mode'] = mode
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
@utils.parameterized_dataset({
'aes': ['aes']
})
@testcase.attr('positive')
def test_order_create_valid_algorithm(self, algorithm):
"""Covers creating orders with various valid algorithms."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['algorithm'] = algorithm
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@utils.parameterized_dataset({
'int': [123]
})
@testcase.attr('negative')
def test_order_create_invalid_algorithm(self, algorithm):
"""Covers creating orders with various invalid algorithms."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['algorithm'] = algorithm
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
@utils.parameterized_dataset({
'empty': [''],
'text/plain': ['text/plain'],
'text_plain_space_charset_utf8': ['text/plain; charset=utf-8'],
})
@testcase.attr('positive')
def test_order_create_valid_payload_content_type(self, pct):
"""Covers order creation with various valid payload content types."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['payload_content_type'] = pct
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@utils.parameterized_dataset({
'int': [123],
'invalid': ['invalid'],
'oversized_string': [base.TestCase.oversized_field],
'text': ['text'],
'text_slash_with_no_subtype': ['text/'],
})
@testcase.attr('negative')
def test_order_create_invalid_payload_content_type(self, pct):
"""Covers order creation with various invalid payload content types."""
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['payload_content_type'] = pct
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
@utils.parameterized_dataset({
'negative_five_long_expire': {
'timezone': '-05:00',
'days': 5},
'positive_five_long_expire': {
'timezone': '+05:00',
'days': 5},
'negative_one_short_expire': {
'timezone': '-01',
'days': 1},
'positive_one_short_expire': {
'timezone': '+01',
'days': 1}
})
@testcase.attr('positive')
def test_order_create_valid_expiration(self, **kwargs):
"""Covers creating orders with various valid expiration data."""
timestamp = utils.create_timestamp_w_tz_and_offset(**kwargs)
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['expiration'] = timestamp
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
self.assertIsNotNone(order_ref)
@utils.parameterized_dataset({
'malformed_timezone': {
'timezone': '-5:00',
'days': 5},
})
@testcase.attr('negative')
def test_order_create_invalid_expiration(self, **kwargs):
"""Covers creating orders with various invalid expiration data."""
timestamp = utils.create_timestamp_w_tz_and_offset(**kwargs)
test_model = order_models.OrderModel(**self.create_default_data)
test_model.meta['expiration'] = timestamp
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 400)
@testcase.attr('positive')
def test_order_create_change_host_header(self, **kwargs):
"""Create an order with a (possibly) malicious host name in header."""
test_model = order_models.OrderModel(**self.create_default_data)
malicious_hostname = 'some.bad.server.com'
changed_host_header = {'Host': malicious_hostname}
resp, order_ref = self.behaviors.create_order(
test_model, extra_headers=changed_host_header)
self.assertEqual(resp.status_code, 202)
# get Location field from result and assert that it is NOT the
# malicious one.
regex = '.*{0}.*'.format(malicious_hostname)
self.assertNotRegexpMatches(resp.headers['location'], regex)
@testcase.attr('positive')
def test_encryption_using_generated_key(self):
"""Tests functionality of a generated asymmetric key pair."""
test_model = order_models.OrderModel(**self.asymmetric_data)
create_resp, order_ref = self.behaviors.create_order(test_model)
self.assertEqual(create_resp.status_code, 202)
order_resp = self.behaviors.get_order(order_ref)
self.assertEqual(order_resp.status_code, 200)
container_resp = self.container_behaviors.get_container(
order_resp.model.container_ref)
self.assertEqual(container_resp.status_code, 200)
secret_dict = {}
for secret in container_resp.model.secret_refs:
self.assertIsNotNone(secret.secret_ref)
secret_resp = self.secret_behaviors.get_secret(
secret.secret_ref, "application/octet-stream")
self.assertIsNotNone(secret_resp)
secret_dict[secret.name] = secret_resp.content
private_key = serialization.load_pem_private_key(
secret_dict['private_key'],
password=None,
backend=backends.default_backend()
)
public_key = serialization.load_pem_public_key(
secret_dict['public_key'],
backend=backends.default_backend()
)
self.assertIsNotNone(private_key)
self.assertIsNotNone(public_key)
message = b'plaintext message'
ciphertext = public_key.encrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
plaintext = private_key.decrypt(
ciphertext,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
self.assertEqual(message, plaintext)
class OrdersPagingTestCase(base.PagingTestCase):
def setUp(self):
super(OrdersPagingTestCase, self).setUp()
self.behaviors = order_behaviors.OrderBehaviors(self.client)
# make a local mutable copy of the default data to prevent
# possible data contamination
self.create_default_data = get_default_order_create_data()
def tearDown(self):
self.behaviors.delete_all_created_orders()
super(OrdersPagingTestCase, self).tearDown()
def create_model(self):
return order_models.OrderModel(**self.create_default_data)
def create_resources(self, count=0, model=None):
for x in range(0, count):
self.behaviors.create_order(model)
def get_resources(self, limit=10, offset=0, filter=None):
return self.behaviors.get_orders(limit=limit, offset=offset,
filter=filter)
def set_filter_field(self, unique_str, model):
        """Set the meta field which we use in the get_resources filter."""
model.meta['name'] = unique_str
class OrdersUnauthedTestCase(base.TestCase):
def setUp(self):
super(OrdersUnauthedTestCase, self).setUp()
self.behaviors = order_behaviors.OrderBehaviors(self.client)
self.container_behaviors = container_behaviors.ContainerBehaviors(
self.client)
self.secret_behaviors = secret_behaviors.SecretBehaviors(self.client)
self.create_default_data = get_default_order_create_data()
self.dummy_order_ref = 'orders/dummy-7b86-4071-935d-ef6b83729200'
self.dummy_project_id = 'dummy'
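        # These dummy references don't need to resolve; every request in this
        # class is expected to be rejected with a 401.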
def tearDown(self):
self.behaviors.delete_all_created_orders()
super(OrdersUnauthedTestCase, self).tearDown()
@testcase.attr('negative', 'security')
def test_order_create_unauthed_no_proj_id(self):
"""Attempt to create an order without a token or project id
Should return 401
"""
        model = order_models.OrderModel(**self.create_default_data)
resp, order_ref = self.behaviors.create_order(model, use_auth=False)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_order_get_unauthed_no_proj_id(self):
"""Attempt to get an order without a token or project id
Should return 401
"""
resp = self.behaviors.get_order(self.dummy_order_ref, use_auth=False)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_order_get_order_list_unauthed_no_proj_id(self):
"""Attempt to get the list of orders without a token or project id
Should return 401
"""
resp, orders, next_ref, prev_ref = self.behaviors.get_orders(
use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_order_delete_unauthed_no_proj_id(self):
"""Attempt to delete an order without a token or project id
Should return 401
"""
resp = self.behaviors.delete_order(
self.dummy_order_ref, expected_fail=True, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_order_create_unauthed_with_proj_id(self):
"""Attempt to create an order with a project id, but no token
Should return 401
"""
        model = order_models.OrderModel(**self.create_default_data)
headers = {'X-Project-Id': self.dummy_project_id}
resp, order_ref = self.behaviors.create_order(
model, extra_headers=headers, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_order_get_unauthed_with_proj_id(self):
"""Attempt to get an order with a project id, but no token
Should return 401
"""
headers = {'X-Project-Id': self.dummy_project_id}
resp = self.behaviors.get_order(
self.dummy_order_ref, extra_headers=headers, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_order_get_order_list_unauthed_with_proj_id(self):
"""Attempt to get the list of orders with a project id, but no token
Should return 401
"""
headers = {'X-Project-Id': self.dummy_project_id}
resp, orders, next_ref, prev_ref = self.behaviors.get_orders(
extra_headers=headers, use_auth=False
)
self.assertEqual(401, resp.status_code)
@testcase.attr('negative', 'security')
def test_order_delete_unauthed_with_proj_id(self):
"""Attempt to delete an order with a project id, but no token
Should return 401
"""
headers = {'X-Project-Id': self.dummy_project_id}
resp = self.behaviors.delete_order(
self.dummy_order_ref, extra_headers=headers, expected_fail=True,
use_auth=False
)
self.assertEqual(401, resp.status_code)
|
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.urls import reverse
import logging
from ci.git_api import GitAPI, GitException, copydoc
import requests
import re
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
logger = logging.getLogger('ci')
class GitHubAPI(GitAPI):
STATUS = ((GitAPI.PENDING, "pending"),
(GitAPI.ERROR, "error"),
(GitAPI.SUCCESS, "success"),
(GitAPI.FAILURE, "failure"),
(GitAPI.RUNNING, "pending"),
(GitAPI.CANCELED, "error"),
)
def __init__(self, config, access_user=None, token=None):
super(GitHubAPI, self).__init__(config, access_user=access_user, token=token)
self._api_url = config.get("api_url", "https://api.github.com")
self._github_url = config.get("html_url", "https://github.com")
self._hostname = config.get("hostname", "github.com")
self._prefix = "%s_" % self._hostname
self._repos_key = "%s_repos" % self._prefix
self._org_repos_key = "%s_org_repos" % self._prefix
self._headers["Accept"] = "application/vnd.github.v3+json"
if self._access_user is not None:
self._session = self._access_user.start_session()
elif self._token is not None:
self._headers["Authorization"] = "token %s" % self._token
self._session = requests
else:
# No authorization, just straight requests
self._session = requests
@copydoc(GitAPI.sign_in_url)
def sign_in_url(self):
return reverse('ci:github:sign_in', args=[self._hostname])
@copydoc(GitAPI.branch_html_url)
def branch_html_url(self, owner, repo, branch):
return "%s/tree/%s" % (self.repo_html_url(owner, repo), branch)
@copydoc(GitAPI.repo_html_url)
def repo_html_url(self, owner, repo):
return "%s/%s/%s" %(self._github_url, owner, repo)
@copydoc(GitAPI.commit_html_url)
def commit_html_url(self, owner, repo, sha):
return "%s/commits/%s" % (self.repo_html_url(owner, repo), sha)
def _commit_comment_url(self, owner, repo, sha):
"""
        API URL to get the list of comments on a commit SHA.
Typically used for the comment URL on a push event.
"""
return "%s/repos/%s/%s/commits/%s/comments" % (self._api_url, owner, repo, sha)
def _status_str(self, status):
"""
Used to convert a GitAPI status into a string that GitHub wants.
"""
for status_pair in self.STATUS:
if status == status_pair[0]:
return status_pair[1]
return None
@copydoc(GitAPI.get_all_repos)
def get_all_repos(self, username):
repos = self._get_user_repos()
repos.extend(self._get_user_org_repos())
repos.sort()
return repos
def _get_user_repos(self):
"""
Gets a list of repos the user owns or is a collaborator on.
"""
url = "%s/user/repos" % self._api_url
data = {"affiliation": ["owner", "collaborator"]}
repo_data = self.get_all_pages(url, data)
owner_repo = []
if repo_data:
for repo in repo_data:
owner_repo.append("%s/%s" % (repo['owner']['login'], repo['name']))
owner_repo.sort()
return owner_repo
@copydoc(GitAPI.get_repos)
def get_repos(self, session):
if self._repos_key in session:
return session[self._repos_key]
owner_repo = self._get_user_repos()
session[self._repos_key] = owner_repo
return owner_repo
@copydoc(GitAPI.get_branches)
def get_branches(self, owner, repo):
url = "%s/repos/%s/%s/branches" % (self._api_url, owner, repo)
data = self.get_all_pages(url)
branches = []
if data:
for branch in data:
branches.append(branch['name'])
branches.sort()
return branches
def _get_user_org_repos(self):
"""
        Gets a list of repos in organizations that the user is a member of.
"""
url = "%s/user/repos" % self._api_url
data = {"affiliation": "organization_member"}
repo_data = self.get_all_pages(url, data)
org_repo = []
if repo_data:
for repo in repo_data:
org_repo.append("%s/%s" % (repo['owner']['login'], repo['name']))
org_repo.sort()
return org_repo
@copydoc(GitAPI.update_pr_status)
def update_pr_status(self, base, head, state, event_url, description, context, job_stage):
self._update_pr_status(base.user().name, base.repo().name, head.sha, state, event_url, description, context)
def _update_pr_status(self, owner, repo, sha, state, event_url, description, context):
"""
Utility function that implements GitAPI.update_pr_status
"""
if not self._update_remote:
return
data = {
'state': self._status_str(state),
'target_url': event_url,
'description': description,
'context': context,
}
url = "%s/repos/%s/%s/statuses/%s" % (self._api_url, owner, repo, sha)
        timeout = None
if state in [self.RUNNING, self.PENDING]:
# decrease the timeout since it is not a big deal if these don't get set
timeout = 2
self.post(url, data=data, timeout=timeout)
if not self._bad_response:
logger.info("Set pr status %s:\nSent Data:\n%s" % (url, self._format_json(data)))
def _remove_pr_todo_labels(self, owner, repo, pr_num):
"""
Removes all labels on a PR with the labels that start with a certain prefix
Input:
owner[str]: name of the owner of the repo
repo[str]: name of the repository
pr_num[int]: PR number
"""
if not self._update_remote:
return
url = "%s/repos/%s/%s/issues/%s/labels" % (self._api_url, owner, repo, pr_num)
# First get a list of all labels
data = self.get_all_pages(url)
if not data:
return
# We could filter out the unwanted labels and then POST the new list
# but I don't like the message that appears on GitHub.
# Instead, delete each one. This should be fine since there won't
# be many of these.
for label in data:
for remove_label in self._remove_pr_labels:
if label["name"].startswith(remove_label):
new_url = "%s/%s" % (url, label["name"])
response = self.delete(new_url)
if response is not None:
logger.info("%s/%s #%s: Removed label '%s'" % (owner, repo, pr_num, label["name"]))
break
@copydoc(GitAPI.remove_pr_label)
def remove_pr_label(self, repo, pr_num, label_name):
self._remove_pr_label(repo.user.name, repo.name, pr_num, label_name)
def _remove_pr_label(self, owner, repo, pr_num, label_name):
"""
Implements GitAPI.remove_pr_label
"""
if not self._update_remote:
return
prefix = "%s/%s #%s:" % (owner, repo, pr_num)
if not label_name:
logger.info("%s Not removing empty label" % prefix)
return
url = "%s/repos/%s/%s/issues/%s/labels/%s" % (self._api_url, owner, repo, pr_num, label_name)
response = self.delete(url, log=False)
if not response or response.status_code == 404:
# if we get this then the label probably isn't on the PR
logger.info("%s Label '%s' was not found" % (prefix, label_name))
return
try:
response.raise_for_status()
logger.info("%s Removed label '%s'" % (prefix, label_name))
except Exception as e:
msg = "%s Problem occured while removing label '%s'\nURL: %s\nError: %s" \
% (prefix, label_name, url, e)
self._add_error(msg)
@copydoc(GitAPI.add_pr_label)
def add_pr_label(self, repo, pr_num, label_name):
self._add_pr_label(repo.user.name, repo.name, pr_num, label_name)
def _add_pr_label(self, owner, repo, pr_num, label_name):
"""
Implements GitAPI.add_pr_label
"""
if not self._update_remote:
return
prefix = "%s/%s #%s:" % (owner, repo, pr_num)
if not label_name:
logger.info("%s Not adding empty label" % prefix)
return
url = "%s/repos/%s/%s/issues/%s/labels" % (self._api_url, owner, repo, pr_num)
response = self.post(url, data=[label_name])
if not self._bad_response and response is not None:
logger.info("%s Added label '%s'" % (prefix, label_name))
@copydoc(GitAPI.is_collaborator)
def is_collaborator(self, user, repo):
return self._is_collaborator(user.name, repo.user.name, repo.name)
def _is_collaborator(self, user, owner, repo):
"""
Implements GitAPI.is_collaborator
"""
if owner == user:
# user is the owner
return True
url = "%s/repos/%s/%s/collaborators/%s" % (self._api_url, owner, repo, user)
response = self.get(url, log=False)
if response is None:
self._add_error("Error occurred getting URL %s" % url)
return False
prefix = "%s/%s:" % (owner, repo)
        # A successful check returns "204 No Content"
if response.status_code == 403:
logger.info('%s User "%s" does not have permission to check collaborators' % (prefix, user))
return False
elif response.status_code == 404:
logger.info('%s User "%s" is NOT a collaborator' % (prefix, user))
return False
elif response.status_code == 204:
logger.info('%s User "%s" is a collaborator' % (prefix, user))
return True
else:
self._add_error('%s Unknown response on collaborator check for user "%s"\n%s' %
(prefix, user, self._response_to_str(response)))
return False
@copydoc(GitAPI.pr_comment)
def pr_comment(self, url, msg):
if not self._update_remote:
return
comment = {'body': msg}
self.post(url, data=comment)
@copydoc(GitAPI.pr_review_comment)
def pr_review_comment(self, url, sha, filepath, position, msg):
if not self._update_remote:
return
comment = {'body': msg,
"commit_id": sha,
"path": filepath,
"position": int(position),
}
self.post(url, data=comment)
@copydoc(GitAPI.last_sha)
def last_sha(self, owner, repo, branch):
url = "%s/repos/%s/%s/branches/%s" % (self._api_url, owner, repo, branch)
response = self.get(url)
if not self._bad_response:
data = response.json()
if data and "commit" in data:
return data['commit']['sha']
self._add_error("Failed to get last SHA for %s/%s:%s" % (owner, repo, branch))
def _tag_sha(self, owner, repo, tag):
"""
Get the SHA for a tag
Input:
owner[str]: owner of the repository
repo[str]: name of the repository
tag[str]: name of the tag
Return:
SHA of the tag or None if there was a problem
"""
url = "%s/repos/%s/%s/tags" % (self._api_url, owner, repo)
data = self.get_all_pages(url)
if data:
for t in data:
if t["name"] == tag:
return t["commit"]["sha"]
self._add_error('Failed to find tag "%s" in %s.' % (tag, url))
@copydoc(GitAPI.install_webhooks)
def install_webhooks(self, user, repo):
self._install_webhooks(user.name, user.build_key, repo.user.name, repo.name)
def _install_webhooks(self, user, user_build_key, owner, repo):
"""
Implements GitAPI.install_webhooks
"""
if not self._install_webhook:
return
hook_url = '%s/repos/%s/%s/hooks' % (self._api_url, owner, repo)
callback_url = urljoin(self._civet_url, reverse('ci:github:webhook', args=[user_build_key]))
data = self.get_all_pages(hook_url)
if self._bad_response or data is None:
err = 'Failed to access webhook to %s/%s for user %s' % (owner, repo, user)
self._add_error(err)
raise GitException(err)
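        # Look for an existing hook that already delivers push and pull_request
        # events to our callback URL as JSON, so we don't install a duplicate.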
have_hook = False
for hook in data:
events = hook.get('events', [])
if ('pull_request' not in events) or ('push' not in events):
continue
if hook['config']['url'] == callback_url and hook['config']['content_type'] == 'json':
have_hook = True
break
if have_hook:
return
add_hook = {
'name': 'web', # "web" is required for webhook
'active': True,
'events': ['push', 'pull_request'],
'config': {
'url': callback_url,
'content_type': 'json',
'insecure_ssl': '1',
}
}
response = self.post(hook_url, data=add_hook)
data = response.json()
if self._bad_response or "errors" in data:
raise GitException(data['errors'])
logger.info('%s/%s: Added webhook for user %s' % (owner, repo, user))
def _get_pr_changed_files(self, owner, repo, pr_num):
"""
Gets a list of changed files in this PR.
Input:
owner[str]: name of the owner of the repo
repo[str]: name of the repository
pr_num[int]: PR number
Return:
list[str]: Filenames that have changed in the PR
"""
url = "%s/repos/%s/%s/pulls/%s/files" % (self._api_url, owner, repo, pr_num)
data = self.get_all_pages(url)
filenames = []
if data and not self._bad_response:
for f in data:
if "filename" in f:
filenames.append(f["filename"])
filenames.sort()
if not filenames:
self._add_error("Didn't read any PR changed files at URL: %s\nData: %s" % (url, data))
return filenames
@copydoc(GitAPI.get_pr_comments)
def get_pr_comments(self, url, username, comment_re):
data = self.get_all_pages(url)
comments = []
if not self._bad_response and data:
for c in data:
if c["user"]["login"] != username:
continue
if re.search(comment_re, c["body"]):
comments.append(c)
return comments
@copydoc(GitAPI.remove_pr_comment)
def remove_pr_comment(self, comment):
if not self._update_remote:
return
del_url = comment.get("url")
response = self.delete(del_url)
if not self._bad_response and response:
logger.info("Removed comment: %s" % del_url)
@copydoc(GitAPI.edit_pr_comment)
def edit_pr_comment(self, comment, msg):
if not self._update_remote:
return
edit_url = comment.get("url")
response = self.patch(edit_url, data={"body": msg})
if not self._bad_response and response:
logger.info("Edited PR comment: %s" % edit_url)
def _is_org_member(self, org):
"""
Checks to see if the user is a member of an organization.
Input:
org[str]: Name of the organization to check
Return:
bool
"""
url = "%s/user/orgs" % self._api_url
data = self.get_all_pages(url)
if not self._bad_response and data:
for org_data in data:
if org_data["login"] == org:
return True
return False
def _is_team_member(self, team_id, username):
"""
Checks to see if a user is a member of the team.
Input:
team_id[int]: ID of the team to check
username[str]: The user to check
Return:
bool
"""
url = "%s/teams/%s/memberships/%s" % (self._api_url, team_id, username)
response = self.get(url, log=False)
if not self._bad_response and response:
data = response.json()
if data['state'] == 'active':
return True
return False
def _get_team_id(self, owner, team):
"""
Gets the internal team id of a team.
"""
url = "%s/orgs/%s/teams" % (self._api_url, owner)
response = self.get(url)
if not self._bad_response and response:
data = response.json()
for team_data in data:
if team_data["name"] == team:
return team_data["id"]
self._add_error("Failed to find team '%s' at URL: %s" % (team, url))
@copydoc(GitAPI.is_member)
def is_member(self, team, user):
paths = team.split("/")
if len(paths) == 1:
# No / so should be a user or organization
if user.name == team:
return True
# Try the call using the users credentials
api = GitHubAPI(self._config, access_user=user)
ret = api._is_org_member(team)
if ret:
logger.info('"%s" IS a member of organization "%s"' % (user, team))
else:
logger.info('"%s" is NOT a member of organization "%s"' % (user, team))
return ret
elif len(paths) == 2:
# Must be a team in the form <org>/<team name>
team_id = self._get_team_id(paths[0], paths[1])
if team_id is not None:
ret = self._is_team_member(team_id, user.name)
if ret:
logger.info('"%s" IS a member of team "%s"' % (user, team))
else:
logger.info('"%s" is NOT a member of team "%s"' % (user, team))
return ret
self._add_error("Failed to check if '%s' is a member of '%s': Bad team name" % (user, team))
return False
@copydoc(GitAPI.get_open_prs)
def get_open_prs(self, owner, repo):
url = "%s/repos/%s/%s/pulls" % (self._api_url, owner, repo)
params = {"state": "open"}
data = self.get_all_pages(url, params=params)
open_prs = []
if not self._bad_response and data is not None:
for pr in data:
open_prs.append({"number": pr["number"], "title": pr["title"], "html_url": pr["html_url"]})
return open_prs
return None
def _get_issues(self, user, owner, repo, title):
"""
Get a list of open issues owned by the user that have the given title
"""
url = "%s/repos/%s/%s/issues" % (self._api_url, owner, repo)
params = {"state": "open", "creator": user}
data = self.get_all_pages(url, params=params)
matched_issues = []
if not self._bad_response and data:
for i in data:
if i["title"] == title:
matched_issues.append(i)
return matched_issues
def _create_issue(self, owner, repo, title, body):
"""
Create an issue on a repo with the given title and body
"""
url = "%s/repos/%s/%s/issues" % (self._api_url, owner, repo)
post_data = {"title": title, "body": body}
data = self.post(url, data=post_data)
if not self._bad_response and data:
logger.info("Created issue \"%s\": %s" % (title, data.json().get("html_url")))
def _edit_issue(self, owner, repo, issue_id, title, body):
"""
Modify the given issue on a repo with the given title and body
"""
url = "%s/repos/%s/%s/issues/%s" % (self._api_url, owner, repo, issue_id)
post_data = {"title": title, "body": body}
data = self.patch(url, data=post_data)
if not self._bad_response and data:
logger.info("Updated issue \"%s\": %s" % (title, data.json().get("html_url")))
@copydoc(GitAPI.create_or_update_issue)
def create_or_update_issue(self, owner, repo, title, body, new_comment):
if not self._access_user or not self._update_remote:
return
username = self._access_user.name
existing_issues = self._get_issues(username, owner, repo, title)
if existing_issues:
if new_comment:
self.pr_comment(existing_issues[-1]["comments_url"], body)
else:
issue_id = existing_issues[-1]["number"]
self._edit_issue(owner, repo, issue_id, title, body)
else:
self._create_issue(owner, repo, title, body)
@copydoc(GitAPI.automerge)
def automerge(self, repo, pr_num):
if not self._update_remote:
return False
auto_merge_label = repo.auto_merge_label()
auto_merge_require_review = repo.auto_merge_require_review()
if not auto_merge_label:
logger.info("%s:%s: No auto merging configured" % (self._hostname, repo))
return False
repo_name = repo.name
owner = repo.user.name
url = "%s/repos/%s/%s/pulls/%s" % (self._api_url, owner, repo_name, pr_num)
prefix = "%s:%s/%s #%s:" % (self._hostname, owner, repo_name, pr_num)
pr_info = self.get_all_pages(url)
if pr_info is None or self._bad_response:
logger.info("%s Failed to get info" % prefix)
return False
all_labels = [label["name"] for label in pr_info["labels"]]
if auto_merge_label not in all_labels:
logger.info("%s Auto merge label not on PR" % prefix)
return False
pr_head = pr_info["head"]["sha"]
if auto_merge_require_review:
url = "%s/repos/%s/%s/pulls/%s/reviews" % (self._api_url, owner, repo_name, pr_num)
reviews = self.get_all_pages(url)
if not reviews or self._bad_response:
logger.info("%s No reviews, not auto merging" % prefix)
return False
is_approved = False
changes_requested = False
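            # Only reviews made against the current head commit count;
            # approvals on older commits are ignored.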
for review in reviews:
if review["commit_id"] == pr_head:
if review["state"] == "CHANGES_REQUESTED":
changes_requested = True
elif review["state"] == "APPROVED":
is_approved = True
if not is_approved:
logger.info("%s Not approved, not auto merging" % prefix)
return False
if changes_requested:
logger.info("%s Changes requested, not auto merging" % prefix)
return False
url = "%s/repos/%s/%s/pulls/%s/merge" % (self._api_url, owner, repo_name, pr_num)
data = {"sha": pr_head}
self.put(url, data=data)
if self._bad_response:
logger.info("%s Failed to auto merge" % prefix)
return False
else:
logger.info("%s Auto merged" % prefix)
return True
|
|
"""Support for Xiaomi Philips Lights."""
import asyncio
import datetime
from datetime import timedelta
from functools import partial
import logging
from math import ceil
from miio import Ceil, DeviceException, PhilipsBulb, PhilipsEyecare, PhilipsMoonlight
from miio.gateway.gateway import (
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
GatewayException,
)
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_TOKEN
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import color, dt
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MODEL,
DOMAIN,
KEY_COORDINATOR,
MODELS_LIGHT_BULB,
MODELS_LIGHT_CEILING,
MODELS_LIGHT_EYECARE,
MODELS_LIGHT_MONO,
MODELS_LIGHT_MOON,
SERVICE_EYECARE_MODE_OFF,
SERVICE_EYECARE_MODE_ON,
SERVICE_NIGHT_LIGHT_MODE_OFF,
SERVICE_NIGHT_LIGHT_MODE_ON,
SERVICE_REMINDER_OFF,
SERVICE_REMINDER_ON,
SERVICE_SET_DELAYED_TURN_OFF,
SERVICE_SET_SCENE,
)
from .device import XiaomiMiioEntity
from .gateway import XiaomiGatewayDevice
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Philips Light"
DATA_KEY = "light.xiaomi_miio"
# The light does not accept cct values < 1
CCT_MIN = 1
CCT_MAX = 100
DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS = 4
DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES = 1
SUCCESS = ["ok"]
ATTR_SCENE = "scene"
ATTR_DELAYED_TURN_OFF = "delayed_turn_off"
ATTR_TIME_PERIOD = "time_period"
ATTR_NIGHT_LIGHT_MODE = "night_light_mode"
ATTR_AUTOMATIC_COLOR_TEMPERATURE = "automatic_color_temperature"
ATTR_REMINDER = "reminder"
ATTR_EYECARE_MODE = "eyecare_mode"
# Moonlight
ATTR_SLEEP_ASSISTANT = "sleep_assistant"
ATTR_SLEEP_OFF_TIME = "sleep_off_time"
ATTR_TOTAL_ASSISTANT_SLEEP_TIME = "total_assistant_sleep_time"
ATTR_BAND_SLEEP = "band_sleep"
ATTR_BAND = "band"
XIAOMI_MIIO_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
SERVICE_SCHEMA_SET_SCENE = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_SCENE): vol.All(vol.Coerce(int), vol.Clamp(min=1, max=6))}
)
SERVICE_SCHEMA_SET_DELAYED_TURN_OFF = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_TIME_PERIOD): cv.positive_time_period}
)
SERVICE_TO_METHOD = {
SERVICE_SET_DELAYED_TURN_OFF: {
"method": "async_set_delayed_turn_off",
"schema": SERVICE_SCHEMA_SET_DELAYED_TURN_OFF,
},
SERVICE_SET_SCENE: {
"method": "async_set_scene",
"schema": SERVICE_SCHEMA_SET_SCENE,
},
SERVICE_REMINDER_ON: {"method": "async_reminder_on"},
SERVICE_REMINDER_OFF: {"method": "async_reminder_off"},
SERVICE_NIGHT_LIGHT_MODE_ON: {"method": "async_night_light_mode_on"},
SERVICE_NIGHT_LIGHT_MODE_OFF: {"method": "async_night_light_mode_off"},
SERVICE_EYECARE_MODE_ON: {"method": "async_eyecare_mode_on"},
SERVICE_EYECARE_MODE_OFF: {"method": "async_eyecare_mode_off"},
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Xiaomi light from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_GATEWAY:
gateway = hass.data[DOMAIN][config_entry.entry_id][CONF_GATEWAY]
# Gateway light
if gateway.model not in [
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
]:
entities.append(
XiaomiGatewayLight(gateway, config_entry.title, config_entry.unique_id)
)
# Gateway sub devices
sub_devices = gateway.devices
coordinator = hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR]
for sub_device in sub_devices.values():
if sub_device.device_type == "LightBulb":
entities.append(
XiaomiGatewayBulb(coordinator, sub_device, config_entry)
)
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
name = config_entry.title
model = config_entry.data[CONF_MODEL]
unique_id = config_entry.unique_id
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
if model in MODELS_LIGHT_EYECARE:
light = PhilipsEyecare(host, token)
entity = XiaomiPhilipsEyecareLamp(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
entities.append(
XiaomiPhilipsEyecareLampAmbientLight(
name, light, config_entry, unique_id
)
)
# The ambient light doesn't expose additional services.
# A hass.data[DATA_KEY] entry isn't needed.
elif model in MODELS_LIGHT_CEILING:
light = Ceil(host, token)
entity = XiaomiPhilipsCeilingLamp(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
elif model in MODELS_LIGHT_MOON:
light = PhilipsMoonlight(host, token)
entity = XiaomiPhilipsMoonlightLamp(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
elif model in MODELS_LIGHT_BULB:
light = PhilipsBulb(host, token)
entity = XiaomiPhilipsBulb(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
elif model in MODELS_LIGHT_MONO:
light = PhilipsBulb(host, token)
entity = XiaomiPhilipsGenericLight(name, light, config_entry, unique_id)
entities.append(entity)
hass.data[DATA_KEY][host] = entity
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/syssi/philipslight/issues "
"and provide the following data: %s",
model,
)
return
async def async_service_handler(service: ServiceCall) -> None:
"""Map services to methods on Xiaomi Philips Lights."""
method = SERVICE_TO_METHOD.get(service.service)
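        # Forward every service data field except the entity ids to the mapped
        # device method as keyword arguments.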
params = {
key: value
for key, value in service.data.items()
if key != ATTR_ENTITY_ID
}
if entity_ids := service.data.get(ATTR_ENTITY_ID):
target_devices = [
dev
for dev in hass.data[DATA_KEY].values()
if dev.entity_id in entity_ids
]
else:
target_devices = hass.data[DATA_KEY].values()
update_tasks = []
for target_device in target_devices:
if not hasattr(target_device, method["method"]):
continue
await getattr(target_device, method["method"])(**params)
update_tasks.append(target_device.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
for xiaomi_miio_service, method in SERVICE_TO_METHOD.items():
schema = method.get("schema", XIAOMI_MIIO_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, xiaomi_miio_service, async_service_handler, schema=schema
)
async_add_entities(entities, update_before_add=True)
class XiaomiPhilipsAbstractLight(XiaomiMiioEntity, LightEntity):
"""Representation of a Abstract Xiaomi Philips Light."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._brightness = None
self._available = False
self._state = None
self._state_attrs = {}
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a light command handling error messages."""
try:
result = await self.hass.async_add_executor_job(
partial(func, *args, **kwargs)
)
_LOGGER.debug("Response received from light: %s", result)
return result == SUCCESS
except DeviceException as exc:
if self._available:
_LOGGER.error(mask_error, exc)
self._available = False
return False
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._device.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._device.on)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command("Turning the light off failed.", self._device.off)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
class XiaomiPhilipsGenericLight(XiaomiPhilipsAbstractLight):
"""Representation of a Generic Xiaomi Philips Light."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._state_attrs.update({ATTR_SCENE: None, ATTR_DELAYED_TURN_OFF: None})
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
async def async_set_scene(self, scene: int = 1):
"""Set the fixed scene."""
await self._try_command(
"Setting a fixed scene failed.", self._device.set_scene, scene
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._device.delay_off,
time_period.total_seconds(),
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(microsecond=0) + timedelta(seconds=countdown)
if previous is None:
return new
lower = timedelta(seconds=-DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
upper = timedelta(seconds=DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
class XiaomiPhilipsBulb(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Bulb."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._color_temp = None
@property
def color_temp(self):
"""Return the color temperature."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 333
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: %s bri, %s cct",
self._device.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: %s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._device.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._device.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._device.on)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
@staticmethod
def translate(value, left_min, left_max, right_min, right_max):
"""Map a value from left span to right span."""
left_span = left_max - left_min
right_span = right_max - right_min
value_scaled = float(value - left_min) / float(left_span)
return int(right_min + (value_scaled * right_span))
class XiaomiPhilipsCeilingLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Ceiling Lamp."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._state_attrs.update(
{ATTR_NIGHT_LIGHT_MODE: None, ATTR_AUTOMATIC_COLOR_TEMPERATURE: None}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 370
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_AUTOMATIC_COLOR_TEMPERATURE: state.automatic_color_temperature,
}
)
class XiaomiPhilipsEyecareLamp(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Eyecare Lamp 2."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._state_attrs.update(
{ATTR_REMINDER: None, ATTR_NIGHT_LIGHT_MODE: None, ATTR_EYECARE_MODE: None}
)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_REMINDER: state.reminder,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_EYECARE_MODE: state.eyecare,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._device.delay_off,
round(time_period.total_seconds() / 60),
)
async def async_reminder_on(self):
"""Enable the eye fatigue notification."""
await self._try_command(
"Turning on the reminder failed.", self._device.reminder_on
)
async def async_reminder_off(self):
"""Disable the eye fatigue notification."""
await self._try_command(
"Turning off the reminder failed.", self._device.reminder_off
)
async def async_night_light_mode_on(self):
"""Turn the smart night light mode on."""
await self._try_command(
"Turning on the smart night light mode failed.",
self._device.smart_night_light_on,
)
async def async_night_light_mode_off(self):
"""Turn the smart night light mode off."""
await self._try_command(
"Turning off the smart night light mode failed.",
self._device.smart_night_light_off,
)
async def async_eyecare_mode_on(self):
"""Turn the eyecare mode on."""
await self._try_command(
"Turning on the eyecare mode failed.", self._device.eyecare_on
)
async def async_eyecare_mode_off(self):
"""Turn the eyecare mode off."""
await self._try_command(
"Turning off the eyecare mode failed.", self._device.eyecare_off
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(second=0, microsecond=0) + timedelta(
minutes=countdown
)
if previous is None:
return new
lower = timedelta(minutes=-DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
upper = timedelta(minutes=DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
class XiaomiPhilipsEyecareLampAmbientLight(XiaomiPhilipsAbstractLight):
"""Representation of a Xiaomi Philips Eyecare Lamp Ambient Light."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
name = f"{name} Ambient Light"
if unique_id is not None:
unique_id = f"{unique_id}-ambient"
super().__init__(name, device, entry, unique_id)
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug(
"Setting brightness of the ambient light: %s %s%%",
brightness,
percent_brightness,
)
result = await self._try_command(
"Setting brightness of the ambient failed: %s",
self._device.set_ambient_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command(
"Turning the ambient light on failed.", self._device.ambient_on
)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command(
"Turning the ambient light off failed.", self._device.ambient_off
)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.ambient
self._brightness = ceil((255 / 100.0) * state.ambient_brightness)
class XiaomiPhilipsMoonlightLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Zhirui Bedside Lamp."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the light device."""
super().__init__(name, device, entry, unique_id)
self._hs_color = None
self._state_attrs.pop(ATTR_DELAYED_TURN_OFF)
self._state_attrs.update(
{
ATTR_SLEEP_ASSISTANT: None,
ATTR_SLEEP_OFF_TIME: None,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: None,
ATTR_BAND_SLEEP: None,
ATTR_BAND: None,
}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 153
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 588
@property
def hs_color(self) -> tuple:
"""Return the hs color value."""
return self._hs_color
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
rgb = color.color_hs_to_RGB(*hs_color)
if ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR in kwargs:
_LOGGER.debug(
"Setting brightness and color: %s %s%%, %s",
brightness,
percent_brightness,
rgb,
)
result = await self._try_command(
"Setting brightness and color failed: %s bri, %s color",
self._device.set_brightness_and_rgb,
percent_brightness,
rgb,
)
if result:
self._hs_color = hs_color
self._brightness = brightness
elif ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: %s bri, %s cct",
self._device.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_HS_COLOR in kwargs:
_LOGGER.debug("Setting color: %s", rgb)
result = await self._try_command(
"Setting color failed: %s", self._device.set_rgb, rgb
)
if result:
self._hs_color = hs_color
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: %s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._device.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._device.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._device.on)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
self._hs_color = color.color_RGB_to_hs(*state.rgb)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_SLEEP_ASSISTANT: state.sleep_assistant,
ATTR_SLEEP_OFF_TIME: state.sleep_off_time,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: state.total_assistant_sleep_time,
ATTR_BAND_SLEEP: state.brand_sleep,
ATTR_BAND: state.brand,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off. Unsupported."""
return
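# The `translate` helper called in async_turn_on/async_update above is inherited
# from the base light class and is not part of this excerpt; CCT_MIN/CCT_MAX are
# module-level bounds for the device's colour-temperature percentage. A minimal
# sketch of the linear span mapping it is assumed to perform:
#
#     def translate(value, left_min, left_max, right_min, right_max):
#         """Map `value` from [left_min, left_max] onto [right_min, right_max]."""
#         left_span = left_max - left_min
#         right_span = right_max - right_min
#         scaled = float(value - left_min) / float(left_span)
#         return int(right_min + scaled * right_span)
#
# Under that assumption, a requested color_temp equal to max_mireds (588 for this
# lamp) maps to CCT_MIN and one equal to min_mireds (153) maps to CCT_MAX, which
# is what the call translate(color_temp, max_mireds, min_mireds, CCT_MIN, CCT_MAX)
# above relies on.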
class XiaomiGatewayLight(LightEntity):
"""Representation of a gateway device's light."""
def __init__(self, gateway_device, gateway_name, gateway_device_id):
"""Initialize the XiaomiGatewayLight."""
self._gateway = gateway_device
self._name = f"{gateway_name} Light"
self._gateway_device_id = gateway_device_id
self._unique_id = gateway_device_id
self._available = False
self._is_on = None
self._brightness_pct = 100
self._rgb = (255, 255, 255)
self._hs = (0, 0)
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return the device info of the gateway."""
return DeviceInfo(
identifiers={(DOMAIN, self._gateway_device_id)},
)
@property
def name(self):
"""Return the name of this entity, if any."""
return self._name
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def is_on(self):
"""Return true if it is on."""
return self._is_on
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(255 * self._brightness_pct / 100)
@property
def hs_color(self):
"""Return the hs color value."""
return self._hs
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
def turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_HS_COLOR in kwargs:
rgb = color.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
else:
rgb = self._rgb
if ATTR_BRIGHTNESS in kwargs:
brightness_pct = int(100 * kwargs[ATTR_BRIGHTNESS] / 255)
else:
brightness_pct = self._brightness_pct
self._gateway.light.set_rgb(brightness_pct, rgb)
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the light off."""
self._gateway.light.set_rgb(0, self._rgb)
self.schedule_update_ha_state()
async def async_update(self):
"""Fetch state from the device."""
try:
state_dict = await self.hass.async_add_executor_job(
self._gateway.light.rgb_status
)
except GatewayException as ex:
if self._available:
self._available = False
_LOGGER.error(
"Got exception while fetching the gateway light state: %s", ex
)
return
self._available = True
self._is_on = state_dict["is_on"]
if self._is_on:
self._brightness_pct = state_dict["brightness"]
self._rgb = state_dict["rgb"]
self._hs = color.color_RGB_to_hs(*self._rgb)
class XiaomiGatewayBulb(XiaomiGatewayDevice, LightEntity):
"""Representation of Xiaomi Gateway Bulb."""
@property
def brightness(self):
"""Return the brightness of the light."""
return round((self._sub_device.status["brightness"] * 255) / 100)
@property
def color_temp(self):
"""Return current color temperature."""
return self._sub_device.status["color_temp"]
@property
def is_on(self):
"""Return true if light is on."""
return self._sub_device.status["status"] == "on"
@property
def min_mireds(self):
"""Return min cct."""
return self._sub_device.status["cct_min"]
@property
def max_mireds(self):
"""Return max cct."""
return self._sub_device.status["cct_max"]
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
await self.hass.async_add_executor_job(self._sub_device.on)
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
await self.hass.async_add_executor_job(
self._sub_device.set_color_temp, color_temp
)
if ATTR_BRIGHTNESS in kwargs:
brightness = round((kwargs[ATTR_BRIGHTNESS] * 100) / 255)
await self.hass.async_add_executor_job(
self._sub_device.set_brightness, brightness
)
    async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self.hass.async_add_executor_job(self._sub_device.off)
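# Illustrative only: the gateway bulb exposes brightness as a 0..100 percentage,
# so a Home Assistant brightness of 128 is sent as round((128 * 100) / 255) == 50,
# and a reported 50% reads back through the `brightness` property above as
# round((50 * 255) / 100) == 128.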
|
|
#!/usr/bin/env python3
from collections import defaultdict
from urllib.request import Request, urlopen
from urllib.error import URLError
from os.path import join
from functools import *
from poe_utils import *
from sys import exit
class PoEItemData():
# User Variables
__attribute_list = ['str', 'dex', 'int', 'str/int', 'str/dex', 'int/dex']
# Core Variables
# __attribute_map = item_data -> attribute -> attribute/requirement
    __attribute_map = { 'armour': { 'str': 'Armour'
                                  , 'dex': 'Evasion Rating'
                                  , 'int': 'Energy Shield'
                                  }
                      , 'weapon': { 'str': 'Req_Str'
                                  , 'dex': 'Req_Dex'
                                  , 'int': 'Req_Int'
                                  }
                      }
# __class_group_map = abbrev in filter -> group shown on website
__class_group_map = { 'One Hand': { 'One Hand Axe', 'One Hand Mace', 'One Hand Sword', 'Thrusting One Hand Sword' }
, 'Two Hand': { 'Two Hand Axe', 'Two Hand Mace', 'Two Hand Sword' }
}
# __class_name_map = abbrev in filter -> name shown on website
__class_name_map = { 'Body' : 'Body Armour'
, 'Boot' : 'Boots'
, 'Glove' : 'Gloves'
, 'Thrusting': 'Thrusting One Hand Sword'
}
# __reduced_class_map = abbrev in filter -> abbrevs in filter
__reduced_class_map = { 'Axe': { 'Two Hand Axe', 'One Hand Axe' }
, 'Mace': { 'Two Hand Mace', 'One Hand Mace' }
, 'Sword': { 'Two Hand Sword', 'Thrusting', 'One Hand Sword' }
}
__name_attribute = 'Name'
__requirements = [x.upper() for x in __attribute_list]
__rq_tables_generated = False # FIXME: bad design
__default_format = 'utf-8'
__default_url = "http://www.pathofexile.com/item-data"
__default_headers = {}
__default_headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
def __init__(self, item_family = 'armour'):
self.tables = defaultdict(list) # Class -> Row
self.rq_tables = defaultdict(lambda : defaultdict(list)) # Req -> Class -> Name
self.__fetch_data(item_family)
self.__classes = set([]) # all classnames encountered
self.__generate_tables()
self.required_classes = None
@property
def required_classes(self):
return self.__required_classes
@required_classes.setter
def required_classes(self, value):
prev_required = None
try:
prev_required = self.required_classes.copy()
except AttributeError:
pass
        self.__required_classes = set()
        if not value:
# all classes are required by default
self.__required_classes = self.__classes.copy()
else:
# ensure required classes exist
self.__required_classes = value & self.__classes
# first use of accessor, do nothing
if prev_required is None:
return
# TODO: if changed, do something
if prev_required & self.__required_classes:
pass
def __fetch_data(self, item_family):
self.data_type = item_family
url = self.__default_url+"/"+item_family
req = Request(url, headers = self.__default_headers)
try:
resp = urlopen(req)
except URLError as e:
            print('An error occurred while fetching {url}\n\t{reason}'.format(url=url, reason=e.reason))
exit(1)
self.data = BeautifulSoup(resp.read().decode(self.__default_format))
def __generate_tables(self):
if self.data_type is None:
return 1
try:
tables = self.data.findAll('table')
except AttributeError:
print('No tables found')
return 1
for table in tables:
try:
rows = table.findAll('tr')
except AttributeError:
print('Empty table')
continue
table_class = str(table.parent.findPrevious('h1').contents[0])
self.__classes.add(table_class) # remember all classnames
table_rows = parse_rows(rows) # implicits are concatenated
self.tables[table_class] = table_rows
# add maraketh weapons if item_data/weapon
if self.data_type == 'weapon':
act_four_tables = import_db(join('temporary_db','maraketh_weapon'))
for table_class, table in act_four_tables.items():
# suppose table is well-formed
if table[1:]:
self.tables[table_class].append(table[1:])
def __generate_rq_tables(self):
table_requirements = defaultdict(list)
        if self.data_type in list(self.__attribute_map.keys()):
for attr in self.__attribute_list:
                table_requirements[attr] = [self.__attribute_map[self.data_type][c] for c in attr.split('/')]
else:
print('Please, define rules to walk the database')
exit(1)
for table_class, table in self.tables.items():
# - HEADER
# position of name
p_name = self.__get_name_position(table_class)
if p_name is None:
continue
# attribute (ex: 'Energy Shield' or 'Req Int'...)
attribute_vlist = []
            # attributes -> position of requirements
            attr_pos = defaultdict(list)
            # attributes -> position of related requirements
related_attr_pos = defaultdict(list)
for a,v in table_requirements.items():
attr_pos[a] = [table[0].index(rq_v) for rq_v in v]
attribute_vlist.extend(v)
attribute_vlist = list(set(attribute_vlist)) # remove dup
# all requirements in header
attribute_plist = [table[0].index(rq_v) for rq_v in attribute_vlist]
for a, v in table_requirements.items():
related_attr_pos[a] = [aa for aa in attribute_plist if aa not in attr_pos[a]]
# - DATA
for data in table[1:]:
name = data[p_name]
for attr in self.__attribute_list:
if all(int(data[pos]) > 0 for pos in attr_pos[attr]):
if all(int(data[pos]) == 0 for pos in related_attr_pos[attr]):
self.rq_tables[attr.upper()][table_class].append(name)
break
self.__rq_tables_generated = True
def get_items(self, item_class = ''):
if not item_class:
item_classes = self.get_classes()
else:
item_classes = [ item_class ]
res = []
for i_class in item_classes:
res.extend(self.__get_items(i_class))
return res
def __get_items(self, item_class):
p = self.__get_name_position(item_class)
if p is not None:
return [x[p] for x in self.tables[item_class][1:]]
return []
def __get_name_position(self, item_class):
res = None
attribute=self.__name_attribute
table_header = self.tables[item_class][0]
try:
res = table_header.index(attribute)
except ValueError as e:
            print('An error occurred while fetching {attribute} in the table header of {classname}'.format(attribute=attribute, classname=item_class))
return res
def get_items_by_requirement(self, requirement, item_class = ''):
if not self.__rq_tables_generated:
self.__generate_rq_tables()
rq_tabs = self.rq_tables[requirement]
rq = []
if item_class:
rq = rq_tabs[item_class]
else:
for rq_class, rq_items in rq_tabs.items():
if rq_class in self.required_classes:
rq.extend(rq_items)
return rq
def get_selected_items(self, action, item_class = ''):
# TODO: refactor __generate_rq_tables
pass
def __get_variable_by_pattern(self, var, pattern = ''):
if pattern:
return None
# return re_list_filter(pattern, var)
return var
def get_requirements(self, pattern = ''):
return self.__get_variable_by_pattern(self.__requirements, pattern)
def get_classes(self, pattern = ''):
return self.__get_variable_by_pattern(list(self.__classes), pattern)
@classmethod
def abbrev_classes(cls, class_names, precedence = None):
# class_names = string or heterogeneous list of classes
if isinstance(class_names, str):
cnames = [ class_names ]
else:
cnames = class_names.copy()
classes_set = set(cnames) # faster as set
for class_map in [cls.__class_group_map, cls.__class_name_map]:
for abbrev, names in class_map.items():
if isinstance(names, str):
names = { names }
if names.issubset(classes_set):
classes_set = classes_set - names
# replace in the 'ordered' list
list_replace(cnames, list(names), abbrev)
if precedence is not None:
if isinstance(precedence, str):
precedence = [precedence]
if isinstance(precedence, list):
precedence = set(precedence)
classes_set = set(cnames)
for abbrev, names in cls.__reduced_class_map.items():
uset = classes_set | precedence
if names.issubset(uset):
classes_set = uset - names
# replace in the 'ordered' list
list_replace(cnames, list(names - precedence), abbrev)
return cnames
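# Illustrative usage sketch for PoEItemData.abbrev_classes (the exact ordering of
# the returned list depends on list_replace() from poe_utils, which is not shown
# here):
#
#     PoEItemData.abbrev_classes(['Body Armour', 'Gloves', 'One Hand Axe',
#                                 'One Hand Mace', 'One Hand Sword',
#                                 'Thrusting One Hand Sword'])
#     # -> roughly ['Body', 'Glove', 'One Hand']
#
#     PoEItemData.abbrev_classes(['Two Hand Axe'], precedence={'One Hand Axe'})
#     # -> ['Axe'], because the precedence set completes the reduced 'Axe' group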
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A variational ansatz based on a linear swap network Trotter step."""
from typing import Iterable, Optional, Sequence, Tuple, cast
import numpy
import sympy
import cirq
from openfermioncirq import swap_network
from openfermioncirq.variational.ansatz import VariationalAnsatz
from openfermioncirq.variational.letter_with_subscripts import (
LetterWithSubscripts)
class SwapNetworkTrotterHubbardAnsatz(VariationalAnsatz):
"""A Hubbard model ansatz based on the fermionic swap network Trotter step.
Each Trotter step includes 3 parameters: one for the horizontal hopping
terms, one for the vertical hopping terms, and one for the on-site
interaction. This ansatz is similar to the one used in arXiv:1507.08969,
but corresponds to a different ordering for simulating the Hamiltonian
terms.
"""
def __init__(self,
x_dim: float,
y_dim: float,
tunneling: float,
coulomb: float,
periodic: bool=True,
iterations: int=1,
adiabatic_evolution_time: Optional[float]=None,
qubits: Optional[Sequence[cirq.Qid]]=None
) -> None:
"""
Args:
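            x_dim: The number of sites along the x direction of the Hubbard
                lattice.
            y_dim: The number of sites along the y direction of the Hubbard
                lattice.
            tunneling: The tunneling amplitude of the Hubbard model.
            coulomb: The on-site (Coulomb) interaction strength of the
                Hubbard model.
            periodic: Whether the lattice has periodic boundary conditions.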
iterations: The number of iterations of the basic template to
include in the circuit. The number of parameters grows linearly
with this value.
adiabatic_evolution_time: The time scale for Hamiltonian evolution
used to determine the default initial parameters of the ansatz.
This is the value A from the docstring of this class.
If not specified, defaults to the sum of the absolute values
of the entries of the two-body tensor of the Hamiltonian.
qubits: Qubits to be used by the ansatz circuit. If not specified,
then qubits will automatically be generated by the
`_generate_qubits` method.
"""
self.x_dim = x_dim
self.y_dim = y_dim
self.tunneling = tunneling
self.coulomb = coulomb
self.periodic = periodic
self.iterations = iterations
if adiabatic_evolution_time is None:
adiabatic_evolution_time = 0.1*abs(coulomb)*iterations
self.adiabatic_evolution_time = cast(float, adiabatic_evolution_time)
super().__init__(qubits)
def params(self) -> Iterable[sympy.Symbol]:
"""The parameters of the ansatz."""
for i in range(self.iterations):
if self.x_dim > 1:
yield LetterWithSubscripts('Th', i)
if self.y_dim > 1:
yield LetterWithSubscripts('Tv', i)
yield LetterWithSubscripts('V', i)
def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:
"""Bounds on the parameters."""
bounds = []
for param in self.params():
s = 1.0 if param.letter == 'V' else 2.0
bounds.append((-s, s))
return bounds
def _generate_qubits(self) -> Sequence[cirq.Qid]:
"""Produce qubits that can be used by the ansatz circuit."""
n_qubits = 2*self.x_dim*self.y_dim
return cirq.LineQubit.range(n_qubits)
def operations(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:
"""Produce the operations of the ansatz circuit."""
for i in range(self.iterations):
# Apply one- and two-body interactions with a swap network that
# reverses the order of the modes
def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE:
th_symbol = LetterWithSubscripts('Th', i)
tv_symbol = LetterWithSubscripts('Tv', i)
v_symbol = LetterWithSubscripts('V', i)
if _is_horizontal_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b)
if _is_vertical_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b)
if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim):
yield cirq.CZPowGate(exponent=v_symbol).on(a, b)
yield swap_network(
qubits, one_and_two_body_interaction, fermionic=True)
qubits = qubits[::-1]
# Apply one- and two-body interactions again. This time, reorder
# them so that the entire iteration is symmetric
def one_and_two_body_interaction_reversed_order(p, q, a, b
) -> cirq.OP_TREE:
th_symbol = LetterWithSubscripts('Th', i)
tv_symbol = LetterWithSubscripts('Tv', i)
v_symbol = LetterWithSubscripts('V', i)
if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim):
yield cirq.CZPowGate(exponent=v_symbol).on(a, b)
if _is_vertical_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b)
if _is_horizontal_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b)
yield swap_network(
qubits, one_and_two_body_interaction_reversed_order,
fermionic=True, offset=True)
qubits = qubits[::-1]
def default_initial_params(self) -> numpy.ndarray:
"""Approximate evolution by H(t) = T + (t/A)V.
Sets the parameters so that the ansatz circuit consists of a sequence
of second-order Trotter steps approximating the dynamics of the
time-dependent Hamiltonian H(t) = T + (t/A)V, where T is the one-body
term and V is the two-body term of the Hamiltonian used to generate the
ansatz circuit, and t ranges from 0 to A, where A is equal to
        `self.adiabatic_evolution_time`. The number of Trotter steps
is equal to the number of iterations in the ansatz. This choice is
motivated by the idea of state preparation via adiabatic evolution.
The dynamics of H(t) are approximated as follows. First, the total
evolution time of A is split into segments of length A / r, where r
is the number of Trotter steps. Then, each Trotter step simulates H(t)
for a time length of A / r, where t is the midpoint of the
corresponding time segment. As an example, suppose A is 100 and the
ansatz has two iterations. Then the approximation is achieved with two
Trotter steps. The first Trotter step simulates H(25) for a time length
of 50, and the second Trotter step simulates H(75) for a time length
of 50.
"""
total_time = self.adiabatic_evolution_time
step_time = total_time / self.iterations
params = []
for param, scale_factor in zip(self.params(),
self.param_scale_factors()):
if param.letter == 'Th' or param.letter == 'Tv':
params.append(_canonicalize_exponent(
-self.tunneling * step_time / numpy.pi, 4) / scale_factor)
elif param.letter == 'V':
i, = param.subscripts
# Use the midpoint of the time segment
interpolation_progress = 0.5 * (2 * i + 1) / self.iterations
params.append(_canonicalize_exponent(
-0.5 * self.coulomb * interpolation_progress *
step_time / numpy.pi, 2) / scale_factor)
return numpy.array(params)
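# Illustrative only: for a 2x2 lattice (x_dim=2, y_dim=2) with iterations=2,
# params() above yields one horizontal-hopping, one vertical-hopping and one
# interaction symbol per iteration (Th_0, Tv_0, V_0, Th_1, Tv_1, V_1 under
# LetterWithSubscripts' naming), and param_bounds() gives (-2, 2) for the
# hopping parameters and (-1, 1) for the V parameters.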
def _is_horizontal_edge(p, q, x_dim, y_dim, periodic):
n_sites = x_dim*y_dim
if p < n_sites and q >= n_sites or q < n_sites and p >= n_sites:
return False
if p >= n_sites and q >= n_sites:
p -= n_sites
q -= n_sites
return (q == _right_neighbor(p, x_dim, y_dim, periodic)
or p == _right_neighbor(q, x_dim, y_dim, periodic))
def _is_vertical_edge(p, q, x_dim, y_dim, periodic):
n_sites = x_dim*y_dim
if p < n_sites and q >= n_sites or q < n_sites and p >= n_sites:
return False
if p >= n_sites and q >= n_sites:
p -= n_sites
q -= n_sites
return (q == _bottom_neighbor(p, x_dim, y_dim, periodic)
or p == _bottom_neighbor(q, x_dim, y_dim, periodic))
def _are_same_site_opposite_spin(p, q, n_sites):
return abs(p-q) == n_sites
def _right_neighbor(site, x_dimension, y_dimension, periodic):
if x_dimension == 1:
return None
if (site + 1) % x_dimension == 0:
if periodic:
return site + 1 - x_dimension
else:
return None
return site + 1
def _bottom_neighbor(site, x_dimension, y_dimension, periodic):
if y_dimension == 1:
return None
if site + x_dimension + 1 > x_dimension*y_dimension:
if periodic:
return site + x_dimension - x_dimension*y_dimension
else:
return None
return site + x_dimension
def _canonicalize_exponent(exponent: float, period: int) -> float:
# Shift into [-p/2, +p/2).
exponent += period / 2
exponent %= period
exponent -= period / 2
# Prefer (-p/2, +p/2] over [-p/2, +p/2).
if exponent <= -period / 2:
exponent += period # coverage: ignore
return exponent
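# Worked examples (illustrative only): _canonicalize_exponent(3.6, 4) computes
# 3.6 + 2 = 5.6, 5.6 % 4 = 1.6, 1.6 - 2 = -0.4; _canonicalize_exponent(-2.0, 4)
# first lands exactly on -period/2 and is then bumped up by one period to +2.0,
# matching the preference for (-p/2, +p/2] over [-p/2, +p/2).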
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.basic_decoder."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import sys
# TODO(jart): #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes # pylint: disable=g-import-not-at-top
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
# pylint: disable=g-import-not-at-top
import numpy as np
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
class BasicDecoderTest(test.TestCase):
def _testStepWithTrainingHelper(self, use_output_layer):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = 10
output_layer_depth = 3
with self.test_session() as sess:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
cell = core_rnn_cell.LSTMCell(cell_depth)
helper = helper_py.TrainingHelper(
inputs, sequence_length, time_major=False)
if use_output_layer:
output_layer = layers_core.Dense(output_layer_depth, use_bias=False)
expected_output_depth = output_layer_depth
else:
output_layer = None
expected_output_depth = cell_depth
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size),
output_layer=output_layer)
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(expected_output_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, expected_output_depth),
step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
if use_output_layer:
# The output layer was accessed
self.assertEqual(len(output_layer.variables), 1)
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
self.assertAllEqual(
np.argmax(sess_results["step_outputs"].rnn_output, -1),
sess_results["step_outputs"].sample_id)
def testStepWithTrainingHelperNoOutputLayer(self):
self._testStepWithTrainingHelper(use_output_layer=False)
def testStepWithTrainingHelperWithOutputLayer(self):
self._testStepWithTrainingHelper(use_output_layer=True)
def testStepWithGreedyEmbeddingHelper(self):
batch_size = 5
vocabulary_size = 7
cell_depth = vocabulary_size # cell's logits must match vocabulary size
input_depth = 10
start_tokens = [0] * batch_size
end_token = 1
with self.test_session() as sess:
embeddings = np.random.randn(vocabulary_size,
input_depth).astype(np.float32)
cell = core_rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.GreedyEmbeddingHelper(embeddings, start_tokens,
end_token)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
expected_sample_ids = np.argmax(
sess_results["step_outputs"].rnn_output, -1)
expected_step_finished = (expected_sample_ids == end_token)
expected_step_next_inputs = embeddings[expected_sample_ids]
self.assertAllEqual([False, False, False, False, False],
sess_results["first_finished"])
self.assertAllEqual(expected_step_finished, sess_results["step_finished"])
self.assertAllEqual(expected_sample_ids,
sess_results["step_outputs"].sample_id)
self.assertAllEqual(expected_step_next_inputs,
sess_results["step_next_inputs"])
def testStepWithScheduledEmbeddingTrainingHelper(self):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
vocabulary_size = 10
with self.test_session() as sess:
inputs = np.random.randn(
batch_size, max_time, input_depth).astype(np.float32)
embeddings = np.random.randn(
vocabulary_size, input_depth).astype(np.float32)
half = constant_op.constant(0.5)
cell = core_rnn_cell.LSTMCell(vocabulary_size)
helper = helper_py.ScheduledEmbeddingTrainingHelper(
inputs=inputs,
sequence_length=sequence_length,
embedding=embeddings,
sampling_probability=half,
time_major=False)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(vocabulary_size,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, vocabulary_size),
step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, vocabulary_size),
first_state[0].get_shape())
self.assertEqual((batch_size, vocabulary_size),
first_state[1].get_shape())
self.assertEqual((batch_size, vocabulary_size),
step_state[0].get_shape())
self.assertEqual((batch_size, vocabulary_size),
step_state[1].get_shape())
self.assertEqual((batch_size, input_depth),
step_next_inputs.get_shape())
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
})
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
sample_ids = sess_results["step_outputs"].sample_id
batch_where_not_sampling = np.where(sample_ids == -1)
batch_where_sampling = np.where(sample_ids > -1)
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_sampling],
embeddings[sample_ids[batch_where_sampling]])
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_not_sampling],
np.squeeze(inputs[batch_where_not_sampling, 1]))
def _testStepWithScheduledOutputTrainingHelper(self, use_next_input_layer):
sequence_length = [3, 4, 3, 1, 0]
batch_size = 5
max_time = 8
input_depth = 7
cell_depth = input_depth
if use_next_input_layer:
cell_depth = 6
with self.test_session() as sess:
inputs = np.random.randn(batch_size, max_time,
input_depth).astype(np.float32)
cell = core_rnn_cell.LSTMCell(cell_depth)
half = constant_op.constant(0.5)
next_input_layer = None
if use_next_input_layer:
next_input_layer = layers_core.Dense(input_depth, use_bias=False)
helper = helper_py.ScheduledOutputTrainingHelper(
inputs=inputs,
sequence_length=sequence_length,
sampling_probability=half,
time_major=False,
next_input_layer=next_input_layer)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
output_size = my_decoder.output_size
output_dtype = my_decoder.output_dtype
self.assertEqual(
basic_decoder.BasicDecoderOutput(cell_depth,
tensor_shape.TensorShape([])),
output_size)
self.assertEqual(
basic_decoder.BasicDecoderOutput(dtypes.float32, dtypes.int32),
output_dtype)
(first_finished, first_inputs, first_state) = my_decoder.initialize()
(step_outputs, step_state, step_next_inputs,
step_finished) = my_decoder.step(
constant_op.constant(0), first_inputs, first_state)
if use_next_input_layer:
output_after_next_input_layer = next_input_layer(
step_outputs.rnn_output)
batch_size_t = my_decoder.batch_size
self.assertTrue(isinstance(first_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(step_state, core_rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(step_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual((batch_size, cell_depth), step_outputs[0].get_shape())
self.assertEqual((batch_size,), step_outputs[1].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), first_state[1].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[0].get_shape())
self.assertEqual((batch_size, cell_depth), step_state[1].get_shape())
sess.run(variables.global_variables_initializer())
fetches = {
"batch_size": batch_size_t,
"first_finished": first_finished,
"first_inputs": first_inputs,
"first_state": first_state,
"step_outputs": step_outputs,
"step_state": step_state,
"step_next_inputs": step_next_inputs,
"step_finished": step_finished
}
if use_next_input_layer:
fetches["output_after_next_input_layer"] = output_after_next_input_layer
sess_results = sess.run(fetches)
self.assertAllEqual([False, False, False, False, True],
sess_results["first_finished"])
self.assertAllEqual([False, False, False, True, True],
sess_results["step_finished"])
sample_ids = sess_results["step_outputs"].sample_id
batch_where_not_sampling = np.where(np.logical_not(sample_ids))
batch_where_sampling = np.where(sample_ids)
if use_next_input_layer:
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_sampling],
sess_results["output_after_next_input_layer"][batch_where_sampling])
else:
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_sampling],
sess_results["step_outputs"].rnn_output[batch_where_sampling])
self.assertAllClose(
sess_results["step_next_inputs"][batch_where_not_sampling],
np.squeeze(inputs[batch_where_not_sampling, 1], axis=1))
def testStepWithScheduledOutputTrainingHelperWithoutNextInputLayer(self):
self._testStepWithScheduledOutputTrainingHelper(use_next_input_layer=False)
def testStepWithScheduledOutputTrainingHelperWithNextInputLayer(self):
self._testStepWithScheduledOutputTrainingHelper(use_next_input_layer=True)
if __name__ == "__main__":
test.main()
|
|
"""
South's fake ORM; lets you not have to write SQL inside migrations.
Roughly emulates the real Django ORM, to a point.
"""
import inspect
import datetime
from django.db import models
from django.db.models.loading import cache
from django.core.exceptions import ImproperlyConfigured
from south.db import db
from south.utils import ask_for_it_by_name
from south.hacks import hacks
class ModelsLocals(object):
"""
Custom dictionary-like class to be locals();
falls back to lowercase search for items that don't exist
(because we store model names as lowercase).
"""
def __init__(self, data):
self.data = data
def __getitem__(self, key):
try:
return self.data[key]
except KeyError:
return self.data[key.lower()]
# Stores already-created ORMs.
_orm_cache = {}
def FakeORM(*args):
"""
Creates a Fake Django ORM.
This is actually a memoised constructor; the real class is _FakeORM.
"""
if not args in _orm_cache:
_orm_cache[args] = _FakeORM(*args)
return _orm_cache[args]
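# Illustrative usage sketch (the Migration class and model/field names below are
# hypothetical; they only demonstrate the access patterns implemented by _FakeORM):
#
#     orm = FakeORM(Migration, "myapp")         # memoised: same args, same ORM
#     Author = orm.Author                       # model from the default app
#     Book = orm["otherapp.book"]               # model from another app
#     title_field = orm["otherapp.book:title"]  # a single field instance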
class LazyFakeORM(object):
"""
In addition to memoising the ORM call, this function lazily generates them
for a Migration class. Assign the result of this to (for example)
.orm, and as soon as .orm is accessed the ORM will be created.
"""
def __init__(self, *args):
self._args = args
self.orm = None
def __get__(self, obj, type=None):
if not self.orm:
self.orm = FakeORM(*self._args)
return self.orm
class _FakeORM(object):
"""
Simulates the Django ORM at some point in time,
using a frozen definition on the Migration class.
"""
def __init__(self, cls, app):
self.default_app = app
self.cls = cls
# Try loading the models off the migration class; default to no models.
self.models = {}
try:
self.models_source = cls.models
except AttributeError:
return
# Start a 'new' AppCache
hacks.clear_app_cache()
# Now, make each model's data into a FakeModel
# We first make entries for each model that are just its name
# This allows us to have circular model dependency loops
model_names = []
for name, data in self.models_source.items():
# Make sure there's some kind of Meta
if "Meta" not in data:
data['Meta'] = {}
try:
app_name, model_name = name.split(".", 1)
except ValueError:
app_name = self.default_app
model_name = name
name = "%s.%s" % (app_name, model_name)
name = name.lower()
self.models[name] = name
model_names.append((name, app_name, model_name, data))
for name, app_name, model_name, data in model_names:
self.models[name] = self.make_model(app_name, model_name, data)
# And perform the second run to iron out any circular/backwards depends.
self.retry_failed_fields()
# Force evaluation of relations on the models now
for model in self.models.values():
model._meta.get_all_field_names()
# Reset AppCache
hacks.unclear_app_cache()
def __iter__(self):
return iter(self.models.values())
def __getattr__(self, key):
fullname = (self.default_app+"."+key).lower()
try:
return self.models[fullname]
except KeyError:
raise AttributeError("The model '%s' from the app '%s' is not available in this migration." % (key, self.default_app))
def __getitem__(self, key):
# Detect if they asked for a field on a model or not.
if ":" in key:
key, fname = key.split(":")
else:
fname = None
# Now, try getting the model
key = key.lower()
try:
model = self.models[key]
except KeyError:
try:
app, model = key.split(".", 1)
except ValueError:
raise KeyError("The model '%s' is not in appname.modelname format." % key)
else:
raise KeyError("The model '%s' from the app '%s' is not available in this migration." % (model, app))
# If they asked for a field, get it.
if fname:
return model._meta.get_field_by_name(fname)[0]
else:
return model
def eval_in_context(self, code, app, extra_imports={}):
"Evaluates the given code in the context of the migration file."
# Drag in the migration module's locals (hopefully including models.py)
fake_locals = dict(inspect.getmodule(self.cls).__dict__)
# Remove all models from that (i.e. from modern models.py), to stop pollution
for key, value in fake_locals.items():
if isinstance(value, type) and issubclass(value, models.Model) and hasattr(value, "_meta"):
del fake_locals[key]
# We add our models into the locals for the eval
fake_locals.update(dict([
(name.split(".")[-1], model)
for name, model in self.models.items()
]))
# Make sure the ones for this app override.
fake_locals.update(dict([
(name.split(".")[-1], model)
for name, model in self.models.items()
if name.split(".")[0] == app
]))
# Ourselves as orm, to allow non-fail cross-app referencing
fake_locals['orm'] = self
# And a fake _ function
fake_locals['_'] = lambda x: x
# Datetime; there should be no datetime direct accesses
fake_locals['datetime'] = datetime
# Now, go through the requested imports and import them.
for name, value in extra_imports.items():
# First, try getting it out of locals.
parts = value.split(".")
try:
obj = fake_locals[parts[0]]
for part in parts[1:]:
obj = getattr(obj, part)
except (KeyError, AttributeError):
pass
else:
fake_locals[name] = obj
continue
# OK, try to import it directly
try:
fake_locals[name] = ask_for_it_by_name(value)
except ImportError:
if name == "SouthFieldClass":
raise ValueError("Cannot import the required field '%s'" % value)
else:
print "WARNING: Cannot import '%s'" % value
# Use ModelsLocals to make lookups work right for CapitalisedModels
fake_locals = ModelsLocals(fake_locals)
return eval(code, globals(), fake_locals)
def make_meta(self, app, model, data, stub=False):
"Makes a Meta class out of a dict of eval-able arguments."
results = {'app_label': app}
for key, code in data.items():
# Some things we never want to use.
if key in ["_bases"]:
continue
# Some things we don't want with stubs.
if stub and key in ["order_with_respect_to"]:
continue
# OK, add it.
try:
results[key] = self.eval_in_context(code, app)
except (NameError, AttributeError), e:
raise ValueError("Cannot successfully create meta field '%s' for model '%s.%s': %s." % (
key, app, model, e
))
return type("Meta", tuple(), results)
def make_model(self, app, name, data):
"Makes a Model class out of the given app name, model name and pickled data."
# Extract any bases out of Meta
if "_bases" in data['Meta']:
bases = data['Meta']['_bases']
else:
bases = ['django.db.models.Model']
# Turn the Meta dict into a basic class
meta = self.make_meta(app, name, data['Meta'], data.get("_stub", False))
failed_fields = {}
fields = {}
stub = False
# Now, make some fields!
for fname, params in data.items():
# If it's the stub marker, ignore it.
if fname == "_stub":
stub = bool(params)
continue
elif fname == "Meta":
continue
elif not params:
raise ValueError("Field '%s' on model '%s.%s' has no definition." % (fname, app, name))
elif isinstance(params, (str, unicode)):
# It's a premade definition string! Let's hope it works...
code = params
extra_imports = {}
else:
# If there's only one parameter (backwards compat), make it 3.
if len(params) == 1:
params = (params[0], [], {})
# There should be 3 parameters. Code is a tuple of (code, what-to-import)
if len(params) == 3:
code = "SouthFieldClass(%s)" % ", ".join(
params[1] +
["%s=%s" % (n, v) for n, v in params[2].items()]
)
extra_imports = {"SouthFieldClass": params[0]}
else:
raise ValueError("Field '%s' on model '%s.%s' has a weird definition length (should be 1 or 3 items)." % (fname, app, name))
try:
# Execute it in a probably-correct context.
field = self.eval_in_context(code, app, extra_imports)
except (NameError, AttributeError, AssertionError, KeyError):
# It might rely on other models being around. Add it to the
# model for the second pass.
failed_fields[fname] = (code, extra_imports)
else:
fields[fname] = field
# Find the app in the Django core, and get its module
more_kwds = {}
try:
app_module = models.get_app(app)
more_kwds['__module__'] = app_module.__name__
except ImproperlyConfigured:
# The app this belonged to has vanished, but thankfully we can still
# make a mock model, so ignore the error.
more_kwds['__module__'] = '_south_mock'
more_kwds['Meta'] = meta
# Make our model
fields.update(more_kwds)
model = type(
str(name),
tuple(map(ask_for_it_by_name, bases)),
fields,
)
# If this is a stub model, change Objects to a whiny class
if stub:
model.objects = WhinyManager()
# Also, make sure they can't instantiate it
model.__init__ = whiny_method
else:
model.objects = NoDryRunManager(model.objects)
if failed_fields:
model._failed_fields = failed_fields
return model
def retry_failed_fields(self):
"Tries to re-evaluate the _failed_fields for each model."
for modelkey, model in self.models.items():
app, modelname = modelkey.split(".", 1)
if hasattr(model, "_failed_fields"):
for fname, (code, extra_imports) in model._failed_fields.items():
try:
field = self.eval_in_context(code, app, extra_imports)
except (NameError, AttributeError, AssertionError, KeyError), e:
# It's failed again. Complain.
raise ValueError("Cannot successfully create field '%s' for model '%s': %s." % (
fname, modelname, e
))
else:
# Startup that field.
model.add_to_class(fname, field)
class WhinyManager(object):
"A fake manager that whines whenever you try to touch it. For stub models."
def __getattr__(self, key):
raise AttributeError("You cannot use items from a stub model.")
class NoDryRunManager(object):
"""
A manager that always proxies through to the real manager,
unless a dry run is in progress.
"""
def __init__(self, real):
self.real = real
def __getattr__(self, name):
if db.dry_run:
raise AttributeError("You are in a dry run, and cannot access the ORM.\nWrap ORM sections in 'if not db.dry_run:', or if the whole migration is only a data migration, set no_dry_run = True on the Migration class.")
return getattr(self.real, name)
def whiny_method(*a, **kw):
raise ValueError("You cannot instantiate a stub model.")
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import datetime
from frappe import _
import frappe
import frappe.database
import frappe.utils
from frappe.utils import cint
import frappe.utils.user
from frappe import conf
from frappe.sessions import Session, clear_sessions, delete_session
from frappe.modules.patch_handler import check_session_stopped
from frappe.translate import get_lang_code
from frappe.utils.password import check_password
from frappe.core.doctype.activity_log.activity_log import add_authentication_log
from frappe.twofactor import (should_run_2fa, authenticate_for_2factor,
confirm_otp_token, get_cached_user_pass)
from six.moves.urllib.parse import quote
class HTTPRequest:
def __init__(self):
# Get Environment variables
self.domain = frappe.request.host
if self.domain and self.domain.startswith('www.'):
self.domain = self.domain[4:]
if frappe.get_request_header('X-Forwarded-For'):
frappe.local.request_ip = (frappe.get_request_header('X-Forwarded-For').split(",")[0]).strip()
elif frappe.get_request_header('REMOTE_ADDR'):
frappe.local.request_ip = frappe.get_request_header('REMOTE_ADDR')
else:
frappe.local.request_ip = '127.0.0.1'
# language
self.set_lang()
# load cookies
frappe.local.cookie_manager = CookieManager()
# set db
self.connect()
# login
frappe.local.login_manager = LoginManager()
if frappe.form_dict._lang:
lang = get_lang_code(frappe.form_dict._lang)
if lang:
frappe.local.lang = lang
self.validate_csrf_token()
# write out latest cookies
frappe.local.cookie_manager.init_cookies()
# check status
check_session_stopped()
def validate_csrf_token(self):
if frappe.local.request and frappe.local.request.method=="POST":
if not frappe.local.session: return
if not frappe.local.session.data.csrf_token \
or frappe.local.session.data.device=="mobile" \
or frappe.conf.get('ignore_csrf', None):
# not via boot
return
csrf_token = frappe.get_request_header("X-Frappe-CSRF-Token")
if not csrf_token and "csrf_token" in frappe.local.form_dict:
csrf_token = frappe.local.form_dict.csrf_token
del frappe.local.form_dict["csrf_token"]
if frappe.local.session.data.csrf_token != csrf_token:
frappe.local.flags.disable_traceback = True
frappe.throw(_("Invalid Request"), frappe.CSRFTokenError)
def set_lang(self):
from frappe.translate import guess_language
frappe.local.lang = guess_language()
def get_db_name(self):
"""get database name from conf"""
return conf.db_name
def connect(self, ac_name = None):
"""connect to db, from ac_name or db_name"""
frappe.local.db = frappe.database.Database(user = self.get_db_name(), \
password = getattr(conf, 'db_password', ''))
class LoginManager:
def __init__(self):
self.user = None
self.info = None
self.full_name = None
self.user_type = None
if frappe.local.form_dict.get('cmd')=='login' or frappe.local.request.path=="/api/method/login":
if self.login()==False: return
self.resume = False
# run login triggers
self.run_trigger('on_session_creation')
else:
try:
self.resume = True
self.make_session(resume=True)
self.get_user_info()
self.set_user_info(resume=True)
except AttributeError:
self.user = "Guest"
self.get_user_info()
self.make_session()
self.set_user_info()
def login(self):
# clear cache
frappe.clear_cache(user = frappe.form_dict.get('usr'))
user, pwd = get_cached_user_pass()
self.authenticate(user=user, pwd=pwd)
if should_run_2fa(self.user):
authenticate_for_2factor(self.user)
if not confirm_otp_token(self):
return False
self.post_login()
def post_login(self):
self.run_trigger('on_login')
self.validate_ip_address()
self.validate_hour()
self.get_user_info()
self.make_session()
self.set_user_info()
def get_user_info(self, resume=False):
self.info = frappe.db.get_value("User", self.user,
["user_type", "first_name", "last_name", "user_image"], as_dict=1)
self.user_type = self.info.user_type
def set_user_info(self, resume=False):
# set sid again
frappe.local.cookie_manager.init_cookies()
self.full_name = " ".join(filter(None, [self.info.first_name,
self.info.last_name]))
if self.info.user_type=="Website User":
frappe.local.cookie_manager.set_cookie("system_user", "no")
if not resume:
frappe.local.response["message"] = "No App"
frappe.local.response["home_page"] = get_website_user_home_page(self.user)
else:
frappe.local.cookie_manager.set_cookie("system_user", "yes")
if not resume:
frappe.local.response['message'] = 'Logged In'
frappe.local.response["home_page"] = "/desk"
if not resume:
frappe.response["full_name"] = self.full_name
# redirect information
redirect_to = frappe.cache().hget('redirect_after_login', self.user)
if redirect_to:
frappe.local.response["redirect_to"] = redirect_to
frappe.cache().hdel('redirect_after_login', self.user)
frappe.local.cookie_manager.set_cookie("full_name", self.full_name)
frappe.local.cookie_manager.set_cookie("user_id", self.user)
frappe.local.cookie_manager.set_cookie("user_image", self.info.user_image or "")
def make_session(self, resume=False):
# start session
frappe.local.session_obj = Session(user=self.user, resume=resume,
full_name=self.full_name, user_type=self.user_type)
# reset user if changed to Guest
self.user = frappe.local.session_obj.user
frappe.local.session = frappe.local.session_obj.data
self.clear_active_sessions()
def clear_active_sessions(self):
"""Clear other sessions of the current user if `deny_multiple_sessions` is not set"""
if not (cint(frappe.conf.get("deny_multiple_sessions")) or cint(frappe.db.get_system_setting('deny_multiple_sessions'))):
return
if frappe.session.user != "Guest":
clear_sessions(frappe.session.user, keep_current=True)
def authenticate(self, user=None, pwd=None):
if not (user and pwd):
user, pwd = frappe.form_dict.get('usr'), frappe.form_dict.get('pwd')
if not (user and pwd):
self.fail(_('Incomplete login details'), user=user)
if cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number")):
user = frappe.db.get_value("User", filters={"mobile_no": user}, fieldname="name") or user
if cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_user_name")):
user = frappe.db.get_value("User", filters={"username": user}, fieldname="name") or user
self.check_if_enabled(user)
self.user = self.check_password(user, pwd)
def check_if_enabled(self, user):
"""raise exception if user not enabled"""
if user=='Administrator': return
if not cint(frappe.db.get_value('User', user, 'enabled')):
self.fail('User disabled or missing', user=user)
def check_password(self, user, pwd):
"""check password"""
try:
# returns user in correct case
return check_password(user, pwd)
except frappe.AuthenticationError:
self.fail('Incorrect password', user=user)
def fail(self, message, user=None):
if not user:
user = _('Unknown User')
frappe.local.response['message'] = message
add_authentication_log(message, user, status="Failed")
frappe.db.commit()
raise frappe.AuthenticationError
def run_trigger(self, event='on_login'):
for method in frappe.get_hooks().get(event, []):
frappe.call(frappe.get_attr(method), login_manager=self)
def validate_ip_address(self):
"""check if IP Address is valid"""
ip_list = frappe.db.get_value('User', self.user, 'restrict_ip', ignore=True)
if not ip_list:
return
ip_list = ip_list.replace(",", "\n").split('\n')
ip_list = [i.strip() for i in ip_list]
for ip in ip_list:
if frappe.local.request_ip.startswith(ip):
return
frappe.throw(_("Not allowed from this IP Address"), frappe.AuthenticationError)
def validate_hour(self):
"""check if user is logging in during restricted hours"""
login_before = int(frappe.db.get_value('User', self.user, 'login_before', ignore=True) or 0)
login_after = int(frappe.db.get_value('User', self.user, 'login_after', ignore=True) or 0)
if not (login_before or login_after):
return
from frappe.utils import now_datetime
current_hour = int(now_datetime().strftime('%H'))
if login_before and current_hour > login_before:
frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError)
if login_after and current_hour < login_after:
frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError)
def login_as_guest(self):
"""login as guest"""
self.login_as("Guest")
def login_as(self, user):
self.user = user
self.post_login()
def logout(self, arg='', user=None):
if not user: user = frappe.session.user
self.run_trigger('on_logout')
if user == frappe.session.user:
delete_session(frappe.session.sid, user=user, reason="User Manually Logged Out")
self.clear_cookies()
else:
clear_sessions(user)
def clear_cookies(self):
clear_cookies()
class CookieManager:
def __init__(self):
self.cookies = {}
self.to_delete = []
def init_cookies(self):
if not frappe.local.session.get('sid'): return
# sid expires in 3 days
expires = datetime.datetime.now() + datetime.timedelta(days=3)
if frappe.session.sid:
self.cookies["sid"] = {"value": frappe.session.sid, "expires": expires}
if frappe.session.session_country:
self.cookies["country"] = {"value": frappe.session.get("session_country")}
def set_cookie(self, key, value, expires=None):
self.cookies[key] = {"value": value, "expires": expires}
def delete_cookie(self, to_delete):
if not isinstance(to_delete, (list, tuple)):
to_delete = [to_delete]
self.to_delete.extend(to_delete)
def flush_cookies(self, response):
for key, opts in self.cookies.items():
response.set_cookie(key, quote((opts.get("value") or "").encode('utf-8')),
expires=opts.get("expires"))
# expires yesterday!
expires = datetime.datetime.now() + datetime.timedelta(days=-1)
for key in set(self.to_delete):
response.set_cookie(key, "", expires=expires)
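# Illustrative lifecycle sketch (the cookie values below are made up;
# frappe.local.cookie_manager is set by HTTPRequest above): cookies queued via
# set_cookie()/delete_cookie() during the request are only written out when
# flush_cookies() is called with a response object exposing set_cookie(), e.g.
#
#     manager = frappe.local.cookie_manager
#     manager.set_cookie("full_name", "Jane Doe")
#     manager.delete_cookie("user_image")
#     manager.flush_cookies(response)  # `response` supplied by the framework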
@frappe.whitelist()
def get_logged_user():
return frappe.session.user
def clear_cookies():
if hasattr(frappe.local, "session"):
frappe.session.sid = ""
frappe.local.cookie_manager.delete_cookie(["full_name", "user_id", "sid", "user_image", "system_user"])
def get_website_user_home_page(user):
home_page_method = frappe.get_hooks('get_website_user_home_page')
if home_page_method:
home_page = frappe.get_attr(home_page_method[-1])(user)
return '/' + home_page.strip('/')
else:
return '/me'
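# Illustrative only: an app opts into a custom website-user landing page by
# declaring the hook used above in its hooks.py, e.g.
#
#     get_website_user_home_page = "myapp.website.get_home_page"
#
# where myapp.website.get_home_page(user) is a hypothetical function returning a
# path such as "dashboard"; get_website_user_home_page() then picks the last
# registered hook and normalises the result to "/dashboard".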
|
|
from comperio.main.tools import render
from comperio.main.tools import MessageManager
from comperio.settings import SITE_URL
from comperio.accounts.models import cUser, cGroup
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.models import User, ContentType
from comperio.accounts.forms import RegistrationForm, LoginForm, SettingsForm, CreateGroupForm, EditAccountForm
from django.core.mail import send_mail
from django.shortcuts import get_object_or_404, redirect, Http404, HttpResponseRedirect
import datetime, sha, random
from comperio.main.backends import ModelBackend
from comperio.main.models import Link, Discussion, CodePackage
from comperio.curricula.models import Curriculum
from comperio.accounts.tools import is_manager
from comperio.tasks.models import Task
from django.core.urlresolvers import reverse
from django.views.decorators.cache import cache_control
@cache_control(private=True)
def login_view(request):
"""Login to a user account and redirect to profile"""
# TODO: put link on profile page to return to original page
# prepare messages
mm = MessageManager(request)
if request.user.is_authenticated():
return redirect(request.user.get_absolute_url())
if request.POST:
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
# cookies are enabled
form = LoginForm(request.POST)
            if form is not None and form.is_valid():
# log the user in. we extended to backend to allow using email
# instead of just username
e = form.cleaned_data['username']
p = form.cleaned_data['password']
try:
user = authenticate(username=e, password=p)
except NameError:
user = None
if user is not None:
if user.is_active:
#request.user = user
login(request, user)
mm.set_success('you are logged in!')
return redirect(request.user.get_absolute_url())
else:
# account is disabled
mm.set_error('This account has been disabled, or has not been activated.')
else:
# invalid login
mm.set_error('Invalid credentials.')
else:
#mm.set_error('The form is invalid')
pass
else:
# cookies are not enabled
mm.set_error('Please enable cookies and try again.')
else:
form = LoginForm()
request.session.set_test_cookie()
return render(request, 'accounts/login.html', {'form':form}, mm.messages())
@cache_control(private=True)
def logout_view(request):
"""log out the user and redirect to front page"""
logout(request)
mm = MessageManager(request)
mm.set_success("You have logged out")
return redirect('/')
@cache_control(private=True)
def register(request):
"""register a new user"""
# prepare messages
mm = MessageManager(request)
if request.user.is_authenticated():
mm.set_notice('You already have an account')
return render(request, 'accounts/register.html', mm.messages())
if request.POST:
form = RegistrationForm(request.POST)
new_data = request.POST.copy()
# Validate passwords
# TODO: put validation in form.
if not form.isValidHuman(new_data):
mm.set_error('Sorry only humans can register. Try reloading the page')
return render(request, 'accounts/register.html', mm.messages(), {'form':form})
if not form.isValidUsername(new_data):
mm.set_error('That username is already taken')
return render(request, 'accounts/register.html', mm.messages(), {'form':form})
if not form.isValidEmail(new_data):
mm.set_error('That email is already in use')
return render(request, 'accounts/register.html', mm.messages(), {'form':form})
if not form.PasswordsMatch(new_data):
mm.set_error('Passwords do not match')
return render(request, 'accounts/register.html', mm.messages(), {'form':form})
if not form.isValidPassword(new_data):
mm.set_error('Passwords must be at least 6 characters long')
return render(request, 'accounts/register.html', mm.messages(), {'form':form})
# validate form data
if form.is_valid():
# Save the user
new_user = form.save(new_data)
            if new_user is not None:
# TODO: email is not sent for certain emails ([email protected] fails)
email_subject = 'Your new Comperio account confirmation'
email_body =\
"You recently signed up for a new Comperio account.\
\n\nTo activate your account, click this link within 48 hours:\
\n%s/users/confirm/%s\
\n\n After your account is activated you can go to your account by clicking the link below\
\n%s/users/%s\
\n\nAccount Details\
\nusername: %s\
\nemail: %s\
\npassword: %s" % (
SITE_URL,
new_user.activation_key,
SITE_URL,
new_user.username,
new_user.username,
form.cleaned_data['email'],
form.cleaned_data['password1'])
send_mail(email_subject,
email_body,
'[email protected]',
[new_user.email])
mm.set_success("You're in! We just emailed you instructions to activate your account")
return redirect('/')
else:
mm.set_error("Could not create user")
else:
mm.set_error("Please fill all required fields.")
return render(request, 'accounts/register.html', mm.messages(), {'form':form})
form = RegistrationForm()
return render(request, 'accounts/register.html', mm.messages(), {'form':form})
@cache_control(private=True)
def delete(request, username):
"""delete a user account"""
# prepare messages
mm = MessageManager(request)
if request.user.is_active and request.user.username == username:
if request.user.is_superuser:
mm.set_error("um... you can't delete a super user silly.")
return redirect(request.META.get('HTTP_REFERER','/'))
# Build the removal key for account
salt = sha.new(str(random.random())).hexdigest()[:5]
removal_key = sha.new(salt+username).hexdigest()
key_expires = datetime.datetime.today() + datetime.timedelta(2)
request.user.removal_key = removal_key
request.user.key_expires = key_expires
request.user.save()
# prepare email
email_subject = 'Confirm account deletion'
email_body =\
"We just received a request to delete your Comperio account.\
\n\nTo delete your account, click this link within 48 hours:\
\n%s/users/delete/confirm/%s" % (
SITE_URL,
request.user.removal_key)
send_mail(email_subject,
email_body,
'[email protected]',
[request.user.email])
mm.set_success("We just sent you an email to verify your account removal.")
return redirect(request.META.get('HTTP_REFERER','/'))
@cache_control(private=True)
def confirm_delete(request, removal_key):
"""confirm a account removal"""
# prepare messages
mm = MessageManager(request)
user = get_object_or_404(cUser, removal_key=removal_key)
if user.key_expires < datetime.datetime.today():
        mm.set_notice("This removal code has expired.")
return render(request, 'accounts/confirm_delete.html', mm.messages(), {'expired':True})
user.key_expires = datetime.datetime.today() - datetime.timedelta(days=1)
user.is_active = False
user.save()
logout(request)
mm.set_success("Your account has been successfully removed. Please visit us again!")
return render(request, 'main/index.html', mm.messages())
@cache_control(private=True)
def confirm(request, activation_key):
"""confirm a user registration"""
# prepare messages
mm = MessageManager(request)
if request.user.is_active and request.user.activation_key == activation_key:
raise Http404()
user = get_object_or_404(cUser, activation_key=activation_key)
if user.key_expires < datetime.datetime.today():
        mm.set_notice("This activation code has expired. Try creating a new account.")
# TODO: completely remove accounts that have expired
return render(request, 'accounts/confirm.html', mm.messages(), {'expired':True})
user.key_expires = datetime.datetime.today() - datetime.timedelta(days=1)
mm.set_success("Congratulations! You just activated your account!")
request.session.set_test_cookie()
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
# cookies are enabled
        # we need to call authenticate to set some variables before calling login()
auth_user = None
try:
auth_user = authenticate(username=user.username, password=user.activation_key)
# activate the user after authentication so we can't
# login with the activation key anymore
#auth_user.is_active = True
#auth_user.save()
except NameError:
pass
if auth_user is not None:
if auth_user.is_active:
login(request, auth_user)
# TODO: we don't cover all our bases here. we need to be more thorough with error checking
if auth_user:
return redirect(auth_user.get_absolute_url())
else:
return redirect('/')
@cache_control(private=True)
def users(request, username):
"""display the user account page"""
# prepare messages
mm = MessageManager(request)
try:
u = cUser.objects.get(username=username)
fav = u.favorites
except cUser.DoesNotExist:
u = get_object_or_404(User, username=username)
fav = None
dict_content = {}
dict_content['links'] = Link.objects.filter(user=u)
dict_content['discussions'] = Discussion.objects.filter(user=u)
dict_content['code_packages'] = CodePackage.objects.filter(user=u)
dict_content['groups'] = u.groups.all()
dict_content['curriculums'] = Curriculum.objects.filter(user=u)
manager = is_manager(request.user, u)
tasks = None
if request.user.username == u.username:
tasks = Task.objects.filter(author=u)
return render(request, 'accounts/user/profile.html', {'tasks':tasks, 'profile_user':u, 'profile_favorites':fav, 'profile_content':dict_content, 'is_user_manager':manager}, mm.messages())
@cache_control(private=True)
def edit_account(request, username):
"""edit a user's account details"""
# prepare messages
mm = MessageManager(request)
try:
u = cUser.objects.get(username=username)
except cUser.DoesNotExist:
u = get_object_or_404(User, username=username)
# must be an authenticated user and own the profile in order to get the form.
if request.user.is_authenticated() and request.user == u:
if request.POST:
# collect form
form = EditAccountForm(request.POST)
if form.is_valid():
form.update(request, u)
mm.set_success("Your account has been updated")
return redirect(request.user.get_absolute_url())
else:
return render(request, 'accounts/user/edit.html', {'form':form}, mm.messages())
else:
# prepare form
form = EditAccountForm(initial={'username':u.username, 'email':u.email})
return render(request, 'accounts/user/edit.html', {'form':form}, mm.messages())
else:
return redirect(u.get_absolute_url())
@cache_control(private=True)
def user_metrics(request, username):
"""display user metrics page"""
# TODO: only group administrators can view user metrics
mm = MessageManager(request)
try:
u = cUser.objects.get(username=username)
except cUser.DoesNotExist:
u = get_object_or_404(User, username=username)
has_permission_to_see_metrics = False
if request.user.is_authenticated() and request.user.username == username:
has_permission_to_see_metrics = True
elif is_manager(request.user, u):
has_permission_to_see_metrics = True
if has_permission_to_see_metrics:
return render(request, 'accounts/user/metrics.html', {'profile_user':u}, mm.messages())
else:
#raise Http404
mm.set_notice("you do not have permission to view that page")
return redirect(request.META.get('HTTP_REFERER','/'))
@cache_control(private=True)
def user_account(request, username):
"""display user account details page"""
mm = MessageManager(request)
# must be an authenticated user and own the profile in order to get the form.
if request.user.is_authenticated() and request.user.username == username:
        # must also be of type cUser, since the superuser defaults to a plain User.
if request.user.__class__ == cUser:
if request.POST:
# collect form
form = SettingsForm(request.POST)
if form.is_valid():
s = request.user.settings
s.public_comments = form.cleaned_data['public_comments']
s.code_pkg_updates = form.cleaned_data['code_pkg_updates']
s.save()
mm.set_success("Your settings have been saved")
return render(request, 'accounts/user/account.html', {'form':form}, mm.messages())
else:
mm.set_error("Sorry! We encountered an error and were unable to save your settings")
form = SettingsForm(instance=request.user.settings)
return render(request, 'accounts/user/account.html', {'form':form}, mm.messages())
else:
# prepare form
form = SettingsForm(instance=request.user.settings)
return render(request, 'accounts/user/account.html', {'form':form}, mm.messages())
return render(request, 'accounts/user/account.html', mm.messages())
@cache_control(private=True)
def groups(request):
"""list all the public groups"""
mm = MessageManager(request)
groups = cGroup.objects.all()
return render(request, 'accounts/groups/index.html', mm.messages(), {'groups':groups})
@cache_control(private=True)
def group_page(request, gid):
"""display the group page"""
mm = MessageManager(request)
group = get_object_or_404(cGroup, pk=gid)
manager = False
member = False
if request.user.is_authenticated() and request.user.__class__ == cUser:
manager = request.user.is_group_manager(group)
member = request.user.is_group_member(group)
pending = request.user in group.pending_users.all()
return render(request, 'accounts/groups/group_page.html', mm.messages(), {'group':group,'is_group_manager':manager, 'is_group_member':member, 'is_pending_member':pending})
@cache_control(private=True)
def confirm_leave_group(request, gid):
"""leave a group"""
mm = MessageManager(request)
group = get_object_or_404(cGroup, pk=gid)
return render(request, 'accounts/groups/leave.html', {'group':group}, mm.messages())
@cache_control(private=True)
def leave_group(request, gid):
"""leave a group"""
mm = MessageManager(request)
group = get_object_or_404(cGroup, pk=gid)
if request.user in group.managers.all():
if group.managers.count() > 1:
group.managers.remove(request.user)
group.save()
else:
mm.set_notice("you cannot leave this group because you are the only manager")
return redirect(group.get_absolute_url())
elif request.user in group.members.all():
group.members.remove(request.user)
group.save()
else:
mm.set_notice("you cannot leave a group unless you are a member")
return redirect(group.get_absolute_url())
request.user.groups.remove(group)
request.user.save()
mm.set_success("you are no longer a member of this group.")
return redirect(group.get_absolute_url())
@cache_control(private=True)
def create_group(request):
"""create a new user group"""
mm = MessageManager(request)
if request.user.is_authenticated() and request.user.__class__ is cUser:
if request.POST:
form = CreateGroupForm(request.POST)
if form.is_valid():
data = request.POST.copy()
# make sure the group name is unique
try:
cGroup.objects.get(name=data["title"])
mm.set_error("that group name is already taken")
return render(request,'accounts/groups/create.html', mm.messages(), {'form':form})
except cGroup.DoesNotExist:
# create group
g = cGroup()
g.name = data["title"]
g.description = data["description"]
g.type = data["type"]
g.visibility = data["visibility"]
                    g.open_registration = "open_registration" in data
g.save()
# add user to group
request.user.groups.add(g)
g.managers.add(request.user)
# Build the invitation key
salt = sha.new(str(random.random())).hexdigest()[:5]
g.invitation_key = sha.new(salt+g.name).hexdigest()[:13]
g.key_expires = datetime.datetime.today() + datetime.timedelta(2)
g.save()
manage_url = reverse('manage-group', None, (), {'gid':g.pk})
invite_url = "%s%s/%s" % (SITE_URL, reverse('join-group', None, (), {'gid':g.pk}), g.invitation_key)
mm.set_success("Successfully Created Group \"%s\"!<p> We automatically generated an invitation url that you can share with your friends. For more information check out the <a href=\"%s\">Administration Page</a>.</p><p>Invitation Url: <a href=\"%s\">%s</a></p>" % (g.name, manage_url, invite_url, invite_url))
# TODO: take to new group page
return redirect(g.get_absolute_url())
else:
return render(request,'accounts/groups/create.html', mm.messages(), {'form':form})
else:
# prepare new form for user
form = CreateGroupForm(initial={'open_registration':True})
return render(request,'accounts/groups/create.html', mm.messages(), {'form':form})
else:
mm.set_error("you are not allowed to create a group")
return redirect(request.META.get('HTTP_REFERER','/'))
@cache_control(private=True)
def join_group(request, gid, invitation_key = None):
"""request to join a group"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
if not g.open_registration and invitation_key is None:
return redirect(g.get_absolute_url())
if request.user.is_authenticated() and request.user.__class__ == cUser:
if request.user.is_group_member(g):
mm.set_notice("you are already a member of this group.")
return redirect(g.get_absolute_url())
if request.user.is_group_manager(g):
mm.set_notice("you are a manager of this group and cannot become a member.")
return redirect(g.get_absolute_url())
        if invitation_key is not None:
if invitation_key == g.invitation_key:
if g.key_expires < datetime.datetime.today():
                    mm.set_notice("This invitation code has expired.")
else:
mm.set_success("you are now a member of this group!")
request.user.groups.add(g)
request.user.save()
if request.user in g.pending_users.all():
g.pending_users.remove(request.user)
g.members.add(request.user)
g.save()
else:
mm.set_notice("that invitation key is invalid or has expired")
else:
g.pending_users.add(request.user)
g.save()
mm.set_success("your membership request has been sent to the group administrators")
else:
        mm.set_notice("you must log in or create an account before you can join a group.")
return HttpResponseRedirect(reverse('login-page'))
return redirect(g.get_absolute_url())
@cache_control(private=True)
def admit_to_group(request, gid, uid):
"""admit a user into a group"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
u = get_object_or_404(cUser, pk=uid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
if u in g.pending_users.all():
mm.set_success("%s has been admitted" % u.username)
g.pending_users.remove(u)
u.groups.add(g)
g.members.add(u)
u.save()
g.save()
else:
mm.set_error("you can't just add whomever you want to your group!")
return redirect(reverse('manage-group', None, (), {'gid':g.pk}))
return redirect(g.get_absolute_url())
@cache_control(private=True)
def reject_from_group(request, gid, uid):
"""reject a pending membership request"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
u = get_object_or_404(cUser, pk=uid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
if u in g.pending_users.all():
mm.set_success("membership refused for %s" % u.username)
g.pending_users.remove(u)
g.save()
else:
mm.set_error("you can't just reject whomever you want!")
return redirect(reverse('manage-group', None, (), {'gid':g.pk}))
return redirect(g.get_absolute_url())
@cache_control(private=True)
def kick_from_group(request, gid, uid):
"""kick a user out of a group"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
u = get_object_or_404(cUser, pk=uid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
# can only kick normal users, not managers
if u in g.users():
u.groups.remove(g)
g.members.remove(u)
u.save()
g.save()
mm.set_success("%s has been kicked" % u.username)
else:
mm.set_error("you can only kick existing members")
return redirect(reverse('manage-group', None, (), {'gid':g.pk}))
return redirect(g.get_absolute_url())
@cache_control(private=True)
def edit_group(request, gid):
"""edit a group"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
if request.POST:
form = CreateGroupForm(request.POST)
if form.is_valid():
data = request.POST.copy()
# make sure the group name is unique
#try:
# cGroup.objects.get(name=data["title"])
# mm.set_error("that group name is already taken")
# return render(request,'accounts/groups/create.html', mm.messages(), {'form':form})
#except cGroup.DoesNotExist:
# create group
#g = cGroup()
g.name = data["title"]
g.description = data["description"]
g.type = data["type"]
g.visibility = data["visibility"]
                g.open_registration = "open_registration" in data
g.save()
# add user to group
#request.user.groups.add(g)
#g.managers.add(request.user)
#g.save()
                mm.set_success("edits were successful")
# TODO: take to new group page
return redirect(g.get_absolute_url())
else:
return render(request,'accounts/groups/edit.html', mm.messages(), {'form':form, 'group':g})
else:
# prepare new form for user
form = CreateGroupForm(initial={'title': g.name, 'description':g.description, 'type':g.type, 'open_registration':g.open_registration})
return render(request,'accounts/groups/edit.html', mm.messages(), {'form':form, 'group':g})
else:
mm.set_error("you are not allowed to edit this group")
        return redirect(g.get_absolute_url())
@cache_control(private=True)
def delete_group(request, gid):
"""delete a group"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
# TODO: do we need to remove group reference from users?
g.delete()
return redirect(request.user.get_absolute_url() + "#groups")
else:
mm.set_error("you are not allowed to delete this group")
return redirect(request.META.get('HTTP_REFERER','/'))
@cache_control(private=True)
def manage_group(request, gid):
"""manage a group"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
return render(request, 'accounts/groups/manage.html', mm.messages(), {'group':g})
else:
mm.set_error("you are not allowed to manage this group")
return redirect(g.get_absolute_url())
@cache_control(private=True)
def group_invitation(request, gid):
"""generate an invitation url for a group"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
# Build the invitation key
salt = sha.new(str(random.random())).hexdigest()[:5]
g.invitation_key = sha.new(salt+g.name).hexdigest()[:13]
g.key_expires = datetime.datetime.today() + datetime.timedelta(2)
g.save()
return redirect(g.get_absolute_url() + "/manage")
else:
mm.set_error("you are not authorized to perform this action")
return redirect(g.get_absolute_url())
@cache_control(private=True)
def delete_group_invitation(request, gid):
"""delete the current group invitation key so that it cannot be used"""
mm = MessageManager(request)
g = get_object_or_404(cGroup, pk=gid)
if request.user.is_authenticated() and request.user.is_group_manager(g):
g.invitation_key = None
g.key_expires = None
g.save()
return redirect(g.get_absolute_url() + "/manage")
else:
mm.set_error("you are not authorized to perform this action")
return redirect(g.get_absolute_url())
# TODO: move these generic functions into tools
def add_favorite(request, model, id):
"""add some content to a users favorites"""
try:
obj = model.objects.get(pk=id)
except model.DoesNotExist:
return False
f = request.user.favorites
print obj
if f.add_by_model(model, obj):
f.save()
return True
else:
return False
def remove_favorite(request, model, id):
"""remove some content from a users favorites"""
try:
obj = model.objects.get(pk=id)
except model.DoesNotExist:
return False
f = request.user.favorites
return f.remove_by_model(model, obj)
def favor_discussion(request, id):
"""add a discussion to a users favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
add_favorite(request, Discussion, id)
return redirect(request.META.get('HTTP_REFERER', '/'))
def unfavor_discussion(request, id):
"""remove a discussion from a users favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
remove_favorite(request, Discussion, id)
return redirect(request.META.get('HTTP_REFERER', '/'))
def favor_code_package(request, id):
"""add a code package to a users favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
add_favorite(request, CodePackage, id)
return redirect(request.META.get('HTTP_REFERER', '/'))
def unfavor_code_package(request, id):
"""remove a code package from a users favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
remove_favorite(request, CodePackage, id)
return redirect(request.META.get('HTTP_REFERER', '/'))
def favor_group(request, id):
"""add a group to a users favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
add_favorite(request, cGroup, id)
return redirect(request.META.get('HTTP_REFERER', '/'))
def unfavor_group(request, id):
"""remove a group from a users favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
remove_favorite(request, cGroup, id)
return redirect(request.META.get('HTTP_REFERER', '/'))
@cache_control(private=True)
def add_favorite_user(request, username):
"""add a user to favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
f = request.user.favorites
user = get_object_or_404(cUser, username=username)
f.users.add(user)
f.save()
return redirect(request.META.get('HTTP_REFERER','/'))
@cache_control(private=True)
def remove_favorite_user(request, username):
"""remove a user from favorites"""
if request.user.is_authenticated() and request.user.__class__ == cUser:
f = request.user.favorites
user = get_object_or_404(cUser, username=username)
f.users.remove(user)
f.save()
return redirect(request.META.get('HTTP_REFERER','/'))
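# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): a hypothetical URLconf that
# would satisfy the names and paths referenced above (reverse('login-page'),
# reverse('manage-group', ...), reverse('join-group', ...) and the
# /users/confirm/<key> and /users/delete/confirm/<key> links built into the
# activation and removal emails). The regexes and the old-style patterns()
# syntax are assumptions matching the Django era of this code.
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('comperio.accounts.views',
#       url(r'^users/login/$', 'login_view', name='login-page'),
#       url(r'^users/confirm/(?P<activation_key>\w+)/$', 'confirm'),
#       url(r'^users/delete/confirm/(?P<removal_key>\w+)/$', 'confirm_delete'),
#       url(r'^groups/(?P<gid>\d+)/manage/$', 'manage_group', name='manage-group'),
#       url(r'^groups/(?P<gid>\d+)/join/$', 'join_group', name='join-group'),
#       url(r'^groups/(?P<gid>\d+)/join/(?P<invitation_key>\w+)$', 'join_group'),
#   )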
|
|
# Copyright 2014, Ansible, Inc.
# Luke Sneeringer <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import os
import warnings
import six
from six.moves import configparser
from six import StringIO
from sdict import adict
class Parser(configparser.ConfigParser):
"""ConfigParser subclass that doesn't strictly require section
headers.
"""
def _read(self, fp, fpname):
"""Read the configuration from the given file.
If the file lacks any section header, add a [general] section
header that encompasses the whole thing.
"""
# Attempt to read the file using the superclass implementation.
#
# If it doesn't work because there's no section header, then
# create a section header and call the superclass implementation
# again.
try:
return configparser.ConfigParser._read(self, fp, fpname)
except configparser.MissingSectionHeaderError:
fp.seek(0)
string = '[general]\n%s' % fp.read()
flo = StringIO(string) # flo == file-like object
return configparser.ConfigParser._read(self, flo, fpname)
class Settings(object):
"""An object that understands permanent configuration provided to
tower-cli through configuration files or command-line arguments.
The order of precedence for settings, from least to greatest, is:
    - defaults provided in __init__ below
- `/etc/awx/tower_cli.cfg`
- `~/.tower_cli.cfg`
- command line arguments
"""
_parser_names = ['runtime', 'local', 'user', 'global', 'defaults']
def __init__(self):
"""Create the settings object, and read from appropriate files as
well as from `sys.argv`.
"""
self._cache = {}
# Initialize the data dictionary for the default level
# precedence (that is, the bottom of the totem pole).
defaults = {
'color': 'true',
'format': 'human',
'host': '127.0.0.1',
'password': '',
'username': '',
'verbose': 'false',
}
self._defaults = Parser(defaults=defaults)
self._defaults.add_section('general')
# If there is a global settings file, initialize it.
self._global = Parser()
self._global.add_section('general')
if os.path.isdir('/etc/awx/'):
# Sanity check: Try to actually get a list of files in `/etc/awx/`.
#
# The default Tower installation caused `/etc/awx/` to have
# extremely restrictive permissions, since it has its own user
# and group and has a chmod of 0750.
#
# This makes it very easy for a user to fall into the mistake
# of writing a config file under sudo which they then cannot read,
# which could lead to difficult-to-troubleshoot situations.
#
# Therefore, check for that particular problem and give a warning
# if we're in that situation.
try:
global_settings = 'tower_cli.cfg' in os.listdir('/etc/awx/')
except OSError:
warnings.warn('/etc/awx/ is present, but not readable with '
'current permissions. Any settings defined in '
'/etc/awx/tower_cli.cfg will not be honored.',
RuntimeWarning)
# If there is a global settings file for Tower CLI, read in its
# contents.
self._global.read('/etc/awx/tower_cli.cfg')
# Initialize a parser for the user settings file.
self._user = Parser()
self._user.add_section('general')
# If there is a user settings file, read it into the parser object.
user_filename = os.path.expanduser('~/.tower_cli.cfg')
self._user.read(user_filename)
# Initialize a parser for the local settings file.
self._local = Parser()
self._local.add_section('general')
# If there is a local settings file in the current working directory
# or any parent, read it into the parser object.
#
# As a first step, we need to get each of the parents.
cwd = os.getcwd()
local_dirs = []
for i in range(0, len(cwd.split('/'))):
local_dir = '/'.join(cwd.split('/')[0:i + 1])
if len(local_dir) == 0:
local_dir = '/'
# Sanity check: if this directory corresponds to our global or
# user directory, skip it.
if local_dir in (os.path.expanduser('~'), '/etc/awx'):
continue
# Add this directory to the list.
local_dirs.append(local_dir)
# Iterate over each potential local config file and attempt to read
# it (most won't exist, which is fine).
for local_dir in local_dirs:
local_filename = '%s/.tower_cli.cfg' % local_dir
self._local.read(local_filename)
# Put a stubbed runtime parser in.
self._runtime = Parser()
self._runtime.add_section('general')
def __getattr__(self, key):
"""Return the approprate value, intelligently type-casted in the
case of numbers or booleans.
"""
# Sanity check: Have I cached this value? If so, return that.
if key in self._cache:
return self._cache[key]
# Run through each of the parsers and check for a value. Whenever
# we actually find a value, try to determine the correct type for it
# and cache and return a value of that type.
for parser in self._parsers:
# Get the value from this parser; if it's None, then this
# key isn't present and we move on to the next one.
try:
value = parser.get('general', key)
except configparser.NoOptionError:
continue
# We have a value; it may or may not be a string, though, so
# try to return it as an int, float, or boolean (in that order)
# before falling back to the string value.
type_method = ('getint', 'getfloat', 'getboolean')
for tm in type_method:
try:
value = getattr(parser, tm)('general', key)
break
except ValueError:
pass
# Write the value to the cache, so we don't have to do this lookup
# logic on subsequent requests.
self._cache[key] = value
return self._cache[key]
# If we got here, that means that the attribute wasn't found, and
# also that there is no default; raise an exception.
raise AttributeError('No setting exists: %s.' % key.lower())
@property
def _parsers(self):
"""Return a tuple of all parsers, in order.
This is referenced at runtime, to avoid gleefully ignoring the
`runtime_values` context manager.
"""
return tuple([getattr(self, '_%s' % i) for i in self._parser_names])
@contextlib.contextmanager
def runtime_values(self, **kwargs):
"""Temporarily override the runtime settings, which exist at the
highest precedence level.
"""
# Coerce all values to strings (to be coerced back by configparser
# later) and defenestrate any None values.
for k, v in copy.copy(kwargs).items():
# If the value is None, just get rid of it.
if v is None:
kwargs.pop(k)
continue
# Remove these keys from the cache, if they are present.
self._cache.pop(k, None)
# Coerce values to strings.
kwargs[k] = six.text_type(v)
# Replace the `self._runtime` INI parser with a new one, using
# the context manager's kwargs as the "defaults" (there can never
# be anything other than defaults, but that isn't a problem for our
# purposes because we're using our own precedence system).
#
# Ensure that everything is put back to rights at the end of the
# context manager call.
old_runtime_parser = self._runtime
try:
self._runtime = Parser(defaults=kwargs)
self._runtime.add_section('general')
yield self
finally:
# Revert the runtime configparser object.
self._runtime = old_runtime_parser
# Remove the keys from the cache again, since the settings
# have been reverted.
for key in kwargs:
                self._cache.pop(key, None)
# The primary way to interact with settings is to simply hit the
# already constructed settings object.
settings = Settings()
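# A minimal, hedged demonstration of the two pieces above (it only runs when
# this file is executed directly, so importing the module is unaffected). It
# shows the Parser accepting a header-less config by injecting [general], and
# the Settings precedence and type-coercion behaviour via runtime_values().
# The sample option values below are made up for illustration.
if __name__ == '__main__':
    # Parser: a config blob with no [section] header still parses.
    demo = Parser()
    demo.readfp(StringIO('username: admin\nverbose: true\n'))
    print(demo.get('general', 'username'))       # -> admin
    # Settings: runtime values take the highest precedence and are coerced
    # back from strings to int/float/bool on attribute access.
    with settings.runtime_values(verbose=True, host='tower.example.com'):
        print(settings.verbose)                  # -> True (a bool, not 'true')
        print(settings.host)                     # -> tower.example.com
    print(settings.host)                         # reverts to the configured default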
|
|
import pygame
import os
from Game.Framework.Scene import *
from Game.Framework.Vector import *
class Cereal:
box_32 = pygame.image.load(os.path.join('Assets', 'cereal32.png'))
def __init__(self, position, id):
self.__size = 32
self.image = Cereal.box_32
self.rect = pygame.Rect((position.x, position.y), (self.__size, self.__size))
self.velocity = Vector(0, 0)
self.__id = id
@property
def id(self):
return self.__id
class Player:
"""
Holds the state of the player object
"""
right_facing_32 = pygame.image.load(os.path.join('Assets', 'soul_right.png'))
right_facing_40 = pygame.image.load(os.path.join('Assets', 'soul_right40.png'))
right_facing_48 = pygame.image.load(os.path.join('Assets', 'soul_right48.png'))
right_facing_56 = pygame.image.load(os.path.join('Assets', 'soul_right56.png'))
right_facing_64 = pygame.image.load(os.path.join('Assets', 'soul_right64.png'))
right_facing_list = [right_facing_32, right_facing_40, right_facing_48, right_facing_56, right_facing_64]
left_facing_32 = pygame.image.load(os.path.join('Assets', 'soul_left.png'))
left_facing_40 = pygame.image.load(os.path.join('Assets', 'soul_left40.png'))
left_facing_48 = pygame.image.load(os.path.join('Assets', 'soul_left48.png'))
left_facing_56 = pygame.image.load(os.path.join('Assets', 'soul_left56.png'))
left_facing_64 = pygame.image.load(os.path.join('Assets', 'soul_left64.png'))
left_facing_list = [left_facing_32, left_facing_40, left_facing_48, left_facing_56, left_facing_64]
right_facing_scrunched = pygame.image.load(os.path.join('Assets', 'soul_scrunch_right.png'))
right_facing_scrunched_40 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_right40.png'))
right_facing_scrunched_48 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_right48.png'))
right_facing_scrunched_56 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_right56.png'))
right_facing_scrunched_64 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_right64.png'))
right_facing_crouch_list = [right_facing_scrunched, right_facing_scrunched_40, right_facing_scrunched_48,
right_facing_scrunched_56, right_facing_scrunched_64]
left_facing_scrunched = pygame.image.load(os.path.join('Assets', 'soul_scrunch_left.png'))
left_facing_scrunched_40 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_left40.png'))
left_facing_scrunched_48 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_left48.png'))
left_facing_scrunched_56 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_left56.png'))
left_facing_scrunched_64 = pygame.image.load(os.path.join('Assets', 'soul_scrunch_left64.png'))
left_facing_crouch_list = [left_facing_scrunched, left_facing_scrunched_40, left_facing_scrunched_48,
left_facing_scrunched_56, left_facing_scrunched_64]
def __init__(self, position, velocity):
self.__velocity = velocity
self.__size_index = 0
self.image = Player.right_facing_list[self.__size_index]
self.rect = self.image.get_rect()
self.rect.x = position.x
self.rect.y = position.y
self.on_ground = False
self.__moving_left = False
self.__moving_right = False
self.__facing_right = True
self.is_scrunched = False
@property
def size(self):
return self.__size_index
def grow(self):
if self.__size_index < 4:
self.__size_index += 1
@property
def velocity(self):
return self.__velocity
@velocity.setter
def velocity(self, velocity):
self.__velocity = velocity
@property
def moving_left(self):
return self.__moving_left
@moving_left.setter
def moving_left(self, start_moving_left):
if start_moving_left and not self.__moving_right:
self.__facing_right = False
self.__moving_left = start_moving_left
@property
def moving_right(self):
return self.__moving_right
@moving_right.setter
def moving_right(self, start_moving_right):
if start_moving_right and not self.__moving_right:
self.__facing_right = True
self.__moving_right = start_moving_right
@property
def growth_difference(self):
return 8
@property
def scrunch_difference(self):
return (Player.left_facing_list[self.__size_index].get_rect().height -
Player.left_facing_crouch_list[self.__size_index].get_rect().height)
def unscrunch(self):
if self.__facing_right:
self.image = Player.right_facing_list[self.__size_index]
else:
self.image = Player.left_facing_list[self.__size_index]
current_position = Vector(self.rect.x, self.rect.y)
self.rect = self.image.get_rect()
self.rect.x = current_position.x
self.rect.y = current_position.y - self.scrunch_difference
def scrunch(self):
if self.__facing_right:
self.image = Player.right_facing_crouch_list[self.__size_index]
else:
self.image = Player.left_facing_crouch_list[self.__size_index]
current_position = Vector(self.rect.x, self.rect.y)
self.rect = self.image.get_rect()
self.rect.x = current_position.x
self.rect.y = current_position.y + self.scrunch_difference
def update(self):
if self.__facing_right:
if self.is_scrunched:
self.image = Player.right_facing_crouch_list[self.__size_index]
else:
self.image = Player.right_facing_list[self.__size_index]
else:
if self.is_scrunched:
self.image = Player.left_facing_crouch_list[self.__size_index]
else:
self.image = Player.left_facing_list[self.__size_index]
current_position = Vector(self.rect.x, self.rect.y)
self.rect = self.image.get_rect()
self.rect.x = current_position.x
self.rect.y = current_position.y
class Platform:
"""
Holds the state of a basic platform object
"""
def __init__(self, position, image, id):
self.image = image
self.rect = self.image.get_rect()
self.rect.x = position.x
self.rect.y = position.y
self.__id = id
@property
def id(self):
return self.__id
@staticmethod
def create_standard_image(dimensions):
image = pygame.Surface(dimensions)
image.fill(color=(0, 255, 0))
return image
class GameScene(Scene):
move_left_vector = Vector(-3, 0)
move_right_vector = Vector(3, 0)
jump_vector = Vector(0, -4)
platform_thickness = 6
platform_75_image = pygame.image.load(os.path.join('Assets', 'platform_75.png'))
platform_50_image = pygame.image.load(os.path.join('Assets', 'platform_50.png'))
platform_477_image = pygame.image.load(os.path.join('Assets', 'platform_477.png'))
platform_315_image = pygame.image.load(os.path.join('Assets', 'platform_315.png'))
cave_image = pygame.image.load(os.path.join('Assets', 'cave.png'))
cave_full_image = pygame.image.load(os.path.join('Assets', 'cave_full.png'))
def __init__(self, screen_dimensions):
super(GameScene, self).__init__(screen_dimensions)
self.__player = Player(position=Vector(375, 0), velocity=Vector(0, 0))
soul_friend = Platform(Vector(500, 326), pygame.image.load(os.path.join('Assets', 'soul_friend_left56.png')), 'soul-friend')
breakable_door = Platform(Vector(285, 326), pygame.image.load(os.path.join('Assets', 'breakable_door.png')), 'breakable-door')
self.__platform_display_list = [soul_friend, breakable_door]
self.__platforms = [Platform(Vector(700, 125), Platform.create_standard_image([75, GameScene.platform_thickness]), 'top-right'),
Platform(Vector(25, 525), Platform.create_standard_image([75, GameScene.platform_thickness]), 'bottom-left'),
Platform(Vector(125, 260), Platform.create_standard_image([50, GameScene.platform_thickness]), 'center-left'),
Platform(Vector(180, 382), Platform.create_standard_image([475, GameScene.platform_thickness]), 'bottom-center'),
Platform(Vector(275, 320), Platform.create_standard_image([315, GameScene.platform_thickness]), 'center-center'),
Platform(Vector(300, 326), Platform.create_standard_image([50, 15]), 'hang-down'),
Platform(Vector(375, 270), Platform.create_standard_image([75, GameScene.platform_thickness]), 'top-center'),
Platform(Vector(575, 326), Platform.create_standard_image([10, 56]), 'back-wall'),
breakable_door,
soul_friend]
self.__top_right_toasties = Cereal(position=Vector(705, 0), id='top-right')
self.__bottom_left_toasties = Cereal(position=Vector(25, 0), id='bottom-left')
self.__cave_toasties = Cereal(position=Vector(425, 344), id='cave')
self.__cereal_boxes = [self.__cave_toasties]
# [K_a, K_d] - this holds the state of whether the key is pressed or not
self.__grunt_sound = pygame.mixer.Sound(os.path.join('Assets', 'grunt.ogg'))
self.__short_grunt_sound = pygame.mixer.Sound(os.path.join('Assets', 'grunt_short.ogg'))
self.__eating_sound = pygame.mixer.Sound(os.path.join('Assets', 'eating.ogg'))
self.__wall_collapse_sound = pygame.mixer.Sound(os.path.join('Assets', 'wall_collapse.ogg'))
self.__unscrunch_requested = False
self.__background = pygame.image.load(os.path.join('Assets', 'background.png'))
self.__cave_opened = False
self.__first_time_landed_on_platform = False
def __handle_player_off_screen(self):
self.__player_fallen_off_world = True
if self.__player.rect.x < 400:
self.__player.rect.x += 400
else:
self.__player.rect.x -= 400
self.__player.rect.y = 0
self.__player.on_ground = False
def reset(self):
pygame.mixer.music.load(os.path.join('Assets', 'intro.ogg'))
pygame.mixer.music.play(0)
self.__player.rect.x = 375
self.__player.rect.y = 0
self.__player.on_ground = False
def __apply_gravity(self):
GameScene.__apply_gravity_to_entity(self.__player)
for cereal_box in self.__cereal_boxes:
GameScene.__apply_gravity_to_entity(cereal_box)
@staticmethod
def __apply_gravity_to_entity(entity):
if entity.velocity.y == 0:
entity.velocity.y = 1
else:
entity.velocity.y += 0.15
def update(self, events):
self.__player.on_ground = self.__is_entity_on_ground(self.__player)
if self.__player.on_ground:
self.__player.velocity.x = 0
for cereal_box in self.__cereal_boxes:
if self.__is_entity_on_ground(cereal_box):
cereal_box.velocity.x = 0
# handle input
for event in events:
# handle clicking the X on the game window
if event.type == pygame.QUIT:
return 'exit'
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
self.__player.moving_left = True
elif event.key == pygame.K_d:
self.__player.moving_right = True
elif event.key == pygame.K_SPACE and self.__player.on_ground:
self.__player.velocity += GameScene.jump_vector
self.__grunt_sound.play(0)
elif event.key == pygame.K_s:
self.__unscrunch_requested = False
self.__player.is_scrunched = True
self.__player.scrunch()
elif event.key == pygame.K_ESCAPE:
# return exit to trigger graceful exit of program
return 'exit'
if event.type == pygame.KEYUP:
if event.key == pygame.K_a:
self.__player.moving_left = False
elif event.key == pygame.K_d:
self.__player.moving_right = False
elif event.key == pygame.K_s:
self.__unscrunch_requested = True
if self.__unscrunch_requested and self.__player.is_scrunched:
anticipated_position = self.__player.rect.copy()
anticipated_position.y -= self.__player.scrunch_difference
can_unscrunch = True
for platform in self.__platforms:
if anticipated_position.colliderect(platform.rect):
can_unscrunch = False
break
if can_unscrunch:
self.__player.unscrunch()
self.__player.is_scrunched = False
if self.__player.moving_left:
self.__player.velocity.x = -3
if self.__player.moving_right:
self.__player.velocity.x = 3
self.__apply_gravity()
self.__handle_platform_collisions()
self.__handle_entity_collisions()
if self.__player.rect.left < 0 or self.__player.rect.right > self.screen_width or self.__player.rect.top > self.screen_height:
self.__handle_player_off_screen()
self.__player.update()
def __handle_entity_collisions(self):
to_remove = []
spawn_next_toasties = False
for cereal_box in self.__cereal_boxes:
if self.__player.rect.colliderect(cereal_box.rect):
if cereal_box.id == self.__bottom_left_toasties.id:
spawn_next_toasties = True
if cereal_box.id == self.__cave_toasties.id:
pygame.mixer.music.stop()
pygame.mixer.music.load(os.path.join('Assets', 'escape.ogg'))
pygame.mixer.music.play(0)
self.__eating_sound.play(0)
self.__player.grow()
self.__player.rect.y -= self.__player.growth_difference
to_remove.append(cereal_box)
for cereal_box in to_remove:
self.__cereal_boxes.remove(cereal_box)
if spawn_next_toasties:
pygame.mixer.music.stop()
pygame.mixer.music.load(os.path.join('Assets', 'toasties.ogg'))
pygame.mixer.music.play(0)
self.__cereal_boxes.append(self.__top_right_toasties)
@staticmethod
def __check_entity_x_collision(entity, platform):
if entity.rect.colliderect(platform.rect):
# entity collides with a platform on their right
if entity.velocity.x > 0:
entity.rect.right = platform.rect.left
# entity collides with a platform on their left
elif entity.velocity.x < 0:
entity.rect.left = platform.rect.right
entity.velocity.x = 0
return True
return False
@staticmethod
def __check_entity_y_collision(entity, platform):
if entity.rect.colliderect(platform.rect):
# entity collides with a platform on their head
if entity.velocity.y < 0:
entity.rect.top = platform.rect.bottom
# entity collides standing on a platform
elif entity.velocity.y > 0:
entity.rect.bottom = platform.rect.top
entity.velocity.y = 0
return True
return False
def __handle_platform_collisions(self):
for cereal_box in self.__cereal_boxes:
cereal_box.rect.x += cereal_box.velocity.x
self.__player.rect.x += self.__player.velocity.x
to_remove = []
for platform in self.__platforms:
if GameScene.__check_entity_x_collision(self.__player, platform):
if platform.id == 'breakable-door' and self.__player.size > 1:
self.__cave_opened = True
self.__wall_collapse_sound.play(0)
to_remove.append(platform)
pygame.mixer.music.stop()
pygame.mixer.music.load(os.path.join('Assets', 'dont_come_in.ogg'))
pygame.mixer.music.play(0)
for cereal_box in self.__cereal_boxes:
GameScene.__check_entity_x_collision(cereal_box, platform)
for cereal_box in self.__cereal_boxes:
cereal_box.rect.y += cereal_box.velocity.y
self.__player.rect.y += self.__player.velocity.y
for platform in self.__platforms:
if GameScene.__check_entity_y_collision(self.__player, platform):
if platform.id == 'top-right' and self.__is_entity_on_ground(self.__player) and\
not self.__first_time_landed_on_platform:
self.__first_time_landed_on_platform = True
pygame.mixer.music.stop()
pygame.mixer.music.load(os.path.join('Assets', 'thinking.ogg'))
pygame.mixer.music.play(0)
self.__cereal_boxes.append(self.__bottom_left_toasties)
for cereal_box in self.__cereal_boxes:
GameScene.__check_entity_y_collision(cereal_box, platform)
for platform in to_remove:
self.__platforms.remove(platform)
self.__platform_display_list.remove(platform)
def __is_entity_on_ground(self, entity):
entity_rect = entity.rect.copy()
entity_rect.y += 2
for platform in self.__platforms:
if entity_rect.colliderect(platform.rect):
return True
return False
def draw(self, display):
# draw the black background
super(GameScene, self).draw(display)
display.blit(self.__background, (0, 0))
display.blit(GameScene.cave_image, (274, 315))
# draw the platforms
for platform in self.__platform_display_list:
display.blit(platform.image, (platform.rect.x, platform.rect.y))
for cereal_box in self.__cereal_boxes:
display.blit(cereal_box.image, (cereal_box.rect.x, cereal_box.rect.y))
# draw the player
display.blit(self.__player.image, (self.__player.rect.x, self.__player.rect.y))
display.blit(GameScene.platform_75_image, (699, 122))
display.blit(GameScene.platform_75_image, (24, 522))
display.blit(GameScene.platform_75_image, (374, 267))
display.blit(GameScene.platform_50_image, (124, 256))
display.blit(GameScene.platform_315_image, (274, 315))
if not self.__cave_opened:
display.blit(GameScene.cave_full_image, (274, 315))
display.blit(GameScene.platform_477_image, (179, 375))
def id(self):
return 'game_scene'
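# Hedged sketch (not part of the original module): one way a top-level driver
# might run this scene, inferred only from the interface used above
# (update(events) returning 'exit', draw(display), reset(), id()). The window
# size, frame rate, and setup calls are assumptions.
#
#   pygame.init()
#   display = pygame.display.set_mode((800, 600))
#   clock = pygame.time.Clock()
#   scene = GameScene(display.get_size())
#   scene.reset()
#   while scene.update(pygame.event.get()) != 'exit':
#       scene.draw(display)
#       pygame.display.flip()
#       clock.tick(60)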
|
|
"""
A Printer which converts an expression into its LaTeX equivalent.
"""
from sympy.core import S, C, Add
from sympy.core.function import _coeff_isneg
from printer import Printer
from conventions import split_super_sub
from sympy.simplify import fraction
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import prec_to_dps
from sympy.utilities import default_sort_key
import re, warnings
# Hand-picked functions which can be used directly in both LaTeX and MathJax
# Complete list at http://www.mathjax.org/docs/1.1/tex.html#supported-latex-commands
# This variable only contains those functions which sympy uses.
accepted_latex_functions = ['arcsin','arccos','arctan','sin','cos','tan',
'theta','beta','alpha','gamma','sinh','cosh','tanh','sqrt',
'ln','log','sec','csc','cot','coth','re','im','frac','root',
'arg','zeta','psi']
class LatexPrinter(Printer):
printmethod = "_latex"
_default_settings = {
"order": None,
"mode": "plain",
"itex": False,
"fold_frac_powers": False,
"fold_func_brackets": False,
"mul_symbol": None,
"inv_trig_style": "abbreviated",
"mat_str": "smallmatrix",
"mat_delim": "[",
"symbol_names": {},
}
def __init__(self, settings=None):
if settings is not None and 'inline' in settings and not settings['inline']:
# Change to "good" defaults for inline=False
settings['mat_str'] = 'bmatrix'
settings['mat_delim'] = None
Printer.__init__(self, settings)
        if 'inline' in self._settings:
warnings.warn("'inline' is deprecated, please use 'mode'. "
"'mode' can be one of 'inline', 'plain', 'equation', or "
"'equation*'.")
if self._settings['inline']:
self._settings['mode'] = 'inline'
else:
self._settings['mode'] = 'equation*'
if 'mode' in self._settings:
valid_modes = ['inline', 'plain', 'equation', \
'equation*']
if self._settings['mode'] not in valid_modes:
raise ValueError("'mode' must be one of 'inline', 'plain', " \
"'equation' or 'equation*'")
mul_symbol_table = {
None : r" ",
"ldot" : r" \,.\, ",
"dot" : r" \cdot ",
"times" : r" \times "
}
self._settings['mul_symbol_latex'] = \
mul_symbol_table[self._settings['mul_symbol']]
self._delim_dict = {'(':')','[':']'}
def doprint(self, expr):
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
def _needs_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed, False otherwise. For example: a + b => True; a => False;
10 => False; -10 => True.
"""
return not ((expr.is_Integer and expr.is_nonnegative)
or (expr.is_Atom and expr is not S.NegativeOne))
def _needs_function_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
def _mul_is_clean(self, expr):
for arg in expr.args:
if arg.is_Function:
return False
return True
def _pow_is_clean(self, expr):
return not self._needs_brackets(expr.base)
def _do_exponent(self, expr, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
def _print_Add(self, expr, order=None):
if self.order == 'none':
terms = list(expr.args)
else:
terms = self._as_ordered_terms(expr, order=order)
tex = self._print(terms[0])
for term in terms[1:]:
if not _coeff_isneg(term):
tex += " +"
tex += " " + self._print(term)
return tex
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=True)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
separator = r" \times "
if self._settings['mul_symbol'] is not None:
separator = self._settings['mul_symbol_latex']
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
return r"%s%s10^{%s}" % (mant, separator, exp)
elif str_real == "+inf":
return r"\infty"
elif str_real == "-inf":
return r"- \infty"
else:
return str_real
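    # Worked example (hedged, following the logic above): a Float whose mpmath
    # string form is scientific, e.g. "1.5e+20", prints as 1.5 \times 10^{20};
    # with mul_symbol='dot' the separator becomes \cdot, giving
    # 1.5 \cdot 10^{20}; the strings "+inf" and "-inf" map to \infty and
    # - \infty respectively.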
def _print_Mul(self, expr):
coeff, tail = expr.as_coeff_Mul()
if not coeff.is_negative:
tex = ""
else:
coeff = -coeff
tex = "- "
numer, denom = fraction(tail, exact=True)
separator = self._settings['mul_symbol_latex']
def convert(expr):
if not expr.is_Mul:
return str(self._print(expr))
else:
_tex = last_term_tex = ""
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
args = expr.args
for term in args:
pretty = self._print(term)
if term.is_Add:
term_tex = (r"\left(%s\right)" % pretty)
else:
term_tex = str(pretty)
# between two digits, \times must always be used,
# to avoid confusion
if separator == " " and \
re.search("[0-9][} ]*$", last_term_tex) and \
re.match("[{ ]*[-+0-9]", term_tex):
_tex += r" \times "
elif _tex:
_tex += separator
_tex += term_tex
last_term_tex = term_tex
return _tex
if denom is S.One:
if numer.is_Add:
_tex = r"\left(%s\right)" % convert(numer)
else:
_tex = r"%s" % convert(numer)
if coeff is not S.One:
tex += str(self._print(coeff))
# between two digits, \times must always be used, to avoid
# confusion
if separator == " " and re.search("[0-9][} ]*$", tex) and \
re.match("[{ ]*[-+0-9]", _tex):
tex += r" \times " + _tex
else:
tex += separator + _tex
else:
tex += _tex
else:
if numer is S.One:
if coeff.is_Integer:
numer *= coeff.p
elif coeff.is_Rational:
if coeff.p != 1:
numer *= coeff.p
denom *= coeff.q
elif coeff is not S.One:
tex += str(self._print(coeff)) + " "
else:
if coeff.is_Rational and coeff.p == 1:
denom *= coeff.q
elif coeff is not S.One:
tex += str(self._print(coeff)) + " "
tex += r"\frac{%s}{%s}" % \
(convert(numer), convert(denom))
return tex
def _print_Pow(self, expr):
# Treat x**Rational(1,n) as special case
if expr.exp.is_Rational and abs(expr.exp.p) == 1 and expr.exp.q != 1:
base = self._print(expr.base)
expq = expr.exp.q
if expq == 2:
tex = r"\sqrt{%s}" % base
elif self._settings['itex']:
tex = r"\root{%d}{%s}" % (expq,base)
else:
tex = r"\sqrt[%d]{%s}" % (expq,base)
if expr.exp.is_negative:
return r"\frac{1}{%s}" % tex
else:
return tex
elif self._settings['fold_frac_powers'] \
and expr.exp.is_Rational \
and expr.exp.q != 1:
base, p, q = self._print(expr.base), expr.exp.p, expr.exp.q
return r"%s^{%s/%s}" % (base, p, q)
elif expr.exp.is_Rational and expr.exp.is_negative and expr.base.is_Function:
# Things like 1/x
return r"\frac{%s}{%s}" % \
(1, self._print(C.Pow(expr.base, -expr.exp)))
else:
if expr.base.is_Function:
return self._print(expr.base, self._print(expr.exp))
else:
if expr.is_commutative and expr.exp == -1:
                    #solves issue 1030
                    #As Mul always simplifies 1/x to x**-1,
                    #the objective is achieved with this hack:
                    #first we get the latex for -1 * expr,
                    #which is a Mul expression
                    tex = self._print(S.NegativeOne * expr).strip()
                    #the result comes with a minus sign and a space, so we remove them
if tex[:1] == "-":
return tex[1:].strip()
if self._needs_brackets(expr.base):
tex = r"\left(%s\right)^{%s}"
else:
tex = r"%s^{%s}"
return tex % (self._print(expr.base),
self._print(expr.exp))
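    # Worked examples (hedged, traced from the branches above): x**Rational(1,2)
    # prints as \sqrt{x}; x**Rational(1,3) as \sqrt[3]{x} (or \root{3}{x} when
    # itex=True); x**Rational(-1,2) as \frac{1}{\sqrt{x}}; with
    # fold_frac_powers=True, x**Rational(2,3) becomes x^{2/3}; and a plain
    # x**-1 on a commutative base falls through to the Mul hack below and
    # prints as \frac{1}{x}.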
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
                    tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Product(self, expr):
if len(expr.limits) == 1:
tex = r"\prod_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
                    tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\prod_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Derivative(self, expr):
dim = len(expr.variables)
if dim == 1:
tex = r"\frac{\partial}{\partial %s}" % \
self._print(expr.variables[0])
else:
multiplicity, i, tex = [], 1, ""
current = expr.variables[0]
for symbol in expr.variables[1:]:
if symbol == current:
i = i + 1
else:
multiplicity.append((current, i))
current, i = symbol, 1
else:
multiplicity.append((current, i))
for x, i in multiplicity:
if i == 1:
tex += r"\partial %s" % self._print(x)
else:
tex += r"\partial^{%s} %s" % (i, self._print(x))
tex = r"\frac{\partial^{%s}}{%s} " % (dim, tex)
if isinstance(expr.expr, C.AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(expr.expr))
else:
return r"%s %s" % (tex, self._print(expr.expr))
def _print_Integral(self, expr):
tex, symbols = "", []
# Only up to \iiiint exists
if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
# Use len(expr.limits)-1 so that syntax highlighters don't think
# \" is an escaped quote
tex = r"\i" + "i"*(len(expr.limits)-1) + "nt"
symbols = [r"\, d%s" % self._print(symbol[0]) for symbol in expr.limits]
else:
for lim in reversed(expr.limits):
symbol = lim[0]
tex += r"\int"
if len(lim) > 1:
if self._settings['mode'] in ['equation','equation*'] \
and not self._settings['itex']:
tex += r"\limits"
if len(lim) == 3:
tex += "_{%s}^{%s}" % (self._print(lim[1]),
self._print(lim[2]))
if len(lim) == 2:
tex += "^{%s}" % (self._print(lim[1]))
symbols.insert(0, r"\, d%s" % self._print(symbol))
return r"%s %s%s" % (tex,
str(self._print(expr.function)), "".join(symbols))
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
tex = r"\lim_{%s \to %s}" % (self._print(z),
self._print(z0))
if isinstance(e, C.AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(e))
else:
return r"%s %s" % (tex, self._print(e))
def _print_Function(self, expr, exp=None):
func = expr.func.__name__
if hasattr(self, '_print_' + func):
return getattr(self, '_print_' + func)(expr, exp)
else:
args = [ str(self._print(arg)) for arg in expr.args ]
# How inverse trig functions should be displayed, formats are:
# abbreviated: asin, full: arcsin, power: sin^-1
inv_trig_style = self._settings['inv_trig_style']
# If we are dealing with a power-style inverse trig function
inv_trig_power_case = False
# If it is applicable to fold the argument brackets
can_fold_brackets = self._settings['fold_func_brackets'] and \
len(args) == 1 and \
not self._needs_function_brackets(expr.args[0])
inv_trig_table = ["asin", "acos", "atan", "acot"]
# If the function is an inverse trig function, handle the style
if func in inv_trig_table:
if inv_trig_style == "abbreviated":
func = func
elif inv_trig_style == "full":
func = "arc" + func[1:]
elif inv_trig_style == "power":
func = func[1:]
inv_trig_power_case = True
# Can never fold brackets if we're raised to a power
if exp is not None:
can_fold_brackets = False
if inv_trig_power_case:
if func in accepted_latex_functions:
name = r"\%s^{-1}" % func
else:
name = r"\operatorname{%s}^{-1}" % func
elif exp is not None:
if func in accepted_latex_functions:
name = r"\%s^{%s}" % (func,exp)
else:
# If the generic function name contains an underscore, handle it
name = r"\operatorname{%s}^{%s}" % (func.replace("_", r"\_"), exp)
else:
if func in accepted_latex_functions:
name = r"\%s" % func
else:
# If the generic function name contains an underscore, handle it
name = r"\operatorname{%s}" % func.replace("_", r"\_")
if can_fold_brackets:
if func in accepted_latex_functions:
# Wrap argument safely to avoid parse-time conflicts
# with the function name itself
name += r" {%s}"
else:
name += r"%s"
else:
name += r"{\left (%s \right )}"
if inv_trig_power_case and exp is not None:
name += r"^{%s}" % exp
return name % ",".join(args)
def _print_Lambda(self, expr):
symbols, expr = expr.args
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(tuple(symbols))
args = (symbols, self._print(expr))
tex = r"\Lambda {\left (%s \right )}" % ", ".join(args)
return tex
def _print_Min(self, expr, exp=None):
texargs = [r"%s" % self._print(symbol) for symbol in expr.args]
return r"\min\left(%s\right)" % ", ".join(texargs)
def _print_Max(self, expr, exp=None):
texargs = [r"%s" % self._print(symbol) for symbol in expr.args]
return r"\max\left(%s\right)" % ", ".join(texargs)
def _print_floor(self, expr, exp=None):
tex = r"\lfloor{%s}\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\lceil{%s}\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Abs(self, expr, exp=None):
tex = r"\lvert{%s}\rvert" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_re(self, expr, exp=None):
if self._needs_brackets(expr.args[0]):
tex = r"\Re {\left (%s \right )}" % self._print(expr.args[0])
else:
tex = r"\Re{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_im(self, expr, exp=None):
if self._needs_brackets(expr.args[0]):
tex = r"\Im {\left ( %s \right )}" % self._print(expr.args[0])
else:
tex = r"\Im{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_Not(self, e):
if (e.args[0].is_Boolean):
return r"\neg (%s)" % self._print(e.args[0])
else:
return r"\neg %s" % self._print(e.args[0])
def _print_And(self, e):
args = sorted(e.args, key=default_sort_key)
arg = args[0]
if arg.is_Boolean and not arg.is_Not:
tex = r"\left(%s\right)" % self._print(arg)
else:
tex = r"%s" % self._print(arg)
for arg in args[1:]:
if arg.is_Boolean and not arg.is_Not:
tex += r" \wedge \left(%s\right)" % (self._print(arg))
else:
tex += r" \wedge %s" % (self._print(arg))
return tex
def _print_Or(self, e):
args = sorted(e.args, key=default_sort_key)
arg = args[0]
if arg.is_Boolean and not arg.is_Not:
tex = r"\left(%s\right)" % self._print(arg)
else:
tex = r"%s" % self._print(arg)
for arg in args[1:]:
if arg.is_Boolean and not arg.is_Not:
tex += r" \vee \left(%s\right)" % (self._print(arg))
else:
tex += r" \vee %s" % (self._print(arg))
return tex
def _print_Implies(self, e):
return r"%s \Rightarrow %s" % (self._print(e.args[0]), self._print(e.args[1]))
def _print_Equivalent(self, e):
return r"%s \Leftrightarrow %s" % (self._print(e.args[0]), self._print(e.args[1]))
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ExpBase(self, expr, exp=None):
# TODO should exp_polar be printed differently?
# what about exp_polar(0), exp_polar(1)?
tex = r"e^{%s}" % self._print(expr.args[0])
return self._do_exponent(tex, exp)
def _print_gamma(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_uppergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_lowergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\gamma^{%s}%s" % (exp, tex)
else:
return r"\gamma%s" % tex
def _print_expint(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[1])
nu = self._print(expr.args[0])
if exp is not None:
return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex)
else:
return r"\operatorname{E}_{%s}%s" % (nu, tex)
def _print_factorial(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"\left(%s\right)!" % self._print(x)
else:
tex = self._print(x) + "!"
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial2(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"\left(%s\right)!!" % self._print(x)
else:
tex = self._print(x) + "!!"
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_binomial(self, expr, exp=None):
tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_RisingFactorial(self, expr, exp=None):
tex = r"{\left(%s\right)}^{\left(%s\right)}" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
return self._do_exponent(tex, exp)
def _print_FallingFactorial(self, expr, exp=None):
tex = r"{\left(%s\right)}_{\left(%s\right)}" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
return self._do_exponent(tex, exp)
def _hprint_BesselBase(self, expr, exp, sym):
tex = r"%s" % (sym)
need_exp = False
if exp is not None:
if tex.find('^') == -1:
tex = r"%s^{%s}" % (tex, self._print(exp))
else:
need_exp = True
tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order),
self._print(expr.argument))
if need_exp:
tex = self._do_exponent(tex, exp)
return tex
def _hprint_vec(self, vec):
if len(vec) == 0:
return ""
s = ""
for i in vec[:-1]:
s += "%s, " % self._print(i)
s += self._print(vec[-1])
return s
def _print_besselj(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'J')
def _print_besseli(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'I')
def _print_besselk(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'K')
def _print_bessely(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'Y')
def _print_yn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'y')
def _print_jn(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'j')
def _print_hankel1(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(1)}')
def _print_hankel2(self, expr, exp=None):
return self._hprint_BesselBase(expr, exp, 'H^{(2)}')
def _print_hyper(self, expr, exp=None):
tex = r"{{}_{%s}F_{%s}\left.\left(\begin{matrix} %s \\ %s \end{matrix}" \
r"\right| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._hprint_vec(expr.ap), self._hprint_vec(expr.bq),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, self._print(exp))
return tex
def _print_meijerg(self, expr, exp=None):
tex = r"{G_{%s, %s}^{%s, %s}\left.\left(\begin{matrix} %s & %s \\" \
r"%s & %s \end{matrix} \right| {%s} \right)}" % \
(self._print(len(expr.ap)), self._print(len(expr.bq)),
self._print(len(expr.bm)), self._print(len(expr.an)),
self._hprint_vec(expr.an), self._hprint_vec(expr.aother),
self._hprint_vec(expr.bm), self._hprint_vec(expr.bother),
self._print(expr.argument))
if exp is not None:
tex = r"{%s}^{%s}" % (tex, self._print(exp))
return tex
def _print_dirichlet_eta(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\eta^{%s}%s" % (self._print(exp), tex)
return r"\eta%s" % tex
def _print_zeta(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\zeta^{%s}%s" % (self._print(exp), tex)
return r"\zeta%s" % tex
def _print_lerchphi(self, expr, exp=None):
tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args))
if exp is None:
return r"\Phi%s" % tex
return r"\Phi^{%s}%s" % (self._print(exp), tex)
def _print_polylog(self, expr, exp=None):
s, z = map(self._print, expr.args)
tex = r"\left(%s\right)" % z
if exp is None:
return r"\operatorname{Li}_{%s}%s" % (s, tex)
return r"\operatorname{Li}_{%s}^{%s}%s" % (s, self._print(exp), tex)
def _print_Rational(self, expr):
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
def _print_Infinity(self, expr):
return r"\infty"
def _print_NegativeInfinity(self, expr):
return r"-\infty"
def _print_ComplexInfinity(self, expr):
return r"\tilde{\infty}"
def _print_ImaginaryUnit(self, expr):
return r"\mathbf{\imath}"
def _print_NaN(self, expr):
return r"\bot"
def _print_Pi(self, expr):
return r"\pi"
def _print_Exp1(self, expr):
return r"e"
def _print_EulerGamma(self, expr):
return r"\gamma"
def _print_Order(self, expr):
return r"\mathcal{O}\left(%s\right)" % \
self._print(expr.args[0])
def _print_Symbol(self, expr):
if expr in self._settings['symbol_names']:
return self._settings['symbol_names'][expr]
name, supers, subs = split_super_sub(expr.name)
# translate name, supers and subs to tex keywords
greek = set([ 'alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta',
'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu',
'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon',
'phi', 'chi', 'psi', 'omega' ])
greek_translated = {'lamda': 'lambda', 'Lamda': 'Lambda'}
other = set( ['aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth',
'hbar', 'hslash', 'mho' ])
def translate(s):
tmp = s.lower()
if tmp in greek or tmp in other:
return "\\" + s
if s in greek_translated:
return "\\" + greek_translated[s]
else:
return s
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
# glue all items together:
if len(supers) > 0:
name += "^{%s}" % " ".join(supers)
if len(subs) > 0:
name += "_{%s}" % " ".join(subs)
return name
def _print_Relational(self, expr):
if self._settings['itex']:
gt = r"\gt"
lt = r"\lt"
else:
gt = ">"
lt = "<"
charmap = {
"==" : "=",
">" : gt,
"<" : lt,
">=" : r"\geq",
"<=" : r"\leq",
"!=" : r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
def _print_Piecewise(self, expr):
ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c)) \
for e, c in expr.args[:-1]]
if expr.args[-1].cond == True:
ecpairs.append(r"%s & \text{otherwise}" % \
self._print(expr.args[-1].expr))
else:
ecpairs.append(r"%s & \text{for}\: %s" % \
(self._print(expr.args[-1].expr),
self._print(expr.args[-1].cond)))
tex = r"\begin{cases} %s \end{cases}"
return tex % r" \\".join(ecpairs)
def _print_MatrixBase(self, expr):
lines = []
for line in range(expr.rows): # horrible, should be 'rows'
lines.append(" & ".join([ self._print(i) for i in expr[line,:] ]))
out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
out_str = out_str.replace('%MATSTR%', self._settings['mat_str'])
if self._settings['mat_delim']:
left_delim = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
out_str = r'\left' + left_delim + out_str + \
r'\right' + right_delim
return out_str % r"\\".join(lines)
_print_ImmutableMatrix = _print_MatrixBase
_print_MutableMatrix = _print_MatrixBase
def _print_BlockMatrix(self, expr):
return self._print(expr.mat)
def _print_Transpose(self, expr):
mat = expr.arg
if mat.is_Add or mat.is_Mul:
return r"\left(%s\right)^T"%self._print(mat)
else:
return "%s^T"%self._print(mat)
def _print_MatAdd(self, expr):
return self._print_Add(expr)
def _print_MatMul(self, expr):
return self._print_Mul(expr)
def _print_MatPow(self, expr):
base, exp = expr.base, expr.exp
if base.is_Add or base.is_Mul:
return r"\left(%s\right)^{%s}"%(self._print(base), self._print(exp))
else:
return "%s^{%s}"%(self._print(base), self._print(exp))
def _print_ZeroMatrix(self, Z):
return r"\bold{0}"
def _print_Identity(self, I):
return r"\mathbb{I}"
def _print_tuple(self, expr):
return r"\begin{pmatrix}%s\end{pmatrix}" % \
r", & ".join([ self._print(i) for i in expr ])
def _print_Tuple(self, expr):
return self._print_tuple(expr)
def _print_list(self, expr):
return r"\begin{bmatrix}%s\end{bmatrix}" % \
r", & ".join([ self._print(i) for i in expr ])
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for key in keys:
val = d[key]
items.append("%s : %s" % (self._print(key), self._print(val)))
return r"\begin{Bmatrix}%s\end{Bmatrix}" % r", & ".join(items)
def _print_Dict(self, expr):
return self._print_dict(expr)
def _print_DiracDelta(self, expr):
if len(expr.args) == 1 or expr.args[1] == 0:
tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (\
self._print(expr.args[1]), self._print(expr.args[0]))
return tex
def _print_ProductSet(self, p):
return r" \cross ".join(self._print(set) for set in p.sets)
def _print_RandomDomain(self, d):
try:
return 'Domain: '+ self._print(d.as_boolean())
except:
try:
return ('Domain: ' + self._print(d.symbols) + ' in ' +
self._print(d.set))
except:
return 'Domain on ' + self._print(d.symbols)
def _print_FiniteSet(self, s):
if len(s) > 10:
printset = s.args[:3] + ('...',) + s.args[-3:]
else:
printset = s.args
return (r"\left\{"
+ r", ".join(self._print(el) for el in printset)
+ r"\right\}")
_print_frozenset = _print_FiniteSet
_print_set = _print_FiniteSet
def _print_Interval(self, i):
if i.start == i.end:
return r"\left{%s\right}" % self._print(i.start)
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return r"\left%s%s, %s\right%s" % \
(left, self._print(i.start), self._print(i.end), right)
def _print_Union(self, u):
return r" \cup ".join([self._print(i) for i in u.args])
def _print_Intersection(self, u):
return r" \cap ".join([self._print(i) for i in u.args])
def _print_EmptySet(self, e):
return r"\emptyset"
def _print_FiniteField(self, expr):
return r"\mathbb{F}_{%s}" % expr.mod
def _print_IntegerRing(self, expr):
return r"\mathbb{Z}"
def _print_RationalField(self, expr):
return r"\mathbb{Q}"
def _print_RealDomain(self, expr):
return r"\mathbb{R}"
def _print_ComplexDomain(self, expr):
return r"\mathbb{C}"
def _print_PolynomialRing(self, expr):
domain = self._print(expr.dom)
gens = ", ".join(map(self._print, expr.gens))
return r"%s\left\[%s\right\]" % (domain, gens)
def _print_FractionField(self, expr):
domain = self._print(expr.dom)
gens = ", ".join(map(self._print, expr.gens))
return r"%s\left(%s\right)" % (domain, gens)
def _print_Poly(self, poly):
cls = poly.__class__.__name__
expr = self._print(poly.as_expr())
gens = map(self._print, poly.gens)
domain = "domain=%s" % self._print(poly.get_domain())
args = ", ".join([expr] + gens + [domain])
if cls in accepted_latex_functions:
tex = r"\%s {\left (%s \right )}" % (cls, args)
else:
tex = r"\operatorname{%s}{\left( %s \right)}" % (cls, args)
return tex
def _print_RootOf(self, root):
cls = root.__class__.__name__
expr = self._print(root.expr)
index = root.index
if cls in accepted_latex_functions:
return r"\%s {\left(%s, %d\right)}" % (cls, expr, index)
else:
return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr, index)
def _print_RootSum(self, expr):
cls = expr.__class__.__name__
args = [self._print(expr.expr)]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
if cls in accepted_latex_functions:
return r"\%s {\left(%s\right)}" % (cls, ", ".join(args))
else:
return r"\operatorname{%s} {\left(%s\right)}" % (cls, ", ".join(args))
def _print_euler(self, expr):
return r"E_{%s}" % self._print(expr.args[0])
def _print_catalan(self, expr):
return r"C_{%s}" % self._print(expr.args[0])
def _print_MellinTransform(self, expr):
return r"\mathcal{M}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseMellinTransform(self, expr):
return r"\mathcal{M}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_LaplaceTransform(self, expr):
return r"\mathcal{L}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseLaplaceTransform(self, expr):
return r"\mathcal{L}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_FourierTransform(self, expr):
return r"\mathcal{F}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseFourierTransform(self, expr):
return r"\mathcal{F}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_SineTransform(self, expr):
return r"\mathcal{SIN}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseSineTransform(self, expr):
return r"\mathcal{SIN}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_CosineTransform(self, expr):
return r"\mathcal{COS}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def _print_InverseCosineTransform(self, expr):
return r"\mathcal{COS}^{-1}_{%s}\left[%s\right]\left(%s\right)" % (self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
def latex(expr, **settings):
r"""
Convert the given expression to LaTeX representation.
>>> from sympy import latex, sin, asin, Matrix, Rational
>>> from sympy.abc import x, y, mu, tau
>>> latex((2*tau)**Rational(7,2))
'8 \\sqrt{2} \\tau^{\\frac{7}{2}}'
order: Any of the supported monomial orderings (currently "lex", "grlex", or
"grevlex"), "old", and "none". This parameter does nothing for Mul objects.
Setting order to "old" uses the compatibility ordering for Add defined in
Printer. For very large expressions, set the 'order' keyword to 'none' if
speed is a concern.
mode: Specifies how the generated code will be delimited. 'mode' can be one
of 'plain', 'inline', 'equation' or 'equation*'. If 'mode' is set to
'plain', then the resulting code will not be delimited at all (this is the
default). If 'mode' is set to 'inline' then inline LaTeX $ $ will be used.
If 'mode' is set to 'equation' or 'equation*', the resulting code will be
enclosed in the 'equation' or 'equation*' environment (remember to import
'amsmath' for 'equation*'), unless the 'itex' option is set. In the latter
case, the ``$$ $$`` syntax is used.
>>> latex((2*mu)**Rational(7,2), mode='plain')
'8 \\sqrt{2} \\mu^{\\frac{7}{2}}'
>>> latex((2*tau)**Rational(7,2), mode='inline')
'$8 \\sqrt{2} \\tau^{\\frac{7}{2}}$'
>>> latex((2*mu)**Rational(7,2), mode='equation*')
'\\begin{equation*}8 \\sqrt{2} \\mu^{\\frac{7}{2}}\\end{equation*}'
>>> latex((2*mu)**Rational(7,2), mode='equation')
'\\begin{equation}8 \\sqrt{2} \\mu^{\\frac{7}{2}}\\end{equation}'
itex: Specifies if itex-specific syntax is used, including emitting ``$$ $$``.
>>> latex((2*mu)**Rational(7,2), mode='equation', itex=True)
'$$8 \\sqrt{2} \\mu^{\\frac{7}{2}}$$'
fold_frac_powers: Emit "^{p/q}" instead of "^{\frac{p}{q}}" for fractional
powers.
>>> latex((2*tau)**Rational(7,2), fold_frac_powers=True)
'8 \\sqrt{2} \\tau^{7/2}'
fold_func_brackets: Fold function brackets where applicable.
>>> latex((2*tau)**sin(Rational(7,2)))
'\\left(2 \\tau\\right)^{\\sin{\\left (\\frac{7}{2} \\right )}}'
>>> latex((2*tau)**sin(Rational(7,2)), fold_func_brackets = True)
'\\left(2 \\tau\\right)^{\\sin {\\frac{7}{2}}}'
mul_symbol: The symbol to use for multiplication. Can be one of None,
"ldot", "dot", or "times".
>>> latex((2*tau)**sin(Rational(7,2)), mul_symbol="times")
'\\left(2 \\times \\tau\\right)^{\\sin{\\left (\\frac{7}{2} \\right )}}'
inv_trig_style: How inverse trig functions should be displayed. Can be one
of "abbreviated", "full", or "power". Defaults to "abbreviated".
>>> latex(asin(Rational(7,2)))
'\\operatorname{asin}{\\left (\\frac{7}{2} \\right )}'
>>> latex(asin(Rational(7,2)), inv_trig_style="full")
'\\arcsin{\\left (\\frac{7}{2} \\right )}'
>>> latex(asin(Rational(7,2)), inv_trig_style="power")
'\\sin^{-1}{\\left (\\frac{7}{2} \\right )}'
mat_str: Which matrix environment string to emit. "smallmatrix", "bmatrix",
etc. Defaults to "smallmatrix".
>>> latex(Matrix(2, 1, [x, y]), mat_str = "array")
'\\left[\\begin{array}x\\\\y\\end{array}\\right]'
mat_delim: The delimiter to wrap around matrices. Can be one of "[", "(",
or the empty string. Defaults to "[".
>>> latex(Matrix(2, 1, [x, y]), mat_delim="(")
'\\left(\\begin{smallmatrix}x\\\\y\\end{smallmatrix}\\right)'
symbol_names: Dictionary of symbols and the custom strings they should be
emitted as.
>>> latex(x**2, symbol_names={x:'x_i'})
'x_i^{2}'
Besides all Basic based expressions, you can recursively
convert Python containers (lists, tuples and dicts) and
also SymPy matrices:
>>> latex([2/x, y], mode='inline')
'$\\begin{bmatrix}\\frac{2}{x}, & y\\end{bmatrix}$'
"""
return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
"""Prints LaTeX representation of the given expression."""
print latex(expr, **settings)
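# Illustrative sketch (not part of the original module): the printer dispatches on
# method names of the form _print_<ClassName> (see _print_Function above), so new
# object types can be supported by subclassing LatexPrinter. MyWrapper and
# MyLatexPrinter below are hypothetical names used only for this example.
def _example_custom_latex_printer():
    class MyWrapper(object):
        def __init__(self, inner):
            self.inner = inner
    class MyLatexPrinter(LatexPrinter):
        def _print_MyWrapper(self, expr):
            # wrap whatever the inner object prints as in angle brackets
            return r"\left\langle %s \right\rangle" % self._print(expr.inner)
    return MyLatexPrinter().doprint(MyWrapper("x"))  # '\\left\\langle x \\right\\rangle'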
|
|
import numpy as np
import skimage.feature
import sys
from VideoReader import *
from scipy.ndimage.filters import convolve
from scipy.io import loadmat
import cv
import cv2
def findLanesConvolution(img, origSize=(960,1280), lastCols=[None, None], lastLine=[None,None,None,None], P=np.eye(3), responseOnlyNearLastCols=False, frame_rate=1):
(rows, cols, channels) = img.shape
border_img = np.zeros((rows,cols,3))
border_img = border_img.astype(np.uint8)
border_img[:,:15] = 255
border_img[:,-15:] = 255
border_img[-15:,:] = 255
img = cv2.warpPerspective(img, P, (cols, rows))
img = img.astype(np.float64)
border_img = cv2.warpPerspective(border_img, P, (cols, rows))
"""
#idea for windowing around current lastCol, but could be bad as possible
#to not recover on a bad misdetection
if responseOnlyNearLastCols == True:
left_lane_min_x = max(0,lastCols[0]-40);
left_lane_max_x = lastCols[0]+40;
right_lane_min_x = lastCols[1]-40;
right_lane_max_x = min(cols,lastCols[1]+40);
img[:,0:left_lane_min_x] = 0
img[:,left_lane_max_x:right_lane_min_x] = 0
img[:,right_lane_max_x:] = 0
m_left = np.mean(np.mean(img[:,left_lane_min_x : left_lane_max_x],axis=0),axis=0)
m_right = np.mean(np.mean(img[:,right_lane_min_x : right_lane_max_x],axis=0),axis=0)
print m_left
print np.max(img[:,:,0])
img[:, left_lane_min_x : left_lane_max_x,:] -= m_left
img[:, right_lane_min_x : right_lane_max_x,:] -= m_right
print np.max(img[:,:,0])
else:
"""
# mean subtraction on image, clamp to 0
m = np.mean(np.mean(img[:,:],axis=0),axis=0)
img = img - m
img[img < 0] = 0
img = 255 * img / np.max(img)
# set max_lane_size to about 20 in the 1280x960 image
max_lane_size = int(np.round(origSize[1] / 80)) # approximation of lane width
if max_lane_size % 2 == 1:
max_lane_size += 1
O = np.zeros((rows, cols, channels))
lane_width = max_lane_size
v = 4*np.array([np.concatenate([-1*np.ones(lane_width), 2*np.ones(lane_width+1), -1*np.ones(lane_width)])])
v = v/v.size
O_1 = np.round(convolve(img[:,:,0], v, mode='reflect')).reshape((rows,cols,1))
O_2 = np.round(convolve(img[:,:,1], v, mode='reflect')).reshape((rows,cols,1))
O_3 = np.round(convolve(img[:,:,2], v, mode='reflect')).reshape((rows,cols,1))
O = cv2.merge([O_1, O_2, O_3])
# get rid of perspective transform border detections
O[border_img > 0] = 0
# thresholding for lane detection
#white_lane_detect = np.sum(O,axis=2) > 350
white_lane_detect = np.logical_and(O[:,:,0] > 150, np.logical_and(O[:,:,1] > 150, O[:,:,2] > 150))
#yellow_lane_detect = np.logical_and(O[:,:,1] + O[:,:,2] > 90, O[:,:,0] < 20)
eps = 0.000001
yellow_lane_detect = np.logical_and(((O[:,:,1] + O[:,:,2]) / (eps + O[:,:,0]) ) > 5, O[:,:,1] + O[:,:,2] > 150)
low_vals = np.logical_and(np.logical_not(white_lane_detect), np.logical_not(yellow_lane_detect))
O[low_vals,:] = 0
# increase yellow lane detection score
O[yellow_lane_detect,:] *= 5
column_O = np.sum(np.sum(O,axis=2),axis=0);
column_O[column_O < 1000] = 0
O[:,column_O < 1000,:] = 0
#O = cv2.warpPerspective(O, P, (cols, rows), flags=cv.CV_WARP_INVERSE_MAP)
return (O, lastCols, lastLine)
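# Illustrative sketch (not part of the original pipeline): the horizontal kernel built
# above, [-1]*w + [2]*(w+1) + [-1]*w, acts as a matched filter that responds strongly
# to a bright stripe roughly w pixels wide on a darker background. The synthetic row
# below is made up purely to show the shape of the response.
def _demo_lane_kernel(w=3):
    kernel = 4 * np.array([np.concatenate([-1 * np.ones(w),
                                           2 * np.ones(w + 1),
                                           -1 * np.ones(w)])])
    kernel = kernel / kernel.size
    row = np.zeros((1, 40))
    row[0, 18:22] = 255.0   # a bright, lane-like stripe about w pixels wide
    # positive over the stripe, negative just outside it, zero on the empty background
    return convolve(row, kernel, mode='reflect')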
def findLanes(O, origSize=(960,1280), lastCols=[None, None], lastLine=[None,None,None,None], P=np.eye(3), responseOnlyNearLastCols=False, frame_rate=1):
O = O.astype(np.float64)
(rows, cols, channels) = O.shape
max_lane_size = int(np.round(origSize[1] / 96)) # approximation of lane width
if max_lane_size % 2 == 1:
max_lane_size += 1
#O = cv2.warpPerspective(O, P, (cols, rows))
"""
#mean subtract output image
m = np.mean(np.mean(O[rows/2:rows,:],axis=0),axis=0)
O = O - m
O[O < 0] = 0
"""
    # zero out the top of the image so we don't fit a column to it
    # (the 0* factor currently disables this step)
    O[0:0*rows/4,:] = 0
    # Find the midpoint and sub-midpoints of lastCols for
    # zero-ing the center of the lane
midpoint_lastCols = 0.5*(lastCols[0] + lastCols[1])
mid_left = 0.5*(lastCols[0] + midpoint_lastCols);
mid_right = 0.5*(lastCols[1] + midpoint_lastCols);
O[:, mid_left:mid_right,:] = 0
# compute the sum of activations in each column and find the max
# responding column on the left and right sides
column_O = np.sum(np.sum(O,axis=2),axis=0);
column_O[column_O < 8000] = 0
O[:,column_O < 8000,:] = 0
top_k = 15
top_k = min(top_k, np.nonzero(column_O[0:midpoint_lastCols])[0].size)
top_k = min(top_k, np.nonzero(column_O[midpoint_lastCols:])[0].size)
resp_left = np.copy(column_O[0:midpoint_lastCols].argsort()[-top_k:][::-1])
min_left = np.argmax(np.abs(resp_left)) # closest to midpoint
resp_left = resp_left[min_left]
resp_right = np.copy(column_O[midpoint_lastCols:].argsort()[-top_k:][::-1])
min_right = np.argmin(np.abs(resp_right)) #closest to midpoint
resp_right = resp_right[min_right] + midpoint_lastCols
# was there a detection on either side?
LEFT_LANE_DETECTION = np.max(column_O[0: midpoint_lastCols]) != 0
RIGHT_LANE_DETECTION = np.max(column_O[midpoint_lastCols:]) != 0
    # fall back to the previous column on the side with no detection
    if not LEFT_LANE_DETECTION:
        resp_left = lastCols[0]
    if not RIGHT_LANE_DETECTION:
        resp_right = lastCols[1]
# how to move the columns based on previous cols
left_mom = 0.05
if LEFT_LANE_DETECTION:
if abs(lastCols[0] - resp_left) < 2:
left_mom = 0.5
elif abs(lastCols[0] - resp_left) < 5:
left_mom = 0.3
elif abs(lastCols[0] - resp_left) < 10:
left_mom = 0.15
left_mom = 1 - (1 - left_mom) ** frame_rate
lastCols[0] = left_mom*resp_left + (1 - left_mom)*lastCols[0]
right_mom = 0.05
if RIGHT_LANE_DETECTION:
if abs(lastCols[1] - resp_right) < 2:
right_mom = 0.5
elif abs(lastCols[1] - resp_right) < 5:
right_mom = 0.3
elif abs(lastCols[1] - resp_right) < 10:
right_mom = 0.15
right_mom = 1 - (1 - right_mom) ** frame_rate
lastCols[1] = right_mom*resp_right + (1 - right_mom)*lastCols[1]
# only consider detections around lastCol
if LEFT_LANE_DETECTION:
left_lane_min_x = lastCols[0]-max_lane_size;
left_lane_max_x = lastCols[0]+max_lane_size;
else:
left_lane_min_x = midpoint_lastCols
left_lane_max_x = midpoint_lastCols
if RIGHT_LANE_DETECTION:
right_lane_min_x = lastCols[1]-max_lane_size;
right_lane_max_x = lastCols[1]+max_lane_size;
else:
right_lane_min_x = cols
right_lane_max_x = cols
O[:,0:left_lane_min_x] = 0
O[:,left_lane_max_x:right_lane_min_x] = 0
O[:,right_lane_max_x:] = 0
#too_far = 9/16.0
#too_close = 6.5/16.0
too_far = 5.5/16.0
too_close = 4.5/16.0
# if the cols want to move too close or too far, push them away/closer
if lastCols[0] is not None and lastCols[1] is not None:
if lastCols[1] - lastCols[0] > too_far*cols:
if LEFT_LANE_DETECTION:
lastCols[0] = midpoint_lastCols - too_far/2*cols;
if RIGHT_LANE_DETECTION:
lastCols[1] = midpoint_lastCols + too_far/2*cols;
if lastCols[1] - lastCols[0] < too_close*cols:
if midpoint_lastCols < too_close * cols / 2:
midpoint_lastCols = too_close * cols / 2
if midpoint_lastCols > cols - too_close * cols / 2:
          midpoint_lastCols = cols - too_close * cols / 2
if LEFT_LANE_DETECTION:
lastCols[0] = midpoint_lastCols - too_close/2*cols;
if RIGHT_LANE_DETECTION:
lastCols[1] = midpoint_lastCols + too_close/2*cols;
O = cv2.warpPerspective(O, P, (cols, rows), flags=cv.CV_WARP_INVERSE_MAP)
return (O, lastCols, lastLine)
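# Illustrative sketch (not part of the original pipeline): the tracking above blends a
# new column detection into lastCols with an exponential-moving-average weight, and
# mom = 1 - (1 - mom) ** frame_rate rescales that weight so that skipping frames
# (frame_rate > 1) converges at roughly the same per-second rate. The numbers below
# are made up purely to show the effect.
def _demo_column_momentum(last_col=300.0, detected_col=320.0, mom=0.3, frame_rate=3):
    effective_mom = 1 - (1 - mom) ** frame_rate       # 0.657 for mom=0.3, frame_rate=3
    return effective_mom * detected_col + (1 - effective_mom) * last_col   # ~313.1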
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from itertools import product
from fractions import Fraction
from abc import ABCMeta, abstractmethod
from collections.abc import Sequence
import numpy as np
import warnings
import re
from monty.serialization import loadfn
from pymatgen.core.operations import SymmOp
from monty.design_patterns import cached_class
"""
Defines SymmetryGroup parent class and PointGroup and SpaceGroup classes.
Shyue Ping Ong thanks Marc De Graef for his generous sharing of his
SpaceGroup data as published in his textbook "Structure of Materials".
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "4/4/14"
SYMM_DATA = None
def get_symm_data(name):
global SYMM_DATA
if SYMM_DATA is None:
SYMM_DATA = loadfn(os.path.join(os.path.dirname(__file__),
"symm_data.json"))
return SYMM_DATA[name]
class SymmetryGroup(Sequence, metaclass=ABCMeta):
@property
@abstractmethod
def symmetry_ops(self):
pass
def __contains__(self, item):
for i in self.symmetry_ops:
if np.allclose(i.affine_matrix, item.affine_matrix):
return True
return False
def __hash__(self):
return self.__len__()
def __getitem__(self, item):
return self.symmetry_ops[item]
def __len__(self):
return len(self.symmetry_ops)
def is_subgroup(self, supergroup):
"""
True if this group is a subgroup of the supplied group.
Args:
supergroup (SymmetryGroup): Supergroup to test.
Returns:
True if this group is a subgroup of the supplied group.
"""
warnings.warn("This is not fully functional. Only trivial subsets are tested right now. ")
return set(self.symmetry_ops).issubset(supergroup.symmetry_ops)
def is_supergroup(self, subgroup):
"""
True if this group is a supergroup of the supplied group.
Args:
subgroup (SymmetryGroup): Subgroup to test.
Returns:
True if this group is a supergroup of the supplied group.
"""
warnings.warn("This is not fully functional. Only trivial subsets are "
"tested right now. ")
return set(subgroup.symmetry_ops).issubset(self.symmetry_ops)
@cached_class
class PointGroup(SymmetryGroup):
"""
Class representing a Point Group, with generators and symmetry operations.
.. attribute:: symbol
Full International or Hermann-Mauguin Symbol.
.. attribute:: generators
List of generator matrices. Note that 3x3 matrices are used for Point
Groups.
.. attribute:: symmetry_ops
Full set of symmetry operations as matrices.
"""
def __init__(self, int_symbol):
"""
Initializes a Point Group from its international symbol.
Args:
int_symbol (str): International or Hermann-Mauguin Symbol.
"""
self.symbol = int_symbol
self.generators = [get_symm_data("generator_matrices")[c]
for c in get_symm_data("point_group_encoding")[int_symbol]]
self._symmetry_ops = set([SymmOp.from_rotation_and_translation(m)
for m in self._generate_full_symmetry_ops()])
self.order = len(self._symmetry_ops)
@property
def symmetry_ops(self):
return self._symmetry_ops
def _generate_full_symmetry_ops(self):
symm_ops = list(self.generators)
new_ops = self.generators
while len(new_ops) > 0:
gen_ops = []
for g1, g2 in product(new_ops, symm_ops):
op = np.dot(g1, g2)
if not in_array_list(symm_ops, op):
gen_ops.append(op)
symm_ops.append(op)
new_ops = gen_ops
return symm_ops
def get_orbit(self, p, tol=1e-5):
"""
Returns the orbit for a point.
Args:
p: Point as a 3x1 array.
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
([array]) Orbit for point.
"""
orbit = []
for o in self.symmetry_ops:
pp = o.operate(p)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
return orbit
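# Illustrative sketch (not part of the original module): a PointGroup is constructed
# from its Hermann-Mauguin symbol and can report the orbit of a fractional point. It
# needs the bundled symm_data.json; the symbol and point below are just examples.
def _example_point_group_orbit():
    pg = PointGroup("m-3m")                    # full cubic point group, 48 operations
    orbit = pg.get_orbit([0.1, 0.2, 0.3])      # images of a general position
    return len(pg.symmetry_ops), len(orbit)    # (48, 48)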
@cached_class
class SpaceGroup(SymmetryGroup):
"""
Class representing a SpaceGroup.
.. attribute:: symbol
Full International or Hermann-Mauguin Symbol.
.. attribute:: int_number
International number
.. attribute:: generators
List of generator matrices. Note that 4x4 matrices are used for Space
Groups.
.. attribute:: order
Order of Space Group
"""
SYMM_OPS = loadfn(os.path.join(os.path.dirname(__file__),
"symm_ops.json"))
SG_SYMBOLS = set(get_symm_data("space_group_encoding").keys())
for op in SYMM_OPS:
op["hermann_mauguin"] = re.sub(r" ", "", op["hermann_mauguin"])
op["universal_h_m"] = re.sub(r" ", "", op["universal_h_m"])
SG_SYMBOLS.add(op["hermann_mauguin"])
SG_SYMBOLS.add(op["universal_h_m"])
gen_matrices = get_symm_data("generator_matrices")
# POINT_GROUP_ENC = SYMM_DATA["point_group_encoding"]
sgencoding = get_symm_data("space_group_encoding")
abbrev_sg_mapping = get_symm_data("abbreviated_spacegroup_symbols")
translations = {k: Fraction(v) for k, v in get_symm_data(
"translations").items()}
full_sg_mapping = {
v["full_symbol"]: k
for k, v in get_symm_data("space_group_encoding").items()}
def __init__(self, int_symbol):
"""
Initializes a Space Group from its full or abbreviated international
symbol. Only standard settings are supported.
Args:
int_symbol (str): Full International (e.g., "P2/m2/m2/m") or
Hermann-Mauguin Symbol ("Pmmm") or abbreviated symbol. The
notation is a LaTeX-like string, with screw axes being
represented by an underscore. For example, "P6_3/mmc".
                Alternative settings can be accessed by adding a ":identifier".
For example, the hexagonal setting for rhombohedral cells can be
accessed by adding a ":H", e.g., "R-3m:H". To find out all
possible settings for a spacegroup, use the get_settings
classmethod. Alternative origin choices can be indicated by a
translation vector, e.g., 'Fm-3m(a-1/4,b-1/4,c-1/4)'.
"""
int_symbol = re.sub(r" ", "", int_symbol)
if int_symbol in SpaceGroup.abbrev_sg_mapping:
int_symbol = SpaceGroup.abbrev_sg_mapping[int_symbol]
elif int_symbol in SpaceGroup.full_sg_mapping:
int_symbol = SpaceGroup.full_sg_mapping[int_symbol]
for spg in SpaceGroup.SYMM_OPS:
if int_symbol in [spg["hermann_mauguin"], spg["universal_h_m"]]:
ops = [SymmOp.from_xyz_string(s) for s in spg["symops"]]
self.symbol = re.sub(r":", "",
re.sub(r" ", "", spg["universal_h_m"]))
if int_symbol in SpaceGroup.sgencoding:
self.full_symbol = SpaceGroup.sgencoding[int_symbol]["full_symbol"]
self.point_group = SpaceGroup.sgencoding[int_symbol]["point_group"]
else:
self.full_symbol = re.sub(r" ", "",
spg["universal_h_m"])
self.point_group = spg["schoenflies"]
self.int_number = spg["number"]
self.order = len(ops)
self._symmetry_ops = ops
break
else:
if int_symbol not in SpaceGroup.sgencoding:
raise ValueError("Bad international symbol %s" % int_symbol)
data = SpaceGroup.sgencoding[int_symbol]
self.symbol = int_symbol
# TODO: Support different origin choices.
enc = list(data["enc"])
inversion = int(enc.pop(0))
ngen = int(enc.pop(0))
symm_ops = [np.eye(4)]
if inversion:
symm_ops.append(np.array(
[[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
[0, 0, 0, 1]]))
for i in range(ngen):
m = np.eye(4)
m[:3, :3] = SpaceGroup.gen_matrices[enc.pop(0)]
m[0, 3] = SpaceGroup.translations[enc.pop(0)]
m[1, 3] = SpaceGroup.translations[enc.pop(0)]
m[2, 3] = SpaceGroup.translations[enc.pop(0)]
symm_ops.append(m)
self.generators = symm_ops
self.full_symbol = data["full_symbol"]
self.point_group = data["point_group"]
self.int_number = data["int_number"]
self.order = data["order"]
self._symmetry_ops = None
def _generate_full_symmetry_ops(self):
symm_ops = np.array(self.generators)
for op in symm_ops:
op[0:3, 3] = np.mod(op[0:3, 3], 1)
new_ops = symm_ops
while len(new_ops) > 0 and len(symm_ops) < self.order:
gen_ops = []
for g in new_ops:
temp_ops = np.einsum('ijk,kl', symm_ops, g)
for op in temp_ops:
op[0:3, 3] = np.mod(op[0:3, 3], 1)
ind = np.where(np.abs(1 - op[0:3, 3]) < 1e-5)
op[ind, 3] = 0
if not in_array_list(symm_ops, op):
gen_ops.append(op)
symm_ops = np.append(symm_ops, [op], axis=0)
new_ops = gen_ops
assert len(symm_ops) == self.order
return symm_ops
@classmethod
def get_settings(cls, int_symbol):
"""
Returns all the settings for a particular international symbol.
Args:
int_symbol (str): Full International (e.g., "P2/m2/m2/m") or
Hermann-Mauguin Symbol ("Pmmm") or abbreviated symbol. The
notation is a LaTeX-like string, with screw axes being
represented by an underscore. For example, "P6_3/mmc".
"""
symbols = []
if int_symbol in SpaceGroup.abbrev_sg_mapping:
symbols.append(SpaceGroup.abbrev_sg_mapping[int_symbol])
int_number = SpaceGroup.sgencoding[int_symbol]["int_number"]
elif int_symbol in SpaceGroup.full_sg_mapping:
symbols.append(SpaceGroup.full_sg_mapping[int_symbol])
int_number = SpaceGroup.sgencoding[int_symbol]["int_number"]
else:
for spg in SpaceGroup.SYMM_OPS:
if int_symbol in [re.split(r"\(|:", spg["hermann_mauguin"])[0],
re.split(r"\(|:", spg["universal_h_m"])[0]]:
int_number = spg["number"]
break
for spg in SpaceGroup.SYMM_OPS:
if int_number == spg["number"]:
symbols.append(spg["hermann_mauguin"])
symbols.append(spg["universal_h_m"])
return set(symbols)
@property
def symmetry_ops(self):
"""
Full set of symmetry operations as matrices. Lazily initialized as
generation sometimes takes a bit of time.
"""
if self._symmetry_ops is None:
self._symmetry_ops = [
SymmOp(m) for m in self._generate_full_symmetry_ops()]
return self._symmetry_ops
def get_orbit(self, p, tol=1e-5):
"""
Returns the orbit for a point.
Args:
p: Point as a 3x1 array.
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
([array]) Orbit for point.
"""
orbit = []
for o in self.symmetry_ops:
pp = o.operate(p)
pp = np.mod(np.round(pp, decimals=10), 1)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
return orbit
def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
"""
Checks whether a particular lattice is compatible with the
*conventional* unit cell.
Args:
lattice (Lattice): A Lattice.
tol (float): The tolerance to check for equality of lengths.
angle_tol (float): The tolerance to check for equality of angles
in degrees.
"""
abc, angles = lattice.lengths_and_angles
crys_system = self.crystal_system
def check(param, ref, tolerance):
return all([abs(i - j) < tolerance for i, j in zip(param, ref)
if j is not None])
if crys_system == "cubic":
a = abc[0]
return check(abc, [a, a, a], tol) and\
check(angles, [90, 90, 90], angle_tol)
elif crys_system == "hexagonal" or (
crys_system == "trigonal" and (
self.symbol.endswith("H") or
self.int_number in [143, 144, 145, 147, 149, 150, 151, 152,
153, 154, 156, 157, 158, 159, 162, 163,
164, 165])):
a = abc[0]
return check(abc, [a, a, None], tol)\
and check(angles, [90, 90, 120], angle_tol)
elif crys_system == "trigonal":
a = abc[0]
alpha = angles[0]
return check(abc, [a, a, a], tol) \
and check(angles, [alpha, alpha, alpha], angle_tol)
elif crys_system == "tetragonal":
a = abc[0]
return check(abc, [a, a, None], tol) and\
check(angles, [90, 90, 90], angle_tol)
elif crys_system == "orthorhombic":
return check(angles, [90, 90, 90], angle_tol)
elif crys_system == "monoclinic":
return check(angles, [90, None, 90], angle_tol)
return True
@property
def crystal_system(self):
i = self.int_number
if i <= 2:
return "triclinic"
elif i <= 15:
return "monoclinic"
elif i <= 74:
return "orthorhombic"
elif i <= 142:
return "tetragonal"
elif i <= 167:
return "trigonal"
elif i <= 194:
return "hexagonal"
else:
return "cubic"
def is_subgroup(self, supergroup):
"""
True if this space group is a subgroup of the supplied group.
Args:
            supergroup (SpaceGroup): Supergroup to test.
Returns:
True if this space group is a subgroup of the supplied group.
"""
if len(supergroup.symmetry_ops) < len(self.symmetry_ops):
return False
groups = [[supergroup.int_number]]
all_groups = [supergroup.int_number]
max_subgroups = {int(k): v
for k, v in get_symm_data("maximal_subgroups").items()}
while True:
new_sub_groups = set()
for i in groups[-1]:
new_sub_groups.update([j for j in max_subgroups[i] if j
not in all_groups])
if self.int_number in new_sub_groups:
return True
elif len(new_sub_groups) == 0:
break
else:
groups.append(new_sub_groups)
all_groups.extend(new_sub_groups)
return False
def is_supergroup(self, subgroup):
"""
True if this space group is a supergroup of the supplied group.
Args:
subgroup (Spacegroup): Subgroup to test.
Returns:
True if this space group is a supergroup of the supplied group.
"""
return subgroup.is_subgroup(self)
@classmethod
def from_int_number(cls, int_number, hexagonal=True):
"""
Obtains a SpaceGroup from its international number.
Args:
int_number (int): International number.
hexagonal (bool): For rhombohedral groups, whether to return the
hexagonal setting (default) or rhombohedral setting.
Returns:
(SpaceGroup)
"""
return SpaceGroup(sg_symbol_from_int_number(int_number,
hexagonal=hexagonal))
def __str__(self):
return "Spacegroup %s with international number %d and order %d" % (
self.symbol, self.int_number, len(self.symmetry_ops))
def sg_symbol_from_int_number(int_number, hexagonal=True):
"""
Obtains a SpaceGroup name from its international number.
Args:
int_number (int): International number.
hexagonal (bool): For rhombohedral groups, whether to return the
hexagonal setting (default) or rhombohedral setting.
Returns:
(str) Spacegroup symbol
"""
syms = []
for n, v in get_symm_data("space_group_encoding").items():
if v["int_number"] == int_number:
syms.append(n)
if len(syms) == 0:
raise ValueError("Invalid international number!")
if len(syms) == 2:
if hexagonal:
syms = list(filter(lambda s: s.endswith("H"), syms))
else:
syms = list(filter(lambda s: not s.endswith("H"), syms))
return syms.pop()
def in_array_list(array_list, a, tol=1e-5):
"""
Extremely efficient nd-array comparison using numpy's broadcasting. This
function checks if a particular array a, is present in a list of arrays.
It works for arrays of any size, e.g., even matrix searches.
Args:
array_list ([array]): A list of arrays to compare to.
a (array): The test array for comparison.
tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is
done.
Returns:
(bool)
"""
if len(array_list) == 0:
return False
axes = tuple(range(1, a.ndim + 1))
if not tol:
return np.any(np.all(np.equal(array_list, a[None, :]), axes))
else:
return np.any(np.sum(np.abs(array_list - a[None, :]), axes) < tol)
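# Illustrative sketch (not part of the original module): in_array_list() compares one
# candidate array against a whole stack of arrays at once via broadcasting, which is
# how _generate_full_symmetry_ops() above deduplicates 4x4 operation matrices. The
# matrices below are arbitrary examples.
def _demo_in_array_list():
    ops = [np.eye(4), np.diag([1.0, -1.0, 1.0, 1.0])]
    candidate = np.eye(4) + 1e-7                      # within the default tolerance
    exact = in_array_list(ops, np.eye(4), tol=0)      # exact elementwise match
    approx = in_array_list(ops, candidate)            # tolerance-based match
    return exact, approx                              # (True, True)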
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the datasets shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.grappler import item
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class GrapplerTest(test.TestCase):
def testFromTensors(self):
test_cases = [{
'tensor': 0,
'shape': tensor_shape.TensorShape([])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([3])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([1, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testFromTensorSlices(self):
test_cases = [{
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([3])
}, {
'tensor': np.array([[[1, 2, 3]]]),
'shape': tensor_shape.TensorShape([1, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.from_tensor_slices(test_case['tensor'])
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testFromGenerator(self):
test_cases = [{
'tensor': 0,
'shape': tensor_shape.TensorShape([])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([3])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([1, 3])
}]
for test_case in test_cases:
def make_generator(tensor):
def generator():
yield tensor
return generator
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.from_generator(
make_generator(test_case['tensor']),
dtypes.int64,
output_shapes=test_case['shape'])
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testRange(self):
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(42)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(
tensor_shape.TensorShape([]),
op_properties['IteratorGetNext'][0].shape)
def _testTransformation(self, fn):
test_cases = [{
'tensor': 0,
        'shape': tensor_shape.TensorShape([])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([3])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([1, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
dataset = fn(dataset, test_case['tensor'], test_case['shape'])
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testConcatenate(self):
def fn(dataset, tensor, shape):
del shape
return dataset.concatenate(dataset_ops.Dataset.from_tensors(tensor))
self._testTransformation(fn)
def testPrefetch(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.prefetch(42)
self._testTransformation(fn)
def testRepeat(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.repeat(42)
self._testTransformation(fn)
def testShuffle(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.shuffle(42)
self._testTransformation(fn)
def testCache(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.cache()
self._testTransformation(fn)
def testTake(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.take(42)
self._testTransformation(fn)
def testSkip(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.skip(42)
self._testTransformation(fn)
def testShard(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.shard(42, 0)
self._testTransformation(fn)
def testFilter(self):
def fn(dataset, tensor, shape):
del tensor, shape
return dataset.filter(lambda x: True)
self._testTransformation(fn)
def as_tensor_shape(self, proto_with_symbolic_values):
for i in range(len(proto_with_symbolic_values.dim)):
if proto_with_symbolic_values.dim[i].size < -1:
proto_with_symbolic_values.dim[i].size = -1
return tensor_shape.TensorShape(proto_with_symbolic_values)
def testBatch(self):
test_cases = [{
'tensor': 0,
'shape': tensor_shape.TensorShape([None])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([None, 3])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([None, 1, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
dataset = dataset.batch(42)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
inferred_shape = self.as_tensor_shape(
op_properties['IteratorGetNext'][0].shape)
self.assertTrue(test_case['shape'].dims[0].is_compatible_with(
inferred_shape[0]))
self.assertEqual(test_case['shape'][1:], inferred_shape[1:])
def testPaddedBatch(self):
test_cases = [{
'tensor': 0,
'shape': tensor_shape.TensorShape([None])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([None, 4])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([None, 2, 4])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
dataset = dataset.padded_batch(42, padded_shapes=test_case['shape'][1:])
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
inferred_shape = self.as_tensor_shape(
op_properties['IteratorGetNext'][0].shape)
self.assertTrue(test_case['shape'].dims[0].is_compatible_with(
inferred_shape[0]))
self.assertEqual(test_case['shape'][1:], inferred_shape[1:])
def testFlatMap(self):
test_cases = [{
'tensor': 0,
'shape': tensor_shape.TensorShape([])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([3])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([1, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(42)
def make_dataset(tensor):
def dataset_fn(n):
return dataset_ops.Dataset.from_tensors(tensor).repeat(n)
return dataset_fn
dataset = dataset.flat_map(make_dataset(test_case['tensor']))
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testInterleave(self):
test_cases = [{
'tensor': 0,
'shape': tensor_shape.TensorShape([])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([3])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([1, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(42)
def make_dataset(tensor):
def dataset_fn(n):
return dataset_ops.Dataset.from_tensors(tensor).repeat(n)
return dataset_fn
dataset = dataset.interleave(
make_dataset(test_case['tensor']), cycle_length=42)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testMap(self):
test_cases = [{
'tensor': 0,
'shape': tensor_shape.TensorShape([])
}, {
'tensor': np.array([1, 2, 3]),
'shape': tensor_shape.TensorShape([3])
}, {
'tensor': np.array([[1, 2, 3]]),
'shape': tensor_shape.TensorShape([3, 1])
}, {
'tensor': np.array([[[1, 2, 3], [4, 5, 6]]]),
'shape': tensor_shape.TensorShape([3, 2, 1])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
dataset = dataset.map(array_ops.transpose)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testFromStructure(self):
test_cases = [{
'shape': tensor_shape.TensorShape([])
}, {
'shape': tensor_shape.TensorShape([3])
}, {
'shape': tensor_shape.TensorShape([1, 2])
}, {
'shape': tensor_shape.TensorShape([1, 2, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
iterator = iterator_ops.Iterator.from_structure(
dtypes.int64, output_shapes=test_case['shape'])
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
def testFromStringHandle(self):
test_cases = [{
'shape': tensor_shape.TensorShape([])
}, {
'shape': tensor_shape.TensorShape([3])
}, {
'shape': tensor_shape.TensorShape([1, 2])
}, {
'shape': tensor_shape.TensorShape([1, 2, 3])
}]
for test_case in test_cases:
with ops.Graph().as_default() as g:
iterator = iterator_ops.Iterator.from_structure(dtypes.int64)
handle = iterator.string_handle()
iterator = iterator_ops.Iterator.from_string_handle(
handle, dtypes.int64, output_shapes=test_case['shape'])
get_next = iterator.get_next()
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
op_properties = grappler_item.GetOpProperties()
self.assertEqual(test_case['shape'],
op_properties['IteratorGetNext'][0].shape)
if __name__ == '__main__':
test.main()
|
|
# Jaikrishna
# Karan Nayan
# John Cole
# Initial Date: June 24, 2013
# Last Updated: Nov 4, 2013
# http://www.dexterindustries.com/
#
# These files have been made available online through a Creative Commons Attribution-ShareAlike 3.0 license.
# (http://creativecommons.org/licenses/by-sa/3.0/)
#
# Ported from Matthew Richardson's BrickPi library for C
# This library can be used in RaspberryPi to communicate with BrickPi
# Major Changes from C code:
# - The timeout parameter for BrickPiRx is in seconds expressed as a floating value
# - Instead of Call by Reference in BrickPiRx, multiple values are returned and then copied to the main Array appropriately
# - BrickPiStruct Variables are assigned to None and then modified to avoid appending which may lead to errors
##################################################################################################################
# Debugging:
# - NOTE THAT DEBUGGING ERROR MESSAGES ARE TURNED OFF BY DEFAULT. To debug, just take the comment out of Line 29.
#
# If you #define DEBUG in the program, the BrickPi.h drivers will print debug messages to the terminal. One common message is
# "BrickPiRx error: -2", in function BrickPiUpdateValues(). This is caused by an error in the communication with one of the
# microcontrollers on the BrickPi. When this happens, the drivers automatically re-try the communication several times before the
# function gives up and returns -1 (unsuccessful) to the user-program.
# Function BrickPiUpdateValues() will either return 0 (success), or -1 (error that could not automatically be resolved, even after
# re-trying several times). We have rarely had BrickPiUpdateValues() retry more than once before the communication was successful.
# A known cause for "BrickPiRx error: -2" is the RPi splitting the UART message. Sometimes the RPi will send e.g. 3 bytes, wait a
# while, and then send 4 more, when it should have just sent 7 at once. During the pause between the packs of bytes, the BrickPi
# microcontrollers will think the transmission is complete, realize the message doesn't make sense, throw it away, and not return
# a message to the RPi. The RPi will then fail to receive a message in the specified amount of time, timeout, and then retry the
# communication.
# If a function returns 0, it completed successfully. If it returns -1, there was an error (most likely a communications error).
# Function BrickPiRx() (background function that receives UART messages from the BrickPi) can return:
#    0  success
#   -1  undefined error that shouldn't have happened (e.g. a filesystem error)
#   -2  timeout: the RPi didn't receive any UART communication from the BrickPi within the specified time
#   -4  the message was too short to even contain a valid header
#   -5  communication checksum error
#   -6  the number of bytes received was less than specified by the length byte
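#
# A minimal usage sketch (illustrative only, kept as comments so nothing runs on import;
# it assumes a touch sensor on PORT_1 and uses only names defined in this file):
#
#   DEBUG = 1                                   # enable the debug messages described above
#   BrickPiSetup()                              # open the UART connection
#   BrickPi.SensorType[PORT_1] = TYPE_SENSOR_TOUCH
#   BrickPiSetupSensors()                       # push the sensor configuration to the BrickPi
#   while True:
#       if BrickPiUpdateValues() == 0:          # 0 means the exchange succeeded
#           print BrickPi.Sensor[PORT_1]
#       time.sleep(0.1)
#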
import time
import serial
ser = serial.Serial()
ser.port='/dev/ttyAMA0'
ser.baudrate = 500000
# ser.writeTimeout = 0.0005
# ser.timeout = 0.0001
#DEBUG = 1 # Uncomment this line to enable the debug error messages
#Jan's US fix##########
print "Welcome to BrickPi - with Jan's US sensor fix"
# The I2C speed (see below) for the ultrasound is hard
# coded to 7 in the firmware of the BrickPi. Unfortunately
# this speed is not very robust and sometimes causes the
# most significant bit to become corrupted. This leads to
# values being wrong by +-128.
# This modification to BrickPi.py fixes the problem
# without changing any program source code by mapping
# TYPE_SENSOR_ULTRASONIC_CONT to TYPE_SENSOR_I2C and
# setting it up manually.
# For more info see the BrickPi forum:
# http://www.dexterindustries.com/forum/?topic=problem-ultrasonic-sensor/#post-1273
# If you still have problems try tweaking the value below
US_I2C_SPEED = 10 #tweak this value
US_I2C_IDX = 0
LEGO_US_I2C_ADDR = 0x02
LEGO_US_I2C_DATA_REG = 0x42
#######################
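#
# Illustrative sketch (comments only, not executed): with the fix above, user code keeps
# using TYPE_SENSOR_ULTRASONIC_CONT exactly as before; the remapping to TYPE_SENSOR_I2C
# happens inside BrickPiSetupSensors() and BrickPiUpdateValues():
#
#   BrickPi.SensorType[PORT_1] = TYPE_SENSOR_ULTRASONIC_CONT
#   BrickPiSetupSensors()
#   if BrickPiUpdateValues() == 0:
#       print BrickPi.Sensor[PORT_1]   # distance reading, or -1 if the I2C read failed
#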
PORT_A = 0
PORT_B = 1
PORT_C = 2
PORT_D = 3
PORT_1 = 0
PORT_2 = 1
PORT_3 = 2
PORT_4 = 3
MASK_D0_M = 0x01
MASK_D1_M = 0x02
MASK_9V = 0x04
MASK_D0_S = 0x08
MASK_D1_S = 0x10
BYTE_MSG_TYPE = 0 # MSG_TYPE is the first byte.
MSG_TYPE_CHANGE_ADDR = 1 # Change the UART address.
MSG_TYPE_SENSOR_TYPE = 2 # Change/set the sensor type.
MSG_TYPE_VALUES = 3 # Set the motor speed and direction, and return the sensors and encoders.
MSG_TYPE_E_STOP = 4 # Float motors immediately
MSG_TYPE_TIMEOUT_SETTINGS = 5 # Set the timeout
# New UART address (MSG_TYPE_CHANGE_ADDR)
BYTE_NEW_ADDRESS = 1
# Sensor setup (MSG_TYPE_SENSOR_TYPE)
BYTE_SENSOR_1_TYPE = 1
BYTE_SENSOR_2_TYPE = 2
BYTE_TIMEOUT=1
TYPE_MOTOR_PWM = 0
TYPE_MOTOR_SPEED = 1
TYPE_MOTOR_POSITION = 2
TYPE_SENSOR_RAW = 0 # - 31
TYPE_SENSOR_LIGHT_OFF = 0
TYPE_SENSOR_LIGHT_ON = (MASK_D0_M | MASK_D0_S)
TYPE_SENSOR_TOUCH = 32
TYPE_SENSOR_ULTRASONIC_CONT = 33
TYPE_SENSOR_ULTRASONIC_SS = 34
TYPE_SENSOR_RCX_LIGHT = 35 # tested minimally
TYPE_SENSOR_COLOR_FULL = 36
TYPE_SENSOR_COLOR_RED = 37
TYPE_SENSOR_COLOR_GREEN = 38
TYPE_SENSOR_COLOR_BLUE = 39
TYPE_SENSOR_COLOR_NONE = 40
TYPE_SENSOR_I2C = 41
TYPE_SENSOR_I2C_9V = 42
BIT_I2C_MID = 0x01 # Do one of those funny clock pulses between writing and reading. defined for each device.
BIT_I2C_SAME = 0x02 # The transmit data, and the number of bytes to read and write isn't going to change. defined for each device.
INDEX_RED = 0
INDEX_GREEN = 1
INDEX_BLUE = 2
INDEX_BLANK = 3
Array = [0] * 256
BytesReceived = None
Bit_Offset = 0
Retried = 0
class BrickPiStruct:
Address = [ 1, 2 ]
MotorSpeed = [0] * 4
MotorEnable = [0] * 4
EncoderOffset = [None] * 4
Encoder = [None] * 4
Sensor = [None] * 4
SensorArray = [ [None] * 4 for i in range(4) ]
SensorType = [0] * 4
SensorSettings = [ [None] * 8 for i in range(4) ]
SensorI2CDevices = [None] * 4
SensorI2CSpeed = [None] * 4
SensorI2CAddr = [ [None] * 8 for i in range(4) ]
SensorI2CWrite = [ [None] * 8 for i in range(4) ]
SensorI2CRead = [ [None] * 8 for i in range(4) ]
SensorI2COut = [ [ [None] * 16 for i in range(8) ] for i in range(4) ]
SensorI2CIn = [ [ [None] * 16 for i in range(8) ] for i in range(4) ]
Timeout = 0
BrickPi = BrickPiStruct()
#PSP Mindsensors class
class button:
#Initialize all the buttons to 0
def init(self):
self.l1=0
self.l2=0
self.r1=0
self.r2=0
self.a=0
self.b=0
self.c=0
self.d=0
self.tri=0
self.sqr=0
self.cir=0
self.cro=0
self.ljb=0
self.ljx=0
self.ljy=0
self.rjx=0
self.rjy=0
self.rjb=0
#Update all the buttons
def upd(self,I2C_PORT):
#For all buttons:
#0: Unpressed
#1: Pressed
#
#Left and right joystick: -127 to 127
self.ljb=~(BrickPi.SensorI2CIn[I2C_PORT][0][0]>>1)&1
self.rjb=~(BrickPi.SensorI2CIn[I2C_PORT][0][0]>>2)&1
#For buttons a,b,c,d
self.d=~(BrickPi.SensorI2CIn[I2C_PORT][0][0]>>4)&1
self.c=~(BrickPi.SensorI2CIn[I2C_PORT][0][0]>>5)&1
self.b=~(BrickPi.SensorI2CIn[I2C_PORT][0][0]>>6)&1
self.a=~(BrickPi.SensorI2CIn[I2C_PORT][0][0]>>7)&1
#For buttons l1,l2,r1,r2
self.l2=~(BrickPi.SensorI2CIn[I2C_PORT][0][1])&1
self.r2=~(BrickPi.SensorI2CIn[I2C_PORT][0][1]>>1)&1
self.l1=~(BrickPi.SensorI2CIn[I2C_PORT][0][1]>>2)&1
self.r1=~(BrickPi.SensorI2CIn[I2C_PORT][0][1]>>3)&1
#For buttons square,triangle,cross,circle
self.tri=~(BrickPi.SensorI2CIn[I2C_PORT][0][1]>>4)&1
self.cir=~(BrickPi.SensorI2CIn[I2C_PORT][0][1]>>5)&1
self.cro=~(BrickPi.SensorI2CIn[I2C_PORT][0][1]>>6)&1
self.sqr=~(BrickPi.SensorI2CIn[I2C_PORT][0][1]>>7)&1
#Left joystick x and y , -127 to 127
self.ljx=BrickPi.SensorI2CIn[I2C_PORT][0][2]-128
self.ljy=~BrickPi.SensorI2CIn[I2C_PORT][0][3]+129
#Right joystick x and y , -127 to 127
self.rjx=BrickPi.SensorI2CIn[I2C_PORT][0][4]-128
self.rjy=~BrickPi.SensorI2CIn[I2C_PORT][0][5]+129
#Show button values
def show_val(self):
print "ljb","rjb","d","c","b","a","l2","r2","l1","r1","tri","cir","cro","sqr","ljx","ljy","rjx","rjy"
print self.ljb," ",self.rjb," ",self.d,self.c,self.b,self.a,self.l2,"",self.r2,"",self.l1,"",self.r1,"",self.tri," ",self.cir," ",self.cro," ",self.sqr," ",self.ljx," ",self.ljy," ",self.rjx," ",self.rjy
print ""
def BrickPiChangeAddress(OldAddr, NewAddr):
Array[BYTE_MSG_TYPE] = MSG_TYPE_CHANGE_ADDR;
Array[BYTE_NEW_ADDRESS] = NewAddr;
BrickPiTx(OldAddr, 2, Array)
res, BytesReceived, InArray = BrickPiRx(0.005000)
if res :
return -1
for i in range(len(InArray)):
Array[i] = InArray[i]
if not (BytesReceived == 1 and Array[BYTE_MSG_TYPE] == MSG_TYPE_CHANGE_ADDR):
return -1
return 0
def BrickPiSetTimeout():
for i in range(2):
Array[BYTE_MSG_TYPE] = MSG_TYPE_TIMEOUT_SETTINGS
Array[BYTE_TIMEOUT] = BrickPi.Timeout&0xFF
Array[BYTE_TIMEOUT + 1] = (BrickPi.Timeout / 256 ) & 0xFF
Array[BYTE_TIMEOUT + 2] = (BrickPi.Timeout / 65536 ) & 0xFF
Array[BYTE_TIMEOUT + 3] = (BrickPi.Timeout / 16777216) & 0xFF
BrickPiTx(BrickPi.Address[i], 5, Array)
res, BytesReceived, InArray = BrickPiRx(0.002500)
if res :
return -1
for j in range(len(InArray)):
Array[j] = InArray[j]
if not (BytesReceived == 1 and Array[BYTE_MSG_TYPE] == MSG_TYPE_TIMEOUT_SETTINGS):
return -1
i+=1
return 0
def motorRotateDegree(power,deg,port,sampling_time=.1):
"""Rotate the selected motors by specified degre
Args:
power : an array of the power values at which to rotate the motors (0-255)
deg : an array of the angle's (in degrees) by which to rotate each of the motor
port : an array of the port's on which the motor is connected
sampling_time : (optional) the rate(in seconds) at which to read the data in the encoders
Returns:
0 on success
Usage:
Pass the arguments in a list. if a single motor has to be controlled then the arguments should be
passed like elements of an array,e.g, motorRotateDegree([255],[360],[PORT_A]) or
motorRotateDegree([255,255],[360,360],[PORT_A,PORT_B])
"""
num_motor=len(power) #Number of motors being used
init_val=[0]*num_motor
final_val=[0]*num_motor
BrickPiUpdateValues()
for i in range(num_motor):
BrickPi.MotorEnable[port[i]] = 1 #Enable the Motors
power[i]=abs(power[i])
BrickPi.MotorSpeed[port[i]] = power[i] if deg[i]>0 else -power[i] #For running clockwise and anticlockwise
init_val[i]=BrickPi.Encoder[port[i]] #Initial reading of the encoder
final_val[i]=init_val[i]+(deg[i]*2) #Final value when the motor has to be stopped;One encoder value counts for 0.5 degrees
run_stat=[0]*num_motor
while True:
result = BrickPiUpdateValues() #Ask BrickPi to update values for sensors/motors
if not result :
for i in range(num_motor): #Do for each of the motors
if run_stat[i]==1:
continue
if(deg[i]>0 and final_val[i]>init_val[i]) or (deg[i]<0 and final_val[i]<init_val[i]) : #Check if final value reached for each of the motors
init_val[i]=BrickPi.Encoder[port[i]] #Read the encoder degrees
else:
run_stat[i]=1
BrickPi.MotorSpeed[port[i]]=-power[i] if deg[i]>0 else power[i]
# Group40 fix right motor needs to reverse more
'''if i == 1:
BrickPi.MotorSpeed[port[i]]=int(-power[i]*1.8) if deg[i]>0 else power[i]
print "port ==: " + str(port[i])
print "i ==: " + str(i)
else:
BrickPi.MotorSpeed[port[i]]=-power[i] if deg[i]>0 else power[i] #Run the motors in reverse direction to stop instantly
print "port ==: " + str(port[i])
print "i ==: " + str(i)'''
BrickPiUpdateValues()
time.sleep(.04)
BrickPi.MotorEnable[port[i]] = 0
BrickPiUpdateValues()
time.sleep(sampling_time) #sleep for the sampling time given (default:100 ms)
if(all(e==1 for e in run_stat)): #If all the motors have already completed their rotation, then stop
break
return 0
def GetBits( byte_offset, bit_offset, bits):
global Bit_Offset
result = 0
i = bits
while i:
result *= 2
result |= ((Array[(byte_offset + ((bit_offset + Bit_Offset + (i-1)) / 8))] >> ((bit_offset + Bit_Offset + (i-1)) % 8)) & 0x01)
i -= 1
Bit_Offset += bits
return result
def BitsNeeded(value):
for i in range(32):
if not value:
return i
value /= 2
return 31
def AddBits(byte_offset, bit_offset, bits, value):
global Bit_Offset
for i in range(bits):
if(value & 0x01):
Array[(byte_offset + ((bit_offset + Bit_Offset + i)/ 8))] |= (0x01 << ((bit_offset + Bit_Offset + i) % 8));
value /=2
Bit_Offset += bits
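# Illustrative round-trip sketch for AddBits()/GetBits() (comments only): values are
# packed LSB-first into the shared Array, with the global Bit_Offset tracking the
# running bit position past the given byte_offset/bit_offset:
#
#   Array = [0] * 256
#   Bit_Offset = 0
#   AddBits(3, 0, 10, 677)   # pack a 10-bit value starting at byte offset 3
#   Bit_Offset = 0           # rewind before reading back
#   GetBits(3, 0, 10)        # -> 677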
def BrickPiSetupSensors():
global Array
global Bit_Offset
global BytesReceived
for i in range(2):
Array = [0] * 256
Bit_Offset = 0
Array[BYTE_MSG_TYPE] = MSG_TYPE_SENSOR_TYPE
Array[BYTE_SENSOR_1_TYPE] = BrickPi.SensorType[PORT_1 + i*2 ]
Array[BYTE_SENSOR_2_TYPE] = BrickPi.SensorType[PORT_2 + i*2 ]
for ii in range(2):
port = i*2 + ii
#Jan's US fix###########
if(Array[BYTE_SENSOR_1_TYPE + ii] == TYPE_SENSOR_ULTRASONIC_CONT):
Array[BYTE_SENSOR_1_TYPE + ii] = TYPE_SENSOR_I2C
BrickPi.SensorI2CSpeed[port] = US_I2C_SPEED
BrickPi.SensorI2CDevices[port] = 1
BrickPi.SensorSettings[port][US_I2C_IDX] = BIT_I2C_MID | BIT_I2C_SAME
BrickPi.SensorI2CAddr[port][US_I2C_IDX] = LEGO_US_I2C_ADDR
BrickPi.SensorI2CWrite [port][US_I2C_IDX] = 1
BrickPi.SensorI2CRead [port][US_I2C_IDX] = 1
BrickPi.SensorI2COut [port][US_I2C_IDX][0] = LEGO_US_I2C_DATA_REG
########################
if(Array[BYTE_SENSOR_1_TYPE + ii] == TYPE_SENSOR_I2C or Array[BYTE_SENSOR_1_TYPE + ii] == TYPE_SENSOR_I2C_9V ):
AddBits(3,0,8,BrickPi.SensorI2CSpeed[port])
if(BrickPi.SensorI2CDevices[port] > 8):
BrickPi.SensorI2CDevices[port] = 8
if(BrickPi.SensorI2CDevices[port] == 0):
BrickPi.SensorI2CDevices[port] = 1
AddBits(3,0,3, (BrickPi.SensorI2CDevices[port] - 1))
for device in range(BrickPi.SensorI2CDevices[port]):
AddBits(3,0,7, (BrickPi.SensorI2CAddr[port][device] >> 1))
AddBits(3,0,2, BrickPi.SensorSettings[port][device])
if(BrickPi.SensorSettings[port][device] & BIT_I2C_SAME):
AddBits(3,0,4, BrickPi.SensorI2CWrite[port][device])
AddBits(3,0,4, BrickPi.SensorI2CRead[port][device])
for out_byte in range(BrickPi.SensorI2CWrite[port][device]):
AddBits(3,0,8, BrickPi.SensorI2COut[port][device][out_byte])
tx_bytes = (((Bit_Offset + 7) / 8) + 3) #eq to UART_TX_BYTES
BrickPiTx(BrickPi.Address[i], tx_bytes , Array)
res, BytesReceived, InArray = BrickPiRx(0.500000)
if res :
return -1
for i in range(len(InArray)):
Array[i]=InArray[i]
if not (BytesReceived ==1 and Array[BYTE_MSG_TYPE] == MSG_TYPE_SENSOR_TYPE) :
return -1
return 0
def BrickPiUpdateValues():
global Array
global Bit_Offset
global Retried
ret = False
i = 0
while i < 2 :
if not ret:
Retried = 0
#Retry Communication from here, if failed
Array = [0] * 256
Array[BYTE_MSG_TYPE] = MSG_TYPE_VALUES
Bit_Offset = 0
for ii in range(2):
port = (i * 2) + ii
if(BrickPi.EncoderOffset[port]):
Temp_Value = BrickPi.EncoderOffset[port]
Temp_ENC_DIR = 0 # direction bit; set to 1 below when the offset is negative
AddBits(1,0,1,1)
if Temp_Value < 0 :
Temp_ENC_DIR = 1
Temp_Value *= -1
Temp_BitsNeeded = BitsNeeded(Temp_Value) + 1
AddBits(1,0,5, Temp_BitsNeeded)
Temp_Value *= 2
Temp_Value |= Temp_ENC_DIR
AddBits(1,0, Temp_BitsNeeded, Temp_Value)
else:
AddBits(1,0,1,0)
for ii in range(2):
port = (i *2) + ii
speed = BrickPi.MotorSpeed[port]
direc = 0
if speed<0 :
direc = 1
speed *= -1
if speed>255:
speed = 255
AddBits(1,0,10,((((speed & 0xFF) << 2) | (direc << 1) | (BrickPi.MotorEnable[port] & 0x01)) & 0x3FF))
for ii in range(2):
port = (i * 2) + ii
#Jan's US Fix##########
#old# if(BrickPi.SensorType[port] == TYPE_SENSOR_I2C or BrickPi.SensorType[port] == TYPE_SENSOR_I2C_9V):
if(BrickPi.SensorType[port] == TYPE_SENSOR_I2C or BrickPi.SensorType[port] == TYPE_SENSOR_I2C_9V or BrickPi.SensorType[port] == TYPE_SENSOR_ULTRASONIC_CONT):
#######################
for device in range(BrickPi.SensorI2CDevices[port]):
if not (BrickPi.SensorSettings[port][device] & BIT_I2C_SAME):
AddBits(1,0,4, BrickPi.SensorI2CWrite[port][device])
AddBits(1,0,4, BrickPi.SensorI2CRead[port][device])
for out_byte in range(BrickPi.SensorI2CWrite[port][device]):
AddBits(1,0,8, BrickPi.SensorI2COut[port][device][out_byte])
device += 1
tx_bytes = (((Bit_Offset + 7) / 8 ) + 1) #eq to UART_TX_BYTES
BrickPiTx(BrickPi.Address[i], tx_bytes, Array)
result, BytesReceived, InArray = BrickPiRx(0.007500) #check timeout
for j in range(len(InArray)):
Array[j]=InArray[j]
if result != -2 :
BrickPi.EncoderOffset[(i * 2) + PORT_A] = 0
BrickPi.EncoderOffset[(i * 2) + PORT_B] = 0
if (result or (Array[BYTE_MSG_TYPE] != MSG_TYPE_VALUES)):
if 'DEBUG' in globals():
if DEBUG == 1:
print "BrickPiRx Error :", result
if Retried < 2 :
ret = True
Retried += 1
#print "Retry", Retried
continue
else:
if 'DEBUG' in globals():
if DEBUG == 1:
print "Retry Failed"
return -1
ret = False
Bit_Offset = 0
Temp_BitsUsed = []
Temp_BitsUsed.append(GetBits(1,0,5))
Temp_BitsUsed.append(GetBits(1,0,5))
for ii in range(2):
Temp_EncoderVal = GetBits(1,0, Temp_BitsUsed[ii])
if Temp_EncoderVal & 0x01 :
Temp_EncoderVal /= 2
BrickPi.Encoder[ii + i*2] = Temp_EncoderVal*(-1)
else:
BrickPi.Encoder[ii + i*2] = Temp_EncoderVal / 2
for ii in range(2):
port = ii + (i * 2)
if BrickPi.SensorType[port] == TYPE_SENSOR_TOUCH :
BrickPi.Sensor[port] = GetBits(1,0,1)
#Jan's US fix##########
#old# elif BrickPi.SensorType[port] == TYPE_SENSOR_ULTRASONIC_CONT or BrickPi.SensorType[port] == TYPE_SENSOR_ULTRASONIC_SS :
elif BrickPi.SensorType[port] == TYPE_SENSOR_ULTRASONIC_SS :
#######################
BrickPi.Sensor[port] = GetBits(1,0,8)
elif BrickPi.SensorType[port] == TYPE_SENSOR_COLOR_FULL:
BrickPi.Sensor[port] = GetBits(1,0,3)
BrickPi.SensorArray[port][INDEX_BLANK] = GetBits(1,0,10)
BrickPi.SensorArray[port][INDEX_RED] = GetBits(1,0,10)
BrickPi.SensorArray[port][INDEX_GREEN] = GetBits(1,0,10)
BrickPi.SensorArray[port][INDEX_BLUE] = GetBits(1,0,10)
#Jan's US fix##########
#old# elif BrickPi.SensorType[port] == TYPE_SENSOR_I2C or BrickPi.SensorType[port] == TYPE_SENSOR_I2C_9V :
elif BrickPi.SensorType[port] == TYPE_SENSOR_I2C or BrickPi.SensorType[port] == TYPE_SENSOR_I2C_9V or BrickPi.SensorType[port] == TYPE_SENSOR_ULTRASONIC_CONT:
#######################
BrickPi.Sensor[port] = GetBits(1,0, BrickPi.SensorI2CDevices[port])
for device in range(BrickPi.SensorI2CDevices[port]):
if (BrickPi.Sensor[port] & ( 0x01 << device)) :
for in_byte in range(BrickPi.SensorI2CRead[port][device]):
BrickPi.SensorI2CIn[port][device][in_byte] = GetBits(1,0,8)
#Jan's US fix##########
if BrickPi.SensorType[port] == TYPE_SENSOR_ULTRASONIC_CONT :
if(BrickPi.Sensor[port] & ( 0x01 << US_I2C_IDX)) :
BrickPi.Sensor[port] = BrickPi.SensorI2CIn[port][US_I2C_IDX][0]
else:
BrickPi.Sensor[port] = -1
#######################
else: #For all the light, color and raw sensors
BrickPi.Sensor[ii + (i * 2)] = GetBits(1,0,10)
i += 1
return 0
def BrickPiSetup():
if ser.isOpen():
return -1
ser.open()
if not ser.isOpen():
return -1
return 0
def BrickPiTx(dest, ByteCount, OutArray):
tx_buffer = ''
tx_buffer+=chr(dest)
tx_buffer+=chr((dest+ByteCount+sum(OutArray[:ByteCount]))%256)
tx_buffer+=chr(ByteCount)
for i in OutArray[:ByteCount]:
tx_buffer+=chr(i)
ser.write(tx_buffer)
def BrickPiRx(timeout):
rx_buffer = ''
ser.timeout=0
ot = time.time()
while( ser.inWaiting() <= 0):
if time.time() - ot >= timeout :
return -2, 0 , []
if not ser.isOpen():
return -1, 0 , []
try:
while ser.inWaiting():
rx_buffer += ( ser.read(ser.inWaiting()) )
#time.sleep(.000075)
except:
return -1, 0 , []
RxBytes=len(rx_buffer)
if RxBytes < 2 :
return -4, 0 , []
if RxBytes < ord(rx_buffer[1])+2 :
return -6, 0 , []
CheckSum = 0
for i in rx_buffer[1:]:
CheckSum += ord(i)
InArray = []
for i in rx_buffer[2:]:
InArray.append(ord(i))
if (CheckSum % 256) != ord(rx_buffer[0]) : #Checksum equals sum(InArray)+len(InArray)
return -5, 0 , []
InBytes = RxBytes - 2
return 0, InBytes, InArray
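#
# Frame layout recap (derived from the code above): BrickPiTx() sends
#   [dest, (dest + byte_count + sum(payload)) % 256, byte_count, payload...]
# while BrickPiRx() expects replies of the form
#   [checksum, byte_count, payload...]
# where checksum == (byte_count + sum(payload)) % 256, which is exactly what the
# comparison against ord(rx_buffer[0]) above verifies.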
|
|
import os
import shutil
import tempfile
from mock import patch
from mock import Mock, MagicMock
from zope.interface import implements
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, error, task, tcp
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.endpoints import serverFromString
from twisted.internet.endpoints import clientFromString
from twisted.python.failure import Failure
from twisted.internet.error import ConnectionRefusedError
from twisted.internet.interfaces import IReactorCore
from twisted.internet.interfaces import IProtocolFactory
from twisted.internet.interfaces import IProtocol
from twisted.internet.interfaces import IReactorTCP
from twisted.internet.interfaces import IListeningPort
from twisted.internet.interfaces import IAddress
from txtorcon import TorControlProtocol
from txtorcon import ITorControlProtocol
from txtorcon import TorConfig
from txtorcon import launch_tor
from txtorcon import TCPHiddenServiceEndpoint
from txtorcon import TorClientEndpoint
from txtorcon import TorNotFound
from txtorcon import TCPHiddenServiceEndpointParser
from txtorcon import IProgressProvider
from txtorcon import TorOnionAddress
from txtorcon.util import NoOpProtocolFactory
from txtorcon.endpoints import get_global_tor # FIXME
from txtorcon.endpoints import _HAVE_TLS
import util
class EndpointTests(unittest.TestCase):
def setUp(self):
from txtorcon import endpoints
endpoints._global_tor_config = None
del endpoints._global_tor_lock
endpoints._global_tor_lock = defer.DeferredLock()
self.reactor = FakeReactorTcp(self)
self.protocol = FakeControlProtocol([])
self.protocol.event_happened('INFO', 'something craaaaaaazy')
self.protocol.event_happened(
'INFO',
'connection_dir_client_reached_eof(): Uploaded rendezvous '
'descriptor (status 200 ("Service descriptor (v2) stored"))'
)
self.config = TorConfig(self.protocol)
self.protocol.answers.append(
'config/names=\nHiddenServiceOptions Virtual'
)
self.protocol.answers.append('HiddenServiceOptions')
self.patcher = patch(
'txtorcon.torconfig.find_tor_binary',
return_value='/not/tor'
)
self.patcher.start()
def tearDown(self):
from txtorcon import endpoints
endpoints._global_tor_config = None
del endpoints._global_tor_lock
endpoints._global_tor_lock = defer.DeferredLock()
self.patcher.stop()
@defer.inlineCallbacks
def test_global_tor(self):
config = yield get_global_tor(
Mock(),
_tor_launcher=lambda x, y, z: True
)
self.assertEqual(0, config.SOCKSPort)
@defer.inlineCallbacks
def test_global_tor_error(self):
config0 = yield get_global_tor(
Mock(),
_tor_launcher=lambda x, y, z: True
)
# now if we specify a control_port it should be an error since
# the above should have launched one.
try:
config1 = yield get_global_tor(Mock(), control_port=111,
_tor_launcher=lambda x, y, z: True)
self.fail()
except RuntimeError as e:
# should be an error
pass
@defer.inlineCallbacks
def test_endpoint_properties(self):
ep = yield TCPHiddenServiceEndpoint.private_tor(Mock(), 80)
self.assertEqual(None, ep.onion_private_key)
self.assertEqual(None, ep.onion_uri)
ep.hiddenservice = Mock()
ep.hiddenservice.private_key = 'mumble'
self.assertEqual('mumble', ep.onion_private_key)
@defer.inlineCallbacks
def test_private_tor(self):
m = Mock()
from txtorcon import endpoints
endpoints.launch_tor = m
ep = yield TCPHiddenServiceEndpoint.private_tor(Mock(), 80,
control_port=1234)
self.assertTrue(m.called)
@defer.inlineCallbacks
def test_private_tor_no_control_port(self):
m = Mock()
from txtorcon import endpoints
endpoints.launch_tor = m
ep = yield TCPHiddenServiceEndpoint.private_tor(Mock(), 80)
self.assertTrue(m.called)
@defer.inlineCallbacks
def test_system_tor(self):
from test_torconfig import FakeControlProtocol
def boom(*args):
# why does the new_callable thing need a callable that
# returns a callable? Feels like I must be doing something
# wrong somewhere...
def bam(*args, **kw):
return self.protocol
return bam
with patch('txtorcon.endpoints.launch_tor') as launch_mock:
with patch('txtorcon.endpoints.build_tor_connection', new_callable=boom) as btc:
client = clientFromString(
self.reactor,
"tcp:host=localhost:port=9050"
)
ep = yield TCPHiddenServiceEndpoint.system_tor(self.reactor,
client, 80)
port = yield ep.listen(NoOpProtocolFactory())
toa = port.getHost()
self.assertTrue(hasattr(toa, 'onion_uri'))
self.assertTrue(hasattr(toa, 'onion_port'))
port.startListening()
str(port)
port.tor_config
# system_tor should be connecting to a running one,
# *not* launching a new one.
self.assertFalse(launch_mock.called)
@defer.inlineCallbacks
def test_basic(self):
listen = RuntimeError("listen")
connect = RuntimeError("connect")
reactor = proto_helpers.RaisingMemoryReactor(listen, connect)
reactor.addSystemEventTrigger = Mock()
ep = TCPHiddenServiceEndpoint(reactor, self.config, 123)
self.config.bootstrap()
yield self.config.post_bootstrap
self.assertTrue(IProgressProvider.providedBy(ep))
try:
port = yield ep.listen(NoOpProtocolFactory())
self.fail("Should have been an exception")
except RuntimeError as e:
# make sure we called listenTCP not connectTCP
self.assertEqual(e, listen)
repr(self.config.HiddenServices)
def test_progress_updates(self):
config = TorConfig()
ep = TCPHiddenServiceEndpoint(self.reactor, config, 123)
self.assertTrue(IProgressProvider.providedBy(ep))
prog = IProgressProvider(ep)
ding = Mock()
prog.add_progress_listener(ding)
args = (50, "blarg", "Doing that thing we talked about.")
# kind-of cheating, test-wise?
ep._tor_progress_update(*args)
self.assertTrue(ding.called_with(*args))
@patch('txtorcon.endpoints.launch_tor')
def test_progress_updates_private_tor(self, tor):
ep = TCPHiddenServiceEndpoint.private_tor(self.reactor, 1234)
tor.call_args[1]['progress_updates'](40, 'FOO', 'foo to the bar')
return ep
def __test_progress_updates_system_tor(self):
ep = TCPHiddenServiceEndpoint.system_tor(self.reactor, 1234)
ep._tor_progress_update(40, "FOO", "foo to bar")
return ep
@patch('txtorcon.endpoints.get_global_tor')
def test_progress_updates_global_tor(self, tor):
ep = TCPHiddenServiceEndpoint.global_tor(self.reactor, 1234)
tor.call_args[1]['progress_updates'](40, 'FOO', 'foo to the bar')
return ep
def test_hiddenservice_key_unfound(self):
ep = TCPHiddenServiceEndpoint.private_tor(
self.reactor,
1234,
hidden_service_dir='/dev/null'
)
# FIXME Mock() should work somehow for this, but I couldn't
# make it "go"
class Blam(object):
@property
def private_key(self):
raise IOError("blam")
ep.hiddenservice = Blam()
self.assertEqual(ep.onion_private_key, None)
return ep
def test_multiple_listen(self):
ep = TCPHiddenServiceEndpoint(self.reactor, self.config, 123)
d0 = ep.listen(NoOpProtocolFactory())
@defer.inlineCallbacks
def more_listen(arg):
yield arg.stopListening()
d1 = ep.listen(NoOpProtocolFactory())
def foo(arg):
return arg
d1.addBoth(foo)
defer.returnValue(arg)
return
d0.addBoth(more_listen)
self.config.bootstrap()
def check(arg):
self.assertEqual('127.0.0.1', ep.tcp_endpoint._interface)
self.assertEqual(len(self.config.HiddenServices), 1)
d0.addCallback(check).addErrback(self.fail)
return d0
def test_already_bootstrapped(self):
self.config.bootstrap()
ep = TCPHiddenServiceEndpoint(self.reactor, self.config, 123)
d = ep.listen(NoOpProtocolFactory())
return d
@defer.inlineCallbacks
def test_explicit_data_dir(self):
d = tempfile.mkdtemp()
try:
with open(os.path.join(d, 'hostname'), 'w') as f:
f.write('public')
config = TorConfig(self.protocol)
ep = TCPHiddenServiceEndpoint(self.reactor, config, 123, d)
# make sure listen() correctly configures our hidden-service
# with the explicit directory we passed in above
port = yield ep.listen(NoOpProtocolFactory())
self.assertEqual(1, len(config.HiddenServices))
self.assertEqual(config.HiddenServices[0].dir, d)
self.assertEqual(config.HiddenServices[0].hostname, 'public')
finally:
shutil.rmtree(d, ignore_errors=True)
def test_failure(self):
self.reactor.failures = 1
ep = TCPHiddenServiceEndpoint(self.reactor, self.config, 123)
d = ep.listen(NoOpProtocolFactory())
self.config.bootstrap()
d.addErrback(self.check_error)
return d
def check_error(self, failure):
self.assertEqual(failure.type, error.CannotListenError)
return None
def test_parse_via_plugin(self):
# make sure we have a valid thing from get_global_tor without
# actually launching tor
config = TorConfig()
config.post_bootstrap = defer.succeed(config)
from txtorcon import torconfig
torconfig._global_tor_config = None
get_global_tor(
self.reactor,
_tor_launcher=lambda react, config, prog: defer.succeed(config)
)
ep = serverFromString(
self.reactor,
'onion:88:localPort=1234:hiddenServiceDir=/foo/bar'
)
self.assertEqual(ep.public_port, 88)
self.assertEqual(ep.local_port, 1234)
self.assertEqual(ep.hidden_service_dir, '/foo/bar')
def test_parse_user_path(self):
# this makes sure we expand users and symlinks in
# hiddenServiceDir args. see Issue #77
# make sure we have a valid thing from get_global_tor without
# actually launching tor
config = TorConfig()
config.post_bootstrap = defer.succeed(config)
from txtorcon import torconfig
torconfig._global_tor_config = None
get_global_tor(
self.reactor,
_tor_launcher=lambda react, config, prog: defer.succeed(config)
)
ep = serverFromString(
self.reactor,
'onion:88:localPort=1234:hiddenServiceDir=~/blam/blarg'
)
# would be nice to have a fixed path here, but then would have
# to run as a known user :/
# maybe using the docker stuff to run integration tests better here?
self.assertEqual(
os.path.expanduser('~/blam/blarg'),
ep.hidden_service_dir
)
def test_parse_relative_path(self):
# this makes sure we convert a relative path to absolute
# hiddenServiceDir args. see Issue #77
# make sure we have a valid thing from get_global_tor without
# actually launching tor
config = TorConfig()
config.post_bootstrap = defer.succeed(config)
from txtorcon import torconfig
torconfig._global_tor_config = None
get_global_tor(
self.reactor,
_tor_launcher=lambda react, config, prog: defer.succeed(config)
)
orig = os.path.realpath('.')
try:
with util.TempDir() as t:
t = str(t)
os.chdir(t)
os.mkdir(os.path.join(t, 'foo'))
hsdir = os.path.join(t, 'foo', 'blam')
os.mkdir(hsdir)
ep = serverFromString(
self.reactor,
'onion:88:localPort=1234:hiddenServiceDir=foo/blam'
)
self.assertEqual(
os.path.realpath(hsdir),
ep.hidden_service_dir
)
finally:
os.chdir(orig)
@defer.inlineCallbacks
def test_stealth_auth(self):
'''
make sure we produce a HiddenService instance with stealth-auth
lines if we had authentication specified in the first place.
'''
config = TorConfig(self.protocol)
ep = TCPHiddenServiceEndpoint(self.reactor, config, 123, '/dev/null',
stealth_auth=['alice', 'bob'])
# make sure listen() correctly configures our hidden-service
# with the explicit directory we passed in above
d = ep.listen(NoOpProtocolFactory())
def foo(fail):
print "ERROR", fail
d.addErrback(foo)
port = yield d
self.assertEqual(1, len(config.HiddenServices))
self.assertEqual(config.HiddenServices[0].dir, '/dev/null')
self.assertEqual(config.HiddenServices[0].authorize_client[0], 'stealth alice,bob')
self.assertEqual(None, ep.onion_uri)
config.HiddenServices[0].hostname = 'oh my'
self.assertEqual('oh my', ep.onion_uri)
class EndpointLaunchTests(unittest.TestCase):
def setUp(self):
self.reactor = FakeReactorTcp(self)
self.protocol = FakeControlProtocol([])
def test_onion_address(self):
addr = TorOnionAddress("foo.onion", 80)
# just want to run these and assure they don't throw
# exceptions.
repr(addr)
hash(addr)
def test_onion_parse_unix_socket(self):
r = Mock()
ep = serverFromString(r, "onion:80:controlPort=/tmp/foo")
@patch('txtorcon.TCPHiddenServiceEndpoint.system_tor')
@patch('txtorcon.TCPHiddenServiceEndpoint.global_tor')
@patch('txtorcon.TCPHiddenServiceEndpoint.private_tor')
@defer.inlineCallbacks
def test_endpoint_launch_tor(self, private_tor, global_tor, system_tor):
"""
we just want to confirm that calling listen results in the
spawning of a Tor process; the parsing/setup from string are
checked elsewhere.
"""
reactor = proto_helpers.MemoryReactor()
ep = serverFromString(reactor, 'onion:8888')
r = yield ep.listen(NoOpProtocolFactory())
self.assertEqual(global_tor.call_count, 1)
self.assertEqual(private_tor.call_count, 0)
self.assertEqual(system_tor.call_count, 0)
@patch('txtorcon.TCPHiddenServiceEndpoint.system_tor')
@patch('txtorcon.TCPHiddenServiceEndpoint.global_tor')
@patch('txtorcon.TCPHiddenServiceEndpoint.private_tor')
@defer.inlineCallbacks
def test_endpoint_connect_tor(self, private_tor, global_tor, system_tor):
"""
similar to above test, we're confirming that an
endpoint-string with 'controlPort=xxxx' in it calls the API
that will connect to a running Tor.
"""
reactor = proto_helpers.MemoryReactor()
ep = serverFromString(
reactor,
'onion:8888:controlPort=9055:localPort=1234'
)
r = yield ep.listen(NoOpProtocolFactory())
self.assertEqual(global_tor.call_count, 0)
self.assertEqual(private_tor.call_count, 0)
self.assertEqual(system_tor.call_count, 1)
# unfortunately, we don't add the hidden-service
# configurations until we've connected to the launched Tor
# and bootstrapped a TorConfig object -- and that's a ton
# of stuff to fake out. Most of that is covered by the
# parsing tests (i.e. are we getting the right config
# values from a server-endpoint-string)
# FIXME should probably go somewhere else, so other tests can easily use these.
class FakeProtocol(object):
implements(IProtocol)
def dataReceived(self, data):
print "DATA", data
def connectionLost(self, reason):
print "LOST", reason
def makeConnection(self, transport):
print "MAKE", transport
transport.protocol = self
def connectionMade(self):
print "MADE!"
class FakeAddress(object):
implements(IAddress)
compareAttributes = ('type', 'host', 'port')
type = 'fakeTCP'
def __init__(self, host, port):
self.host = host
self.port = port
def __repr__(self):
return '%s(%r, %d)' % (
self.__class__.__name__, self.host, self.port)
def __hash__(self):
return hash((self.type, self.host, self.port))
class FakeListeningPort(object):
implements(IListeningPort)
def __init__(self, port):
self.port = port
def startListening(self):
self.factory.doStart()
def stopListening(self):
self.factory.doStop()
def getHost(self):
return FakeAddress('host', self.port)
def port_generator():
for x in xrange(65535, 0, -1):
yield x
from test_torconfig import FakeReactor # FIXME put in util or something?
from test_torconfig import FakeProcessTransport # FIXME importing from other test sucks
from test_torconfig import FakeControlProtocol # FIXME
class FakeReactorTcp(FakeReactor):
implements(IReactorTCP)
failures = 0
_port_generator = port_generator()
def __init__(self, test):
self.protocol = TorControlProtocol()
self.protocol.connectionMade = lambda: None
self.transport = proto_helpers.StringTransport()
self.transport = FakeProcessTransport()
self.transport.protocol = self.protocol
def blam():
self.protocol.outReceived("Bootstrap")
self.transport.closeStdin = blam
self.protocol.makeConnection(self.transport)
FakeReactor.__init__(self, test, self.transport, lambda x: None)
def listenTCP(self, port, factory, **kwargs):
'''returns IListeningPort'''
if self.failures > 0:
self.failures -= 1
raise error.CannotListenError(None, None, None)
if port == 0:
port = self._port_generator.next()
p = FakeListeningPort(port)
p.factory = factory
p.startListening()
return p
def connectTCP(self, host, port, factory, timeout, bindAddress):
'''should return IConnector'''
r = tcp.Connector(
host, port, factory, timeout,
bindAddress, reactor=self
)
def blam(*args):
print "BLAAAAAM", args
r.connect = blam
return r
class FakeTorSocksEndpoint(object):
def __init__(self, *args, **kw):
self.host = args[1]
self.port = args[2]
self.transport = None
self.failure = kw.get('failure', None)
self.accept_port = kw.get('accept_port', None)
def connect(self, fac):
self.factory = fac
if self.accept_port:
if self.port != self.accept_port:
return defer.fail(self.failure)
else:
if self.failure:
return defer.fail(self.failure)
self.proto = fac.buildProtocol(None)
transport = proto_helpers.StringTransport()
self.proto.makeConnection(transport)
self.transport = transport
return defer.succeed(self.proto)
class TestTorClientEndpoint(unittest.TestCase):
def test_client_connection_failed(self):
"""
This test is equivalent to txsocksx's
TestSOCKS4ClientEndpoint.test_clientConnectionFailed
"""
args = "host123"
kw = dict()
kw['failure'] = Failure(ConnectionRefusedError())
tor_endpoint = FakeTorSocksEndpoint(*args, **kw)
endpoint = TorClientEndpoint('', 0, socks_endpoint=tor_endpoint)
d = endpoint.connect(None)
return self.assertFailure(d, ConnectionRefusedError)
def test_client_connection_failed_user_password(self):
"""
Same as above, but with a username/password.
"""
args = "fakehost"
kw = dict()
kw['failure'] = Failure(ConnectionRefusedError())
tor_endpoint = FakeTorSocksEndpoint(*args, **kw)
endpoint = TorClientEndpoint(
'invalid host', 0,
socks_username='billy', socks_password='s333cure',
socks_endpoint = tor_endpoint)
d = endpoint.connect(None)
return self.assertFailure(d, ConnectionRefusedError)
def test_no_host(self):
self.assertRaises(
ValueError,
TorClientEndpoint, None, None
)
def test_parser_basic(self):
ep = clientFromString(None, 'tor:host=timaq4ygg2iegci7.onion:port=80:socksPort=9050')
self.assertEqual(ep.host, 'timaq4ygg2iegci7.onion')
self.assertEqual(ep.port, 80)
# XXX what's "the Twisted way" to get the port out here?
self.assertEqual(ep.socks_endpoint._port, 9050)
def test_parser_user_password(self):
epstring = 'tor:host=torproject.org:port=443' + \
':socksUsername=foo:socksPassword=bar'
ep = clientFromString(None, epstring)
self.assertEqual(ep.host, 'torproject.org')
self.assertEqual(ep.port, 443)
self.assertEqual(ep.socks_username, 'foo')
self.assertEqual(ep.socks_password, 'bar')
def test_default_factory(self):
"""
This test is equivalent to txsocksx's TestSOCKS5ClientEndpoint.test_defaultFactory
"""
args = "fakehost"
kw = dict()
tor_endpoint = FakeTorSocksEndpoint(*args, **kw)
endpoint = TorClientEndpoint('', 0, socks_endpoint=tor_endpoint)
endpoint.connect(Mock)
self.assertEqual(tor_endpoint.transport.value(), '\x05\x01\x00')
@patch('txtorcon.endpoints.SOCKS5ClientEndpoint')
@defer.inlineCallbacks
def test_success(self, socks5_factory):
ep = MagicMock()
gold_proto = object()
ep.connect = MagicMock(return_value=gold_proto)
socks5_factory.return_value = ep
args = "fakehost"
kw = dict()
tor_endpoint = FakeTorSocksEndpoint(*args, **kw)
endpoint = TorClientEndpoint('', 0, socks_endpoint = tor_endpoint)
other_proto = yield endpoint.connect(MagicMock())
self.assertEqual(other_proto, gold_proto)
def test_good_port_retry(self):
"""
This tests that our Tor client endpoint retry logic works correctly.
We create a proxy endpoint that fires a ConnectionRefusedError
unless the connecting port matches. We attempt to connect with the
proxy endpoint for each port that the Tor client endpoint will try.
"""
success_ports = TorClientEndpoint.socks_ports_to_try
for port in success_ports:
tor_endpoint = FakeTorSocksEndpoint(
"fakehost", "127.0.0.1", port,
accept_port=port,
failure=Failure(ConnectionRefusedError()),
)
endpoint = TorClientEndpoint('', 0, socks_endpoint=tor_endpoint)
endpoint.connect(None)
self.assertEqual(tor_endpoint.transport.value(), '\x05\x01\x00')
def test_bad_port_retry(self):
"""
This tests failure to connect to the ports on the "try" list.
"""
fail_ports = [1984, 666]
for port in fail_ports:
ep = FakeTorSocksEndpoint(
'', '', 0,
accept_port=port,
failure=Failure(ConnectionRefusedError()),
)
endpoint = TorClientEndpoint('', 0, socks_endpoint=ep)
d = endpoint.connect(None)
return self.assertFailure(d, ConnectionRefusedError)
@patch('txtorcon.endpoints.SOCKS5ClientEndpoint')
def test_default_socks_ports_fails(self, ep_mock):
"""
Ensure we iterate over the default socks ports
"""
class FakeSocks5(object):
def __init__(self, *args, **kw):
pass
def connect(self, *args, **kw):
raise ConnectionRefusedError()
ep_mock.side_effect = FakeSocks5
endpoint = TorClientEndpoint('', 0)#, socks_endpoint=ep)
d = endpoint.connect(None)
self.assertFailure(d, ConnectionRefusedError)
@patch('txtorcon.endpoints.SOCKS5ClientEndpoint')
@defer.inlineCallbacks
def test_default_socks_ports_happy(self, ep_mock):
"""
Ensure we iterate over the default socks ports
"""
proto = object()
class FakeSocks5(object):
def __init__(self, *args, **kw):
pass
def connect(self, *args, **kw):
return proto
ep_mock.side_effect = FakeSocks5
endpoint = TorClientEndpoint('', 0)
p2 = yield endpoint.connect(None)
self.assertTrue(proto is p2)
@patch('txtorcon.endpoints.SOCKS5ClientEndpoint')
@defer.inlineCallbacks
def test_tls_socks_no_endpoint(self, ep_mock):
if not _HAVE_TLS:
print("no TLS support")
return
class FakeWrappedProto(object):
wrappedProtocol = object()
wrap = FakeWrappedProto()
proto = defer.succeed(wrap)
class FakeSocks5(object):
def __init__(self, *args, **kw):
pass
def connect(self, *args, **kw):
return proto
ep_mock.side_effect = FakeSocks5
endpoint = TorClientEndpoint('torproject.org', 0, tls=True)
p2 = yield endpoint.connect(None)
self.assertTrue(wrap.wrappedProtocol is p2)
@patch('txtorcon.endpoints.SOCKS5ClientEndpoint')
@defer.inlineCallbacks
def test_tls_socks_with_endpoint(self, ep_mock):
"""
Same as above, except we provide an explicit endpoint
"""
if not _HAVE_TLS:
print("no TLS support")
return
class FakeWrappedProto(object):
wrappedProtocol = object()
wrap = FakeWrappedProto()
proto = defer.succeed(wrap)
class FakeSocks5(object):
def __init__(self, *args, **kw):
pass
def connect(self, *args, **kw):
return proto
ep_mock.side_effect = FakeSocks5
endpoint = TorClientEndpoint(
'torproject.org', 0,
socks_endpoint=clientFromString(Mock(), "tcp:localhost:9050"),
tls=True,
)
p2 = yield endpoint.connect(None)
self.assertTrue(wrap.wrappedProtocol is p2)
@patch('txtorcon.endpoints.reactor') # FIXME should be passing reactor to TorClientEndpoint :/
def test_client_endpoint_old_api(self, reactor):
"""
Test the old API of passing socks_host, socks_port
"""
endpoint = TorClientEndpoint(
'torproject.org', 0,
socks_hostname='localhost',
socks_port=9050,
)
self.assertTrue(isinstance(endpoint.socks_endpoint, TCP4ClientEndpoint))
d = endpoint.connect(Mock())
calls = reactor.mock_calls
self.assertEqual(1, len(calls))
name, args, kw = calls[0]
self.assertEqual("connectTCP", name)
self.assertEqual("localhost", args[0])
self.assertEqual(9050, args[1])
|
|
from __future__ import absolute_import, division, print_function
import locale
import re
import os
import sys
import stat
import subprocess
from glob import glob
from os.path import (basename, dirname, join, splitext, isdir, isfile, exists,
islink, realpath, relpath)
try:
from os import readlink
except ImportError:
readlink = False
import io
from collections import defaultdict
from conda_build.config import config
from conda_build import external
from conda_build import environ
from conda_build import utils
from conda_build import source
from conda.compat import lchmod
from conda.misc import walk_prefix
from conda.utils import md5_file
if sys.platform.startswith('linux'):
from conda_build import elf
elif sys.platform == 'darwin':
from conda_build import macho
SHEBANG_PAT = re.compile(r'^#!.+$', re.M)
def is_obj(path):
assert sys.platform != 'win32'
return bool((sys.platform.startswith('linux') and elf.is_elf(path)) or
(sys.platform == 'darwin' and macho.is_macho(path)))
def fix_shebang(f, osx_is_app=False):
path = join(config.build_prefix, f)
if is_obj(path):
return
elif os.path.islink(path):
return
with io.open(path, encoding=locale.getpreferredencoding()) as fi:
try:
data = fi.read()
except UnicodeDecodeError: # file is binary
return
m = SHEBANG_PAT.match(data)
if not (m and 'python' in m.group()):
return
py_exec = ('/bin/bash ' + config.build_prefix + '/bin/python.app'
if sys.platform == 'darwin' and osx_is_app else
config.build_prefix + '/bin/' + basename(config.build_python))
new_data = SHEBANG_PAT.sub('#!' + py_exec, data, count=1)
if new_data == data:
return
print("updating shebang:", f)
with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:
fo.write(new_data)
os.chmod(path, int('755', 8))
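# Illustrative example (paths are placeholders, not real config values): a script in
# bin/ whose first line is
#   #!/usr/local/bin/python2.7
# would be rewritten, assuming config.build_prefix == '/opt/conda-bld/_build' and
# basename(config.build_python) == 'python', to
#   #!/opt/conda-bld/_build/bin/python
# (or to '#!/bin/bash <build_prefix>/bin/python.app' on macOS when osx_is_app is set).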
def write_pth(egg_path):
fn = basename(egg_path)
with open(join(environ.get_sp_dir(),
'%s.pth' % (fn.split('-')[0])), 'w') as fo:
fo.write('./%s\n' % fn)
def remove_easy_install_pth(files, preserve_egg_dir=False):
"""
remove the need for easy-install.pth and finally remove easy-install.pth
itself
"""
absfiles = [join(config.build_prefix, f) for f in files]
sp_dir = environ.get_sp_dir()
for egg_path in glob(join(sp_dir, '*-py*.egg')):
if isdir(egg_path):
if preserve_egg_dir or not any(join(egg_path, i) in absfiles for i
in walk_prefix(egg_path, False, windows_forward_slashes=False)):
write_pth(egg_path)
continue
print('found egg dir:', egg_path)
try:
os.rename(join(egg_path, 'EGG-INFO'),
egg_path + '-info')
except OSError:
pass
utils.rm_rf(join(egg_path, 'EGG-INFO'))
for fn in os.listdir(egg_path):
if fn == '__pycache__':
utils.rm_rf(join(egg_path, fn))
else:
# this might be a name-space package
# so the package directory already exists
# from another installed dependency
if os.path.exists(join(sp_dir, fn)):
utils.copy_into(join(egg_path, fn), join(sp_dir, fn))
utils.rm_rf(join(egg_path, fn))
else:
os.rename(join(egg_path, fn), join(sp_dir, fn))
elif isfile(egg_path):
if not egg_path in absfiles:
continue
print('found egg:', egg_path)
write_pth(egg_path)
utils.rm_rf(join(sp_dir, 'easy-install.pth'))
def rm_py_along_so():
"remove .py (.pyc) files alongside .so or .pyd files"
for root, dirs, files in os.walk(config.build_prefix):
for fn in files:
if fn.endswith(('.so', '.pyd')):
name, unused_ext = splitext(fn)
for ext in '.py', '.pyc':
if name + ext in files:
os.unlink(join(root, name + ext))
def compile_missing_pyc():
sp_dir = environ.get_sp_dir()
stdlib_dir = environ.get_stdlib_dir()
need_compile = False
for root, dirs, files in os.walk(sp_dir):
for fn in files:
if fn.endswith('.py') and fn + 'c' not in files:
need_compile = True
break
if need_compile:
print('compiling .pyc files...')
utils._check_call([config.build_python, '-Wi',
join(stdlib_dir, 'compileall.py'),
'-q', '-x', 'port_v3', sp_dir])
def post_process(files, preserve_egg_dir=False):
remove_easy_install_pth(files, preserve_egg_dir=preserve_egg_dir)
rm_py_along_so()
if config.CONDA_PY < 30:
compile_missing_pyc()
def find_lib(link, path=None):
from conda_build.build import prefix_files
files = prefix_files()
if link.startswith(config.build_prefix):
link = link[len(config.build_prefix) + 1:]
if link not in files:
sys.exit("Error: Could not find %s" % link)
return link
if link.startswith('/'): # but doesn't start with the build prefix
return
if link.startswith('@rpath/'):
# Assume the rpath already points to lib, so there is no need to
# change it.
return
if '/' not in link or link.startswith('@executable_path/'):
link = basename(link)
file_names = defaultdict(list)
for f in files:
file_names[basename(f)].append(f)
if link not in file_names:
sys.exit("Error: Could not find %s" % link)
if len(file_names[link]) > 1:
if path and basename(path) == link:
# The link is for the file itself, just use it
return path
# Allow for the possibility of the same library appearing in
# multiple places.
md5s = set()
for f in file_names[link]:
md5s.add(md5_file(join(config.build_prefix, f)))
if len(md5s) > 1:
sys.exit("Error: Found multiple instances of %s: %s" % (link, file_names[link]))
else:
file_names[link].sort()
print("Found multiple instances of %s (%s). "
"Choosing the first one." % (link, file_names[link]))
return file_names[link][0]
print("Don't know how to find %s, skipping" % link)
def osx_ch_link(path, link):
print("Fixing linking of %s in %s" % (link, path))
link_loc = find_lib(link, path)
if not link_loc:
return
lib_to_link = relpath(dirname(link_loc), 'lib')
# path_to_lib = utils.relative(path[len(config.build_prefix) + 1:])
# e.g., if
# path = '/build_prefix/lib/some/stuff/libstuff.dylib'
# link_loc = 'lib/things/libthings.dylib'
# then
# lib_to_link = 'things'
# path_to_lib = '../..'
# @rpath always means 'lib', link will be at
# @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.
# For when we can't use @rpath, @loader_path means the path to the library
# ('path'), so from path to link is
# @loader_path/path_to_lib/lib_to_link/basename(link), like
# @loader_path/../../things/libthings.dylib.
ret = '@rpath/%s/%s' % (lib_to_link, basename(link))
# XXX: IF the above fails for whatever reason, the below can be used
# TODO: This might contain redundant ..'s if link and path are both in
# some subdirectory of lib.
# ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))
ret = ret.replace('/./', '/')
return ret
def mk_relative_osx(path, build_prefix=None):
'''
If build_prefix is None, then this is a standard conda build. The path
and all dependencies are in the build_prefix.
If the package is built in develop mode, build_prefix is specified. The object
specified by 'path' needs to relink its runtime dependencies to libs found in
build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'.
'''
if build_prefix is None:
assert path.startswith(config.build_prefix + '/')
else:
config.short_build_prefix = build_prefix
assert sys.platform == 'darwin' and is_obj(path)
s = macho.install_name_change(path, osx_ch_link)
names = macho.otool(path)
if names:
# Strictly speaking, not all object files have install names (e.g.,
# bundles and executables do not). In that case, the first name here
# will not be the install name (i.e., the id), but it isn't a problem,
# because in that case it will be a no-op (with the exception of stub
# files, which give an error, which is handled below).
args = [
'install_name_tool',
'-id',
join('@rpath', relpath(dirname(path),
join(config.build_prefix, 'lib')),
basename(names[0])),
path,
]
print(' '.join(args))
return_code = 0
try:
stdout, stderr = utils.execute(args)
except subprocess.CalledProcessError as exc:
stdout, stderr = exc.output
return_code = exc.return_code
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s" % path)
return
else:
print(stderr, file=sys.stderr)
if return_code:
raise RuntimeError("install_name_tool failed with exit "
"status %d" % return_code)
# Add an rpath to every executable to increase the chances of it
# being found.
args = [
'install_name_tool',
'-add_rpath',
join('@loader_path',
relpath(join(config.build_prefix, 'lib'),
dirname(path)), '').replace('/./', '/'),
path,
]
print(' '.join(args))
return_code = 0
try:
stdout, stderr = utils.execute(args)
except subprocess.CalledProcessError as exc:
stdout, stderr = exc.output
return_code = exc.return_code
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
return
elif "would duplicate path, file already has LC_RPATH for:" in stderr:
print("Skipping -add_rpath, file already has LC_RPATH set")
return
else:
print(stderr, file=sys.stderr)
if return_code:
raise RuntimeError("install_name_tool failed with exit "
"status %d" % return_code)
if s:
# Skip for stub files, which have to use binary_has_prefix_files to be
# made relocatable.
assert_relative_osx(path)
def mk_relative_linux(f, rpaths=('lib',)):
path = join(config.build_prefix, f)
rpath = ':'.join('$ORIGIN/' + utils.relative(f, d) if not
d.startswith('/') else d for d in rpaths)
patchelf = external.find_executable('patchelf')
print('patchelf: file: %s\n setting rpath to: %s' % (path, rpath))
utils.execute([patchelf, '--force-rpath', '--set-rpath', rpath, path])
def assert_relative_osx(path):
for name in macho.otool(path):
assert not name.startswith(config.build_prefix), path
def mk_relative(m, f):
assert sys.platform != 'win32'
path = join(config.build_prefix, f)
if not is_obj(path):
return
if sys.platform.startswith('linux'):
mk_relative_linux(f, rpaths=m.get_value('build/rpaths', ['lib']))
elif sys.platform == 'darwin':
mk_relative_osx(path)
def fix_permissions(files):
print("Fixing permissions")
for root, dirs, unused_files in os.walk(config.build_prefix):
for dn in dirs:
lchmod(join(root, dn), int('755', 8))
for f in files:
path = join(config.build_prefix, f)
st = os.lstat(path)
lchmod(path, stat.S_IMODE(st.st_mode) | stat.S_IWUSR) # chmod u+w
def post_build(m, files):
print('number of files:', len(files))
fix_permissions(files)
if sys.platform == 'win32':
return
binary_relocation = bool(m.get_value('build/binary_relocation', True))
if not binary_relocation:
print("Skipping binary relocation logic")
osx_is_app = bool(m.get_value('build/osx_is_app', False))
for f in files:
if f.startswith('bin/'):
fix_shebang(f, osx_is_app=osx_is_app)
if binary_relocation:
mk_relative(m, f)
check_symlinks(files)
def check_symlinks(files):
if readlink is False:
return # Not on Unix system
msgs = []
real_build_prefix = realpath(config.build_prefix)
for f in files:
path = join(real_build_prefix, f)
if islink(path):
link_path = readlink(path)
real_link_path = realpath(path)
if real_link_path.startswith(real_build_prefix):
# If the path is in the build prefix, this is fine, but
# the link needs to be relative
if not link_path.startswith('.'):
# Don't change the link structure if it is already a
# relative link. It's possible that ..'s later in the path
# can result in a broken link still, but we'll assume that
# such crazy things don't happen.
print("Making absolute symlink %s -> %s relative" % (f, link_path))
os.unlink(path)
os.symlink(relpath(real_link_path, dirname(path)), path)
else:
# Symlinks to absolute paths on the system (like /usr) are fine.
if real_link_path.startswith(config.croot):
msgs.append("%s is a symlink to a path that may not "
"exist after the build is completed (%s)" % (f, link_path))
if msgs:
for msg in msgs:
print("Error: %s" % msg, file=sys.stderr)
sys.exit(1)
def get_build_metadata(m):
src_dir = source.get_dir()
if exists(join(src_dir, '__conda_version__.txt')):
with open(join(src_dir, '__conda_version__.txt')) as f:
version = f.read().strip()
print("Setting version from __conda_version__.txt: %s" % version)
m.meta['package']['version'] = version
if exists(join(src_dir, '__conda_buildnum__.txt')):
with open(join(src_dir, '__conda_buildnum__.txt')) as f:
build_number = f.read().strip()
print("Setting build number from __conda_buildnum__.txt: %s" %
build_number)
m.meta['build']['number'] = build_number
if exists(join(src_dir, '__conda_buildstr__.txt')):
with open(join(src_dir, '__conda_buildstr__.txt')) as f:
buildstr = f.read().strip()
print("Setting version from __conda_buildstr__.txt: %s" % buildstr)
m.meta['build']['string'] = buildstr
|
|
"""Support for Synology NAS Sensors."""
from datetime import timedelta
import logging
from SynologyDSM import SynologyDSM
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_VERSION,
CONF_DISKS,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
EVENT_HOMEASSISTANT_START,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Synology"
CONF_VOLUMES = "volumes"
DEFAULT_NAME = "Synology DSM"
DEFAULT_PORT = 5001
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
_UTILISATION_MON_COND = {
"cpu_other_load": ["CPU Load (Other)", "%", "mdi:chip"],
"cpu_user_load": ["CPU Load (User)", "%", "mdi:chip"],
"cpu_system_load": ["CPU Load (System)", "%", "mdi:chip"],
"cpu_total_load": ["CPU Load (Total)", "%", "mdi:chip"],
"cpu_1min_load": ["CPU Load (1 min)", "%", "mdi:chip"],
"cpu_5min_load": ["CPU Load (5 min)", "%", "mdi:chip"],
"cpu_15min_load": ["CPU Load (15 min)", "%", "mdi:chip"],
"memory_real_usage": ["Memory Usage (Real)", "%", "mdi:memory"],
"memory_size": ["Memory Size", "Mb", "mdi:memory"],
"memory_cached": ["Memory Cached", "Mb", "mdi:memory"],
"memory_available_swap": ["Memory Available (Swap)", "Mb", "mdi:memory"],
"memory_available_real": ["Memory Available (Real)", "Mb", "mdi:memory"],
"memory_total_swap": ["Memory Total (Swap)", "Mb", "mdi:memory"],
"memory_total_real": ["Memory Total (Real)", "Mb", "mdi:memory"],
"network_up": ["Network Up", "Kbps", "mdi:upload"],
"network_down": ["Network Down", "Kbps", "mdi:download"],
}
_STORAGE_VOL_MON_COND = {
"volume_status": ["Status", None, "mdi:checkbox-marked-circle-outline"],
"volume_device_type": ["Type", None, "mdi:harddisk"],
"volume_size_total": ["Total Size", None, "mdi:chart-pie"],
"volume_size_used": ["Used Space", None, "mdi:chart-pie"],
"volume_percentage_used": ["Volume Used", "%", "mdi:chart-pie"],
"volume_disk_temp_avg": ["Average Disk Temp", None, "mdi:thermometer"],
"volume_disk_temp_max": ["Maximum Disk Temp", None, "mdi:thermometer"],
}
_STORAGE_DSK_MON_COND = {
"disk_name": ["Name", None, "mdi:harddisk"],
"disk_device": ["Device", None, "mdi:dots-horizontal"],
"disk_smart_status": ["Status (Smart)", None, "mdi:checkbox-marked-circle-outline"],
"disk_status": ["Status", None, "mdi:checkbox-marked-circle-outline"],
"disk_exceed_bad_sector_thr": ["Exceeded Max Bad Sectors", None, "mdi:test-tube"],
"disk_below_remain_life_thr": ["Below Min Remaining Life", None, "mdi:test-tube"],
"disk_temp": ["Temperature", None, "mdi:thermometer"],
}
_MONITORED_CONDITIONS = (
list(_UTILISATION_MON_COND.keys())
+ list(_STORAGE_VOL_MON_COND.keys())
+ list(_STORAGE_DSK_MON_COND.keys())
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=True): cv.boolean,
vol.Optional(CONF_API_VERSION): cv.positive_int,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]
),
vol.Optional(CONF_DISKS): cv.ensure_list,
vol.Optional(CONF_VOLUMES): cv.ensure_list,
}
)
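# Example configuration.yaml entry matching the schema above (illustrative sketch;
# the platform key is assumed from this integration's name, and host/credentials
# are placeholders):
#
#   sensor:
#     - platform: synology
#       host: 192.168.1.2
#       username: !secret synology_username
#       password: !secret synology_password
#       monitored_conditions:
#         - cpu_total_load
#         - memory_real_usage
#         - volume_percentage_used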
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Synology NAS Sensor."""
def run_setup(event):
"""Wait until Home Assistant is fully initialized before creating.
Delay the setup until Home Assistant is fully initialized.
This allows any entities to be created already
"""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_ssl = config.get(CONF_SSL)
unit = hass.config.units.temperature_unit
monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
api_version = config.get(CONF_API_VERSION)
api = SynoApi(host, port, username, password, unit, use_ssl, api_version)
sensors = [
SynoNasUtilSensor(api, name, variable, _UTILISATION_MON_COND[variable])
for variable in monitored_conditions
if variable in _UTILISATION_MON_COND
]
# Handle all volumes
if api.storage.volumes is not None:
for volume in config.get(CONF_VOLUMES, api.storage.volumes):
sensors += [
SynoNasStorageSensor(
api, name, variable, _STORAGE_VOL_MON_COND[variable], volume
)
for variable in monitored_conditions
if variable in _STORAGE_VOL_MON_COND
]
# Handle all disks
if api.storage.disks is not None:
for disk in config.get(CONF_DISKS, api.storage.disks):
sensors += [
SynoNasStorageSensor(
api, name, variable, _STORAGE_DSK_MON_COND[variable], disk
)
for variable in monitored_conditions
if variable in _STORAGE_DSK_MON_COND
]
add_entities(sensors, True)
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SynoApi:
"""Class to interface with Synology DSM API."""
def __init__(self, host, port, username, password, temp_unit, use_ssl, api_version):
"""Initialize the API wrapper class."""
self.temp_unit = temp_unit
try:
self._api = SynologyDSM(
host,
port,
username,
password,
use_https=use_ssl,
debugmode=False,
dsm_version=api_version,
)
except: # noqa: E722 pylint: disable=bare-except
_LOGGER.error("Error setting up Synology DSM")
# Will be updated when update() gets called.
self.utilisation = self._api.utilisation
self.storage = self._api.storage
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update function for updating api information."""
self._api.update()
class SynoNasSensor(Entity):
"""Representation of a Synology NAS Sensor."""
def __init__(self, api, name, variable, variable_info, monitor_device=None):
"""Initialize the sensor."""
self.var_id = variable
self.var_name = "{} {}".format(name, variable_info[0])
self.var_units = variable_info[1]
self.var_icon = variable_info[2]
self.monitor_device = monitor_device
self._api = api
@property
def name(self):
"""Return the name of the sensor, if any."""
if self.monitor_device is not None:
return f"{self.var_name} ({self.monitor_device})"
return self.var_name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self.var_id in ["volume_disk_temp_avg", "volume_disk_temp_max", "disk_temp"]:
return self._api.temp_unit
return self.var_units
def update(self):
"""Get the latest data for the states."""
if self._api is not None:
self._api.update()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
class SynoNasUtilSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
network_sensors = ["network_up", "network_down"]
memory_sensors = [
"memory_size",
"memory_cached",
"memory_available_swap",
"memory_available_real",
"memory_total_swap",
"memory_total_real",
]
if self.var_id in network_sensors or self.var_id in memory_sensors:
attr = getattr(self._api.utilisation, self.var_id)(False)
if attr is None:
return None
if self.var_id in network_sensors:
return round(attr / 1024.0, 1)
if self.var_id in memory_sensors:
return round(attr / 1024.0 / 1024.0, 1)
else:
return getattr(self._api.utilisation, self.var_id)
class SynoNasStorageSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
temp_sensors = ["volume_disk_temp_avg", "volume_disk_temp_max", "disk_temp"]
if self.monitor_device is not None:
if self.var_id in temp_sensors:
attr = getattr(self._api.storage, self.var_id)(self.monitor_device)
if attr is None:
return None
if self._api.temp_unit == TEMP_CELSIUS:
return attr
return round(attr * 1.8 + 32.0, 1)
return getattr(self._api.storage, self.var_id)(self.monitor_device)
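# Illustrative only: SynoApi and the sensor classes above are normally
# constructed by setup_platform(); a manual sketch with placeholder
# host/credentials would look like
#
#   api = SynoApi('192.168.1.2', 5001, 'admin', 'secret', TEMP_CELSIUS, True, None)
#   api.update()
#   print(api.utilisation, api.storage)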
|
|
import json
import requests
import re
from collections import namedtuple
from base64 import b64encode
from urllib.parse import quote, quote_plus
from enum import Enum
from contextlib import closing
from time import time
from math import floor
class CouchDBException(Exception):
def __init__(self, response):
self._response = response
@property
def status(self):
return self._response.status
@property
def reason(self):
return self._response.reason
@property
def body(self):
return self._response.body
@property
def content_type(self):
return self._response.content_type
@property
def is_json(self):
return self._response.is_json
def __str__(self):
if self.is_json:
return '{self.status}: {self.reason} - {self.body.reason}'.format(self=self)
else:
return '{self.status}: {self.reason}'.format(self=self)
class CouchDB:
class _Authentication:
def __init__(self, username, password):
self._username = username
self._password = password
def __eq__(self, other):
return type(self) == type(other) and self._username == other.username and self._password == other.password
@property
def username(self):
return self._username
@property
def password(self):
return self._password
@property
def basic_auth(self):
auth = self._username + ':' + self._password
auth = auth.encode()
return b64encode(auth).decode("ascii")
@property
def url_auth(self):
return quote_plus(self._username) + ':' + quote_plus(self._password)
class DatabaseType(Enum):
Unknown = 0
CouchDB = 1
AvanceDB = 2
PouchDB = 3
Cloudant = 4
class DatabaseVersion:
def __init__(self, version):
self._version = None
self._major = -1
self._minor = -1
self._build = -1
if version and type(version) is str:
m = re.search(r'^([0-9]+)\.([0-9]+)\.([0-9]+)$', version)
if m:
self._version = version
self._major = m.group(1)
self._minor = m.group(2)
self._build = m.group(3)
@property
def valid(self):
return self._version is not None
@property
def version(self):
return self._version
@property
def major(self):
return int(self._major)
@property
def minor(self):
return int(self._minor)
@property
def build(self):
return int(self._build)
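# Illustrative examples of the version parser above:
#   CouchDB.DatabaseVersion('2.3.1').valid   # True,  major/minor/build == 2/3/1
#   CouchDB.DatabaseVersion('2.3').valid     # False, major/minor/build == -1/-1/-1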
class Response:
def __init__(self, response, body=None, content_type=None):
self._response = response
self._content_type = content_type if content_type is not None else response.headers['content-type']
self._body = body
@property
def status(self):
return self._response.status_code
@property
def reason(self):
return self._response.reason
@property
def body(self):
return self._body
@property
def content_type(self):
return self._content_type
@property
def is_json(self):
return self._content_type.find('application/json') == 0
_auth_cache = {}
_session = requests.Session()
def __init__(self, host, port, secure, get_credentials=None, auth=None, signature=None):
self._host = host
self._port = int(port)
self._secure = secure
self._get_credentials = get_credentials
self._auth = auth
self._auth_active = False
self._signature = signature
def clone(self):
return CouchDB(self._host, self._port, self._secure, get_credentials=self._get_credentials,
auth=self._auth, signature=self._signature)
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def db_type(self):
db_type = CouchDB.DatabaseType.CouchDB
signature = self.get_signature()
if getattr(signature, 'express_pouchdb', None):
db_type = CouchDB.DatabaseType.PouchDB
elif getattr(signature, 'avancedb', None):
db_type = CouchDB.DatabaseType.AvanceDB
elif getattr(signature, 'cloudant_build', None):
db_type = CouchDB.DatabaseType.Cloudant
return db_type
@property
def db_version(self):
signature = self.get_signature()
version = getattr(signature, 'version', None)
if version:
return CouchDB.DatabaseVersion(version)
else:
return None
@property
def auth(self):
return self._auth
@property
def get_credentials_callback(self):
return self._get_credentials
@property
def secure(self):
return self._secure
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def get_url(self):
url = 'https' if self._secure else 'http'
url += '://' + self._host
if (self._secure and self._port != 443) or (not self._secure and self._port != 80):
url += ':' + str(self._port)
return url + '/'
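# Illustrative examples (default ports are omitted from the URL):
#   CouchDB('db.example.com', 5984, False).get_url()  # 'http://db.example.com:5984/'
#   CouchDB('db.example.com', 443, True).get_url()    # 'https://db.example.com/'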
def get_signature(self):
if not self._signature:
response = self._make_request('/')
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
self._signature = response.body
return self._signature
def get_session(self):
response = self._make_request('/_session')
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
return response.body
def create_database(self, name):
response = self._make_request('/', 'PUT', db_name=name)
if response.status != 201 or not response.is_json:
raise CouchDBException(response)
def get_database(self, name):
response = self._make_request('/', db_name=name)
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
return response.body
def delete_database(self, name):
response = self._make_request('/', 'DELETE', db_name=name)
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
def get_databases(self):
response = self._make_request('/_all_dbs')
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
return response.body
def get_docs(self, name, limit=10):
query_string = 'include_docs=true' + ('&limit=' + str(limit) if limit is not None else '')
response = self._make_request('/_all_docs?' + query_string, db_name=name)
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
docs = [row.doc for row in response.body.rows]
return docs
def get_active_tasks(self, task_type=None):
response = self._make_request('/_active_tasks')
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
tasks = response.body
if task_type:
tasks = [task for task in tasks if task.type == task_type]
return tasks
def get_revs_limit(self, name):
response = self._make_request('/_revs_limit', 'GET', db_name=name)
if response.status != 200:
raise CouchDBException(response)
try:
response = int(response.body)
except (TypeError, ValueError):
raise CouchDBException(response)
return response
def set_revs_limit(self, name, limit):
response = self._make_request('/_revs_limit', 'PUT', body=str(limit), content_type='application/json', db_name=name)
if response.status != 200 or not response.is_json:
raise CouchDBException(response)
return response.body
def create_replication(self, source, target, create_target=False, continuous=False):
# create a sane-ish replication document id
now = floor(time())
repl_id = '{0}_{1}_{2}'.format(now, source, target)
repl_id = re.sub('[^a-zA-Z0-9]', '_', repl_id)
repl_id = re.sub('_+', '_', repl_id)
job = {'_id': repl_id, 'source': source, 'target': target, 'create_target': create_target,
'continuous': continuous}
if create_target:
session = self.get_session()
user_ctx = session.userCtx
job['user_ctx'] = {'name': user_ctx.name, 'roles': user_ctx.roles}
job_json = json.dumps(job)
response = self._make_request('/_replicator', 'POST', job_json, 'application/json')
if response.status != 201 or not response.is_json:
raise CouchDBException(response)
return response.body
def compact_database(self, name):
response = self._make_request('/_compact', 'POST', None, 'application/json', db_name=name)
if response.status != 202 or not response.is_json:
raise CouchDBException(response)
def _make_request(self, uri, method='GET', body=None, content_type=None, db_name=None):
auth = None
if self._auth:
auth = (self._auth.username, self._auth.password)
headers = {}
if (method == 'PUT' or method == 'POST') and content_type is not None:
headers['Content-Type'] = content_type
request = getattr(CouchDB._session, method.lower())
if db_name:
uri = '/' + CouchDB.encode_db_name(db_name) + uri
server_url = self.get_url()
with closing(request(server_url + uri[1::], headers=headers, data=body, auth=auth)) as response:
if (response.status_code == 401 or response.status_code == 403) and \
callable(self._get_credentials) and not self._auth_active:
try:
auth = self._auth_cache.get(server_url, None)
if auth and not auth == self._auth:
self._auth = auth
else:
self._auth_cache.pop(server_url, None)
self._auth = None
self._auth_active = True
creds = self._get_credentials(server_url)
self._auth_active = False
if creds:
self._auth = self._Authentication(creds.username, creds.password)
if self._auth:
result = self._make_request(uri, method, body, content_type)
self._auth_cache[server_url] = self._auth
return result
finally:
self._auth_active = False
response_body = response.text
response_content_type = response.headers['content-type']
if response_content_type.find('text/plain') == 0 and \
len(response_body) > 0 and \
(response_body[0] == '{' or response_body[0] == '['):
response_content_type = response_content_type.replace('text/plain', 'application/json')
if response_content_type.find('application/json') == 0:
response_body = json.loads(
response_body,
object_hook=lambda o: namedtuple('CouchDBResponse', CouchDB._validate_keys(o.keys()))(*o.values()))
return CouchDB.Response(response, response_body, response_content_type)
@staticmethod
def _validate_keys(keys):
new_keys = []
for key in keys:
new_keys.append(key.replace('-', '_'))
return new_keys
@staticmethod
def encode_db_name(name):
return quote(name, '')
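# A minimal usage sketch (illustrative; host, port and credentials are
# placeholders, and _Authentication is the helper class defined above):
def _example_couchdb_usage():
    creds = CouchDB._Authentication('admin', 'secret')
    with CouchDB('127.0.0.1', 5984, False, auth=creds) as couch:
        couch.create_database('example')
        try:
            # List all databases, then read back the (empty) new database.
            print(couch.get_databases())
            print(couch.get_docs('example', limit=5))
        finally:
            couch.delete_database('example')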
|
|
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy based configuration of libvirt objects
This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
from nova.pci import utils as pci_utils
MIN_LIBVIRT_ETHERNET_SCRIPT_PATH_NONE = (1, 3, 3)
def set_vif_guest_frontend_config(conf, mac, model, driver, queues,
rx_queue_size):
"""Populate a LibvirtConfigGuestInterface instance
with guest frontend details.
NOTE: @model, @driver, @queues and @rx_queue_size can be None.
"""
conf.mac_addr = mac
if model is not None:
conf.model = model
if driver is not None:
conf.driver_name = driver
if queues is not None:
conf.vhost_queues = queues
if rx_queue_size:
conf.vhost_rx_queue_size = rx_queue_size
def set_vif_host_backend_ethernet_config(conf, tapname, host):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an externally configured
host device.
NB use of this configuration is discouraged by
libvirt project and will mark domains as 'tainted'.
"""
conf.net_type = "ethernet"
conf.target_dev = tapname
# NOTE(mriedem): Before libvirt 1.3.3, passing script=None results
# in errors because /etc/qemu-ifup gets run which is blocked by
# AppArmor. Passing script='' between libvirt 1.3.3 and 3.1 will also
# result in errors. So we have to check the libvirt version and set
# the script value accordingly. Libvirt 3.1 allows and properly handles
# both None and '' as no-ops.
if host.has_min_version(MIN_LIBVIRT_ETHERNET_SCRIPT_PATH_NONE):
conf.script = None
else:
conf.script = ''
def set_vif_host_backend_802qbg_config(conf, devname, managerid,
typeid, typeidversion,
instanceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbg device.
"""
conf.net_type = "direct"
conf.source_dev = devname
conf.source_mode = "vepa"
conf.vporttype = "802.1Qbg"
conf.add_vport_param("managerid", managerid)
conf.add_vport_param("typeid", typeid)
conf.add_vport_param("typeidversion", typeidversion)
conf.add_vport_param("instanceid", instanceid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_802qbh_config(conf, net_type, devname, profileid,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an 802.1qbh device.
"""
conf.net_type = net_type
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:
conf.source_dev = devname
conf.model = None
conf.vporttype = "802.1Qbh"
conf.add_vport_param("profileid", profileid)
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hw_veb(conf, net_type, devname, vlan,
tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for a device that supports hardware
virtual ethernet bridge.
"""
conf.net_type = net_type
conf.vlan = vlan
if net_type == 'direct':
conf.source_mode = 'passthrough'
conf.source_dev = pci_utils.get_ifname_by_pci_address(devname)
conf.driver_name = 'vhost'
else:  # other net types (e.g. 'hostdev') use the device name as-is
conf.source_dev = devname
conf.model = None
if tapname:
conf.target_dev = tapname
def set_vif_host_backend_hostdev_pci_config(conf, pci_slot):
"""Populate a LibvirtConfigGuestHostdev instance with pci address data."""
conf.domain, conf.bus, conf.slot, conf.function = (
pci_utils.get_pci_address_fields(pci_slot))
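# Illustrative example, assuming get_pci_address_fields() splits the address
# into string fields: pci_slot '0000:06:12.3' yields conf.domain == '0000',
# conf.bus == '06', conf.slot == '12' and conf.function == '3'.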
def set_vif_host_backend_direct_config(conf, devname, mode="passthrough"):
"""Populate a LibvirtConfigGuestInterface instance
with direct Interface.
"""
conf.net_type = "direct"
conf.source_mode = mode
conf.source_dev = devname
conf.model = "virtio"
def set_vif_host_backend_vhostuser_config(conf, mode, path, rx_queue_size,
tx_queue_size):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for vhostuser socket.
NOTE: @rx_queue_size and @tx_queue_size can be None
"""
conf.net_type = "vhostuser"
conf.vhostuser_type = "unix"
conf.vhostuser_mode = mode
conf.vhostuser_path = path
if rx_queue_size:
conf.vhost_rx_queue_size = rx_queue_size
if tx_queue_size:
conf.vhost_tx_queue_size = tx_queue_size
def set_vif_mtu_config(conf, mtu):
"""Populate a LibvirtConfigGuestInterface instance
with network mtu.
"""
conf.mtu = mtu
def set_vif_bandwidth_config(conf, inst_type):
"""Config vif inbound/outbound bandwidth limit. parameters are
set in instance_type_extra_specs table, key is in the format
quota:vif_inbound_average.
"""
bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
'vif_outbound_burst']
for key, value in inst_type.get('extra_specs', {}).items():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in bandwidth_items:
setattr(conf, scope[1], value)
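# Illustrative example: a flavor with
#   inst_type = {'extra_specs': {'quota:vif_inbound_average': '1024',
#                                'quota:cpu_shares': '2048'}}
# sets conf.vif_inbound_average = '1024'; 'quota:cpu_shares' is skipped
# because 'cpu_shares' is not one of the bandwidth_items above.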
def set_numa_memnode(conf, guest_node_id, host_cell_id):
"""Prepares numa memory node config for the guest.
"""
conf.cellid = guest_node_id
conf.nodeset = [host_cell_id]
conf.mode = "strict"
def set_vcpu_realtime_scheduler(conf, vcpus_rt, priority):
"""Prepares realtime config for the guest."""
conf.vcpus = vcpus_rt
conf.scheduler = "fifo"
conf.priority = priority
def set_driver_iommu_for_sev(conf):
for dev in conf.devices:
if dev.uses_virtio:
dev.driver_iommu = True
|
|
# import transaction
# import datetime
from pyramid.httpexceptions import HTTPFound
# from pyramid.renderers import get_renderer
from ..models import (
StoredQuery,
)
# import json
from ..lib import query_f, filter_funcs
from .. import config
def table(request):
request.do_not_log = True
# the_user = config['get_user_func'](request)
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
data = the_query.extract_data()
query_f.check_query_data(data)
action = request.params['action']
existing_tables = the_query.jdata.get('tables', [])
if action == "add":
if data['tables'] == []:
existing_tables = [request.params['table']]
else:
raise Exception("Can't have more than 1 table yet")
# existing_tables.append(new_table)
elif action == "delete":
table_id = int(request.params['table'])
table_name = existing_tables[table_id]
membership_tester = lambda v: ("%s." % table_name) not in v
data['columns'] = list(filter(membership_tester, data['columns']))
membership_tester = lambda v: ("%s." % table_name) not in v['column']
data['filters'] = list(filter(membership_tester, data['filters']))
if ("%s." % table_name) in data['key']:
data['key'] = None
existing_tables = existing_tables[:table_id] + existing_tables[table_id+1:]
else:
raise KeyError("No handler for action of '%s'" % action)
the_query.jdata['tables'] = existing_tables
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#tables" % request.route_url("concision.query.overview", query_id=query_id))
def column(request):
request.do_not_log = True
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
action = request.params['action']
existing_columns = the_query.jdata.get('columns', [])
existing_groupbys = the_query.jdata.get('groupby', [])
if action == "add":
new_column = " ".join(filter(None, (
request.params['function0'],
request.params['function1'],
request.params['function2'],
request.params['column'],
)))
existing_columns.append(new_column)
existing_groupbys.append("-")
elif action == "delete":
column_id = int(request.params['column'])
existing_columns = existing_columns[:column_id] + existing_columns[column_id+1:]
existing_groupbys = existing_groupbys[:column_id] + existing_groupbys[column_id+1:]
else:
raise KeyError("No handler for action of '%s'" % action)
the_query.jdata['columns'] = existing_columns
the_query.jdata['groupby'] = existing_groupbys
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#columns" % request.route_url("concision.query.overview", query_id=query_id))
def filters(request):
request.do_not_log = True
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
action = request.params['action']
existing_filters = the_query.jdata.get('filters', [])
if action == "add_item":
column = " ".join(filter(None, (
request.params['function0'],
request.params['function1'],
request.params['function2'],
request.params['column'],
)))
max_id = filter_funcs.get_max_id(existing_filters) + 1
new_item = {
"column": column,
"operator": request.params['operator'],
"value": request.params['value'].strip(),
"id": max_id,
}
existing_filters = filter_funcs.add_item(existing_filters, int(request.params['item_id']), new_item)
elif action == "add_group":
max_id = filter_funcs.get_max_id(existing_filters) + 1
new_item = {
"type": request.params['type'],
"contents": [],
"id": max_id,
}
existing_filters = filter_funcs.add_item(existing_filters, int(request.params['item_id']), new_item)
elif action == "delete":
filter_id = int(request.params['filter'])
existing_filters = filter_funcs.delete_filter(existing_filters, filter_id)
else:
raise KeyError("No handler for action of '%s'" % action)
the_query.jdata['filters'] = existing_filters
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#filters" % request.route_url("concision.query.overview", query_id=query_id))
def orderby(request):
request.do_not_log = True
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
action = request.params['action']
existing_orderbys = the_query.jdata.get('orderby', [])
if action == "add":
new_orderby = {
"order": request.params['order'],
"column": " ".join(filter(None, (
request.params['function0'],
request.params['function1'],
request.params['function2'],
request.params['column'],
)))
}
existing_orderbys.append(new_orderby)
elif action == "delete":
order_id = int(request.params['order'])
existing_orderbys = existing_orderbys[:order_id] + existing_orderbys[order_id+1:]
else:
raise KeyError("No handler for action of '%s'" % action)
the_query.jdata['orderby'] = existing_orderbys
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#orderby" % request.route_url("concision.query.overview", query_id=query_id))
def groupby(request):
request.do_not_log = True
action = request.params.get('action', 'add')
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
data = the_query.extract_data()
if action == "add":
groupbys = []
if 'key' in request.params:
the_query.jdata['groupby_key'] = request.params['key']
for i, c in enumerate(data['columns']):
groupbys.append(request.params[str(i)])
elif action == "delete":
groupbys = []
if 'key' in request.params:
the_query.jdata['groupby_key'] = "-"
for i, c in enumerate(data['columns']):
groupbys.append("-")
else:
raise KeyError("No handler for action of '%s'" % action)
the_query.jdata['groupby'] = groupbys
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#groupby" % request.route_url("concision.query.overview", query_id=query_id))
def do_key(request):
request.do_not_log = True
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
the_query.extract_data()
action = request.params['action']
if action == "add":
new_key = " ".join(filter(None, (
request.params['function0'],
request.params['function1'],
request.params['function2'],
request.params['key'],
)))
if new_key == "":
new_key = None
elif action == "delete":
new_key = None
if 'groupby_key' in the_query.jdata:
del the_query.jdata['groupby_key']
else:
raise KeyError("No handler for action of '%s'" % action)
the_query.jdata['key'] = new_key
the_query.compress_data()
config['DBSession'].add(the_query)
return HTTPFound(location="%s#graphing" % request.route_url("concision.query.overview", query_id=query_id))
def other(request):
request.do_not_log = True
query_id = int(request.matchdict['query_id'])
the_query = config['DBSession'].query(StoredQuery).filter(StoredQuery.id == query_id).first()
action = request.params['action']
if action == "edit":
the_query.name = request.params['query_name'].strip()
else:
raise KeyError("No handler for action of '%s'" % action)
config['DBSession'].add(the_query)
return HTTPFound(location="%s#other" % request.route_url("concision.query.overview", query_id=query_id))
|
|
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '7zrrj1', '7zxkpq', '8055hn', '80ddrf', '80nbm1', '80waq3' ]
flaskport = 8993
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
# submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
submissions = [redditSession.submission(id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
commentForest = submission.comments
comments += [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
|
"""The tests for the MQTT lock platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.lock import (
DOMAIN as LOCK_DOMAIN,
SERVICE_LOCK,
SERVICE_OPEN,
SERVICE_UNLOCK,
STATE_LOCKED,
STATE_UNLOCKED,
SUPPORT_OPEN,
)
from homeassistant.components.mqtt.lock import MQTT_LOCK_ATTRIBUTES_BLOCKED
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_encoding_subscribable_topics,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_publishing_with_custom_encoding,
help_test_reloadable,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
LOCK_DOMAIN: {"platform": "mqtt", "name": "test", "command_topic": "test-topic"}
}
async def test_controlling_state_via_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert not state.attributes.get(ATTR_SUPPORTED_FEATURES)
async_fire_mqtt_message(hass, "state-topic", "LOCKED")
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", "UNLOCKED")
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_controlling_non_default_state_via_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "closed",
"state_unlocked": "open",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "closed")
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", "open")
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_controlling_state_via_topic_and_json_message(hass, mqtt_mock):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
"value_template": "{{ value_json.val }}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"LOCKED"}')
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"UNLOCKED"}')
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_controlling_non_default_state_via_topic_and_json_message(
hass, mqtt_mock
):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "closed",
"state_unlocked": "open",
"value_template": "{{ value_json.val }}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"closed"}')
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
async_fire_mqtt_message(hass, "state-topic", '{"val":"open"}')
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test optimistic mode without state topic."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_and_explicit_optimistic(hass, mqtt_mock):
"""Test optimistic mode without state topic."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
"optimistic": True,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_support_open_and_optimistic(hass, mqtt_mock):
"""Test open function of the lock without state topic."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"payload_open": "OPEN",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == SUPPORT_OPEN
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_OPEN, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_support_open_and_explicit_optimistic(
hass, mqtt_mock
):
"""Test open function of the lock without state topic."""
assert await async_setup_component(
hass,
LOCK_DOMAIN,
{
LOCK_DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_lock": "LOCK",
"payload_unlock": "UNLOCK",
"payload_open": "OPEN",
"state_locked": "LOCKED",
"state_unlocked": "UNLOCKED",
"optimistic": True,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == SUPPORT_OPEN
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "LOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_LOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "UNLOCK", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
LOCK_DOMAIN, SERVICE_OPEN, {ATTR_ENTITY_ID: "lock.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("lock.test")
assert state.state is STATE_UNLOCKED
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG, MQTT_LOCK_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one lock per unique_id."""
config = {
LOCK_DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, LOCK_DOMAIN, config)
async def test_discovery_removal_lock(hass, mqtt_mock, caplog):
"""Test removal of discovered lock."""
data = '{ "name": "test",' ' "command_topic": "test_topic" }'
await help_test_discovery_removal(hass, mqtt_mock, caplog, LOCK_DOMAIN, data)
async def test_discovery_update_lock(hass, mqtt_mock, caplog):
"""Test update of discovered lock."""
config1 = {
"name": "Beer",
"state_topic": "test_topic",
"command_topic": "command_topic",
"availability_topic": "availability_topic1",
}
config2 = {
"name": "Milk",
"state_topic": "test_topic2",
"command_topic": "command_topic",
"availability_topic": "availability_topic2",
}
await help_test_discovery_update(
hass, mqtt_mock, caplog, LOCK_DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_lock(hass, mqtt_mock, caplog):
"""Test update of discovered lock."""
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "command_topic" }'
)
with patch(
"homeassistant.components.mqtt.lock.MqttLock.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, LOCK_DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk",' ' "command_topic": "test_topic" }'
await help_test_discovery_broken(hass, mqtt_mock, caplog, LOCK_DOMAIN, data1, data2)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT lock device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT lock device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, LOCK_DOMAIN, DEFAULT_CONFIG
)
@pytest.mark.parametrize(
"service,topic,parameters,payload,template",
[
(
SERVICE_LOCK,
"command_topic",
None,
"LOCK",
None,
),
],
)
async def test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
service,
topic,
parameters,
payload,
template,
):
"""Test publishing MQTT payload with different encoding."""
domain = LOCK_DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
domain,
config,
service,
topic,
parameters,
payload,
template,
)
async def test_reloadable(hass, mqtt_mock, caplog, tmp_path):
"""Test reloading the MQTT platform."""
domain = LOCK_DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable(hass, mqtt_mock, caplog, tmp_path, domain, config)
@pytest.mark.parametrize(
"topic,value,attribute,attribute_value",
[
("state_topic", "LOCKED", None, "locked"),
],
)
async def test_encoding_subscribable_topics(
hass, mqtt_mock, caplog, topic, value, attribute, attribute_value
):
"""Test handling of incoming encoded payload."""
await help_test_encoding_subscribable_topics(
hass,
mqtt_mock,
caplog,
LOCK_DOMAIN,
DEFAULT_CONFIG[LOCK_DOMAIN],
topic,
value,
attribute,
attribute_value,
)
|
|
# -*- coding: utf-8 -*-
import time
import datetime
import mock
from factory import SubFactory
from factory.fuzzy import FuzzyDateTime, FuzzyAttribute, FuzzyChoice
from mock import patch, Mock
import factory
import pytz
from factory.django import DjangoModelFactory
from django.utils import timezone
from django.db.utils import IntegrityError
from faker import Factory
from waffle.models import Flag, Sample, Switch
from website import settings
from website.notifications.constants import NOTIFICATION_TYPES
from osf.utils import permissions
from website.archiver import ARCHIVER_SUCCESS
from website.identifiers.utils import parse_identifiers
from website.settings import FAKE_EMAIL_NAME, FAKE_EMAIL_DOMAIN
from framework.auth.core import Auth
from osf import models
from osf.models.sanctions import Sanction
from osf.utils.names import impute_names_model
from osf.utils.workflows import DefaultStates, DefaultTriggers
from addons.osfstorage.models import OsfStorageFile
fake = Factory.create()
# If tests are run on really old processors without high precision this might fail. Unlikely to occur.
fake_email = lambda: '{}+{}@{}'.format(FAKE_EMAIL_NAME, int(time.clock() * 1000000), FAKE_EMAIL_DOMAIN)
def get_default_metaschema():
"""This needs to be a method so it gets called after the test database is set up"""
return models.MetaSchema.objects.first()
def FakeList(provider, n, *args, **kwargs):
func = getattr(fake, provider)
return [func(*args, **kwargs) for _ in range(n)]
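# Illustrative example: FakeList('email', 3) returns three values from
# fake.email(), e.g. ['kim@example.org', ...] (actual values are random).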
class UserFactory(DjangoModelFactory):
# TODO: Change this to only generate long names and see what breaks
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
username = factory.LazyFunction(fake_email)
password = factory.PostGenerationMethodCall('set_password',
'queenfan86')
is_registered = True
is_claimed = True
date_confirmed = factory.Faker('date_time_this_decade', tzinfo=pytz.utc)
merged_by = None
verification_key = None
class Meta:
model = models.OSFUser
@classmethod
def _build(cls, target_class, *args, **kwargs):
emails = kwargs.pop('emails', [])
instance = super(DjangoModelFactory, cls)._build(target_class, *args, **kwargs)
if emails:
# Save for M2M population
instance.set_unusable_password()
instance.save()
for email in emails:
instance.emails.create(address=email)
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
emails = kwargs.pop('emails', [])
instance = super(DjangoModelFactory, cls)._create(target_class, *args, **kwargs)
if emails and not instance.pk:
# Save for M2M population
instance.set_unusable_password()
instance.save()
for email in emails:
instance.emails.create(address=email)
return instance
@factory.post_generation
def set_names(self, create, extracted):
parsed = impute_names_model(self.fullname)
for key, value in parsed.items():
setattr(self, key, value)
if create:
self.save()
@factory.post_generation
def set_emails(self, create, extracted):
if not self.emails.filter(address=self.username).exists():
if not self.id:
if create:
# Perform implicit save to populate M2M
self.save()
else:
# This might lead to strange behavior
return
self.emails.create(address=str(self.username).lower())
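# A minimal usage sketch (illustrative; 'queenfan86' is the password set by
# the PostGenerationMethodCall above, and check_password is assumed to be
# inherited from Django's AbstractBaseUser):
#
#   user = UserFactory()
#   assert user.is_registered
#   assert user.check_password('queenfan86')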
class AuthUserFactory(UserFactory):
"""A user that automatically has an api key, for quick authentication.
Example: ::
user = AuthUserFactory()
res = self.app.get(url, auth=user.auth) # user is "logged in"
"""
@factory.post_generation
def add_auth(self, create, extracted):
self.auth = (self.username, 'queenfan86')
class AuthFactory(factory.base.Factory):
class Meta:
model = Auth
user = factory.SubFactory(UserFactory)
class UnregUserFactory(DjangoModelFactory):
email = factory.LazyFunction(fake_email)
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
date_registered = factory.Faker('date_time', tzinfo=pytz.utc)
class Meta:
model = models.OSFUser
@classmethod
def _build(cls, target_class, *args, **kwargs):
'''Build an object without saving it.'''
ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
for key, val in kwargs.items():
setattr(ret, key, val)
return ret
@classmethod
def _create(cls, target_class, *args, **kwargs):
ret = target_class.create_unregistered(email=kwargs.pop('email'), fullname=kwargs.pop('fullname'))
for key, val in kwargs.items():
setattr(ret, key, val)
ret.save()
return ret
class UnconfirmedUserFactory(DjangoModelFactory):
"""Factory for a user that has not yet confirmed their primary email
address (username).
"""
class Meta:
model = models.OSFUser
username = factory.LazyFunction(fake_email)
fullname = factory.Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
password = 'lolomglgt'
@classmethod
def _build(cls, target_class, username, password, fullname):
'''Build an object without saving it.'''
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.date_registered = fake.date_time(tzinfo=pytz.utc)
return instance
@classmethod
def _create(cls, target_class, username, password, fullname):
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.date_registered = fake.date_time(tzinfo=pytz.utc)
instance.save()
return instance
class BaseNodeFactory(DjangoModelFactory):
title = factory.Faker('catch_phrase')
description = factory.Faker('sentence')
created = factory.LazyFunction(timezone.now)
creator = factory.SubFactory(AuthUserFactory)
class Meta:
model = models.Node
class ProjectFactory(BaseNodeFactory):
category = 'project'
class ProjectWithAddonFactory(ProjectFactory):
"""Factory for a project that has an addon. The addon will be added to
both the Node and the creator records. ::
p = ProjectWithAddonFactory(addon='github')
p.get_addon('github') # => github node settings object
p.creator.get_addon('github') # => github user settings object
"""
# TODO: Should use mock addon objects
@classmethod
def _build(cls, target_class, addon='s3', *args, **kwargs):
'''Build an object without saving it.'''
instance = ProjectFactory._build(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
return instance
@classmethod
def _create(cls, target_class, addon='s3', *args, **kwargs):
instance = ProjectFactory._create(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
instance.save()
return instance
class NodeFactory(BaseNodeFactory):
category = 'hypothesis'
parent = factory.SubFactory(ProjectFactory)
class InstitutionFactory(DjangoModelFactory):
name = factory.Faker('company')
login_url = factory.Faker('url')
logout_url = factory.Faker('url')
domains = FakeList('url', n=3)
email_domains = FakeList('domain_name', n=1)
logo_name = factory.Faker('file_name')
class Meta:
model = models.Institution
class NodeLicenseRecordFactory(DjangoModelFactory):
year = factory.Faker('year')
copyright_holders = FakeList('name', n=3)
class Meta:
model = models.NodeLicenseRecord
@classmethod
def _create(cls, *args, **kwargs):
kwargs['node_license'] = kwargs.get(
'node_license',
models.NodeLicense.objects.get(name='No license')
)
return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
class NodeLogFactory(DjangoModelFactory):
class Meta:
model = models.NodeLog
action = 'file_added'
params = {'path': '/'}
user = factory.SubFactory(UserFactory)
class PrivateLinkFactory(DjangoModelFactory):
class Meta:
model = models.PrivateLink
name = factory.Faker('word')
key = factory.Faker('md5')
anonymous = False
creator = factory.SubFactory(UserFactory)
class CollectionFactory(DjangoModelFactory):
class Meta:
model = models.Collection
is_bookmark_collection = False
title = factory.Faker('catch_phrase')
creator = factory.SubFactory(UserFactory)
class BookmarkCollectionFactory(CollectionFactory):
is_bookmark_collection = True
class RegistrationFactory(BaseNodeFactory):
creator = None
# Default project is created if not provided
category = 'project'
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise Exception('Cannot build registration without saving.')
@classmethod
def _create(cls, target_class, project=None, is_public=False,
schema=None, data=None,
archive=False, embargo=None, registration_approval=None, retraction=None,
*args, **kwargs):
user = None
if project:
user = project.creator
user = kwargs.pop('user', None) or kwargs.get('creator') or user or UserFactory()
kwargs['creator'] = user
# Original project to be registered
project = project or target_class(*args, **kwargs)
if project.has_permission(user, 'admin'):
project.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=False
)
project.save()
# Default registration parameters
schema = schema or get_default_metaschema()
data = data or {'some': 'data'}
auth = Auth(user=user)
register = lambda: project.register_node(
schema=schema,
auth=auth,
data=data
)
def add_approval_step(reg):
if embargo:
reg.embargo = embargo
elif registration_approval:
reg.registration_approval = registration_approval
elif retraction:
reg.retraction = retraction
else:
reg.require_approval(reg.creator)
reg.save()
reg.sanction.add_authorizer(reg.creator, reg)
reg.sanction.save()
with patch('framework.celery_tasks.handlers.enqueue_task'):
reg = register()
add_approval_step(reg)
if not archive:
with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
archive_job = reg.archive_job
archive_job.status = ARCHIVER_SUCCESS
archive_job.done = True
reg.sanction.state = Sanction.APPROVED
reg.sanction.save()
if is_public:
reg.is_public = True
reg.save()
return reg
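# Minimal usage sketch, using only keywords from the _create signature above
# (project, is_public, schema, data, archive, embargo, ...):
#
#   project = ProjectFactory()
#   reg = RegistrationFactory(project=project, is_public=True)
#
# With archive left False, the archive job is stubbed as finished and the
# backing sanction is force-approved, so the registration is immediately
# usable in tests without running the archiver.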
class WithdrawnRegistrationFactory(BaseNodeFactory):
@classmethod
def _create(cls, *args, **kwargs):
registration = kwargs.pop('registration', None)
registration.is_public = True
user = kwargs.pop('user', registration.creator)
registration.retract_registration(user)
withdrawal = registration.retraction
token = list(withdrawal.approval_state.values())[0]['approval_token']
with patch('osf.models.AbstractNode.update_search'):
withdrawal.approve_retraction(user, token)
withdrawal.save()
return withdrawal
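# Minimal usage sketch: WithdrawnRegistrationFactory expects an existing
# registration and returns the approved Retraction, not a Node; the
# registration itself is mutated in place and made public above.
#
#   withdrawal = WithdrawnRegistrationFactory(registration=reg)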
class SanctionFactory(DjangoModelFactory):
class Meta:
abstract = True
@classmethod
def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs):
user = kwargs.pop('user', None) or UserFactory()
kwargs['initiated_by'] = initiated_by or user
sanction = super(SanctionFactory, cls)._create(target_class, *args, **kwargs)
reg_kwargs = {
'creator': user,
'user': user,
sanction.SHORT_NAME: sanction
}
RegistrationFactory(**reg_kwargs)
if not approve:
sanction.state = Sanction.UNAPPROVED
sanction.save()
return sanction
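# Note on the SHORT_NAME trick above: each concrete sanction factory below
# (Retraction, Embargo, RegistrationApproval) is attached to a freshly created
# registration by passing the sanction under its model's SHORT_NAME keyword to
# RegistrationFactory, which wires it up in add_approval_step.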
class RetractionFactory(SanctionFactory):
class Meta:
model = models.Retraction
user = factory.SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
class Meta:
model = models.Embargo
user = factory.SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
class Meta:
model = models.RegistrationApproval
user = factory.SubFactory(UserFactory)
class EmbargoTerminationApprovalFactory(DjangoModelFactory):
FACTORY_STRATEGY = factory.base.CREATE_STRATEGY
@classmethod
def create(cls, registration=None, user=None, embargo=None, *args, **kwargs):
if registration:
if not user:
user = registration.creator
else:
user = user or UserFactory()
if not embargo:
embargo = EmbargoFactory(state=models.Sanction.APPROVED, approve=True)
registration = embargo._get_registration()
else:
registration = RegistrationFactory(creator=user, user=user, embargo=embargo)
with mock.patch('osf.models.sanctions.TokenApprovableSanction.ask', mock.Mock()):
approval = registration.request_embargo_termination(Auth(user))
return approval
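# Minimal usage sketch: with no arguments, the factory above fabricates an
# approved embargo (and its registration) and then requests termination as the
# registration's creator; pass registration= or embargo= to reuse existing
# objects.
#
#   approval = EmbargoTerminationApprovalFactory()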
class DraftRegistrationFactory(DjangoModelFactory):
class Meta:
model = models.DraftRegistration
@classmethod
def _create(cls, *args, **kwargs):
branched_from = kwargs.get('branched_from')
initiator = kwargs.get('initiator')
registration_schema = kwargs.get('registration_schema')
registration_metadata = kwargs.get('registration_metadata')
if not branched_from:
project_params = {}
if initiator:
project_params['creator'] = initiator
branched_from = ProjectFactory(**project_params)
initiator = branched_from.creator
registration_schema = registration_schema or models.MetaSchema.objects.first()
registration_metadata = registration_metadata or {}
draft = models.DraftRegistration.create_from_node(
branched_from,
user=initiator,
schema=registration_schema,
data=registration_metadata,
)
return draft
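# Minimal usage sketch; keyword names mirror the kwargs read in _create above,
# and the metadata payload is purely illustrative:
#
#   draft = DraftRegistrationFactory()                      # builds its own project
#   draft = DraftRegistrationFactory(
#       branched_from=project,
#       registration_metadata={'summary': {'value': 'illustrative answer'}},
#   )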
class CommentFactory(DjangoModelFactory):
class Meta:
model = models.Comment
content = factory.Sequence(lambda n: 'Comment {0}'.format(n))
@classmethod
def _build(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or models.Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or models.Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
instance.save()
return instance
class SubjectFactory(DjangoModelFactory):
text = factory.Sequence(lambda n: 'Example Subject #{}'.format(n))
class Meta:
model = models.Subject
@classmethod
def _create(cls, target_class, parent=None, provider=None, bepress_subject=None, *args, **kwargs):
provider = provider or models.PreprintProvider.objects.first() or PreprintProviderFactory(_id='osf')
if provider._id != 'osf' and not bepress_subject:
osf = models.PreprintProvider.load('osf') or PreprintProviderFactory(_id='osf')
bepress_subject = SubjectFactory(provider=osf)
try:
ret = super(SubjectFactory, cls)._create(target_class, parent=parent, provider=provider, bepress_subject=bepress_subject, *args, **kwargs)
except IntegrityError:
ret = models.Subject.objects.get(text=kwargs['text'])
if parent:
ret.parent = parent
return ret
class PreprintProviderFactory(DjangoModelFactory):
name = factory.Faker('company')
description = factory.Faker('bs')
external_url = factory.Faker('url')
class Meta:
model = models.PreprintProvider
@classmethod
def _build(cls, target_class, *args, **kwargs):
instance = super(PreprintProviderFactory, cls)._build(target_class, *args, **kwargs)
if not instance.share_title:
instance.share_title = instance._id
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = super(PreprintProviderFactory, cls)._create(target_class, *args, **kwargs)
if not instance.share_title:
instance.share_title = instance._id
instance.save()
return instance
def sync_set_identifiers(preprint):
ezid_return_value = {
'response': {
'success': '{doi}osf.io/{guid} | {ark}osf.io/{guid}'.format(
doi=settings.DOI_NAMESPACE, ark=settings.ARK_NAMESPACE, guid=preprint._id
)
},
'already_exists': False
}
id_dict = parse_identifiers(ezid_return_value)
preprint.set_identifier_values(doi=id_dict['doi'])
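# Note: sync_set_identifiers fakes a successful EZID response (using the DOI
# and ARK namespaces from settings) so DOI assignment can be exercised without
# network calls; PreprintFactory._create below invokes it when is_published is
# True, while the real identifier task is patched out.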
class PreprintFactory(DjangoModelFactory):
class Meta:
model = models.PreprintService
doi = factory.Sequence(lambda n: '10.123/{}'.format(n))
provider = factory.SubFactory(PreprintProviderFactory)
@classmethod
def _build(cls, target_class, *args, **kwargs):
creator = kwargs.pop('creator', None) or UserFactory()
project = kwargs.pop('project', None) or ProjectFactory(creator=creator)
provider = kwargs.pop('provider', None) or PreprintProviderFactory()
instance = target_class(node=project, provider=provider)
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
update_task_patcher = mock.patch('website.preprints.tasks.on_preprint_updated.si')
update_task_patcher.start()
finish = kwargs.pop('finish', True)
is_published = kwargs.pop('is_published', True)
instance = cls._build(target_class, *args, **kwargs)
doi = kwargs.pop('doi', None)
license_details = kwargs.pop('license_details', None)
filename = kwargs.pop('filename', None) or 'preprint_file.txt'
subjects = kwargs.pop('subjects', None) or [[SubjectFactory()._id]]
instance.node.preprint_article_doi = doi
instance.machine_state = kwargs.pop('machine_state', 'initial')
user = kwargs.pop('creator', None) or instance.node.creator
if not instance.node.is_contributor(user):
instance.node.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=True
)
preprint_file = OsfStorageFile.create(
node=instance.node,
path='/{}'.format(filename),
name=filename,
materialized_path='/{}'.format(filename))
preprint_file.save()
from addons.osfstorage import settings as osfstorage_settings
preprint_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
if finish:
auth = Auth(user)
instance.set_primary_file(preprint_file, auth=auth, save=True)
instance.set_subjects(subjects, auth=auth)
if license_details:
instance.set_preprint_license(license_details, auth=auth)
create_task_patcher = mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si')
mock_create_identifier = create_task_patcher.start()
if is_published:
mock_create_identifier.side_effect = sync_set_identifiers(instance)
instance.set_published(is_published, auth=auth)
create_task_patcher.stop()
if not instance.is_published:
instance.node._has_abandoned_preprint = True
instance.node.save()
instance.save()
return instance
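# Minimal usage sketches, restricted to the kwargs popped in _create above:
#
#   preprint = PreprintFactory()                        # published, with a stub file and subject
#   unpublished = PreprintFactory(is_published=False)   # node is flagged _has_abandoned_preprint
#   bare = PreprintFactory(finish=False)                # skips the file/subject/publish steps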
class TagFactory(DjangoModelFactory):
class Meta:
model = models.Tag
name = factory.Faker('word')
system = False
class ApiOAuth2PersonalTokenFactory(DjangoModelFactory):
class Meta:
model = models.ApiOAuth2PersonalToken
owner = factory.SubFactory(UserFactory)
scopes = 'osf.full_write osf.full_read'
name = factory.Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class ApiOAuth2ApplicationFactory(DjangoModelFactory):
class Meta:
model = models.ApiOAuth2Application
owner = factory.SubFactory(UserFactory)
name = factory.Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
callback_url = 'http://example.uk'
class ForkFactory(DjangoModelFactory):
class Meta:
model = models.Node
@classmethod
def _create(cls, *args, **kwargs):
project = kwargs.pop('project', None)
user = kwargs.pop('user', project.creator)
title = kwargs.pop('title', None)
fork = project.fork_node(auth=Auth(user), title=title)
fork.save()
return fork
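# Minimal usage sketch: ForkFactory requires the source project and defaults
# the forking user to project.creator (see _create above).
#
#   fork = ForkFactory(project=project)
#   named_fork = ForkFactory(project=project, user=other_user, title='My fork')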
class IdentifierFactory(DjangoModelFactory):
class Meta:
model = models.Identifier
referent = factory.SubFactory(RegistrationFactory)
value = factory.Sequence(lambda n: 'carp:/2460{}'.format(n))
@classmethod
def _create(cls, *args, **kwargs):
kwargs['category'] = kwargs.get('category', 'carpid')
return super(IdentifierFactory, cls)._create(*args, **kwargs)
class NodeRelationFactory(DjangoModelFactory):
class Meta:
model = models.NodeRelation
child = factory.SubFactory(NodeFactory)
parent = factory.SubFactory(NodeFactory)
class ExternalAccountFactory(DjangoModelFactory):
class Meta:
model = models.ExternalAccount
oauth_key = 'some-silly-key'
oauth_secret = 'some-super-secret'
provider = 'mock2'
provider_id = factory.Sequence(lambda n: 'user-{0}'.format(n))
provider_name = 'Fake Provider'
display_name = factory.Sequence(lambda n: 'user-{0}'.format(n))
profile_url = 'http://wutwut.com/'
refresh_token = 'some-sillier-key'
class MockOAuth2Provider(models.ExternalProvider):
name = 'Mock OAuth 2.0 Provider'
short_name = 'mock2'
client_id = 'mock2_client_id'
client_secret = 'mock2_client_secret'
auth_url_base = 'https://mock2.com/auth'
callback_url = 'https://mock2.com/callback'
auto_refresh_url = 'https://mock2.com/callback'
refresh_time = 300
expiry_time = 9001
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
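# Note: MockOAuth2Provider's short_name ('mock2') matches the provider field on
# ExternalAccountFactory above, so tests can pair a fake external account with
# this stub provider without contacting a real OAuth service.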
class NotificationSubscriptionFactory(DjangoModelFactory):
class Meta:
model = models.NotificationSubscription
def make_node_lineage():
node1 = NodeFactory()
node2 = NodeFactory(parent=node1)
node3 = NodeFactory(parent=node2)
node4 = NodeFactory(parent=node3)
return [node1._id, node2._id, node3._id, node4._id]
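# Note: make_node_lineage returns the _ids of a four-level parent/child chain;
# NotificationDigestFactory below uses it (via FuzzyAttribute) to populate
# node_lineage with realistic nesting.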
class NotificationDigestFactory(DjangoModelFactory):
timestamp = FuzzyDateTime(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
node_lineage = FuzzyAttribute(fuzzer=make_node_lineage)
user = factory.SubFactory(UserFactory)
send_type = FuzzyChoice(choices=NOTIFICATION_TYPES.keys())
message = fake.text(max_nb_chars=2048)
event = fake.text(max_nb_chars=50)
class Meta:
model = models.NotificationDigest
class ConferenceFactory(DjangoModelFactory):
class Meta:
model = models.Conference
endpoint = factory.Sequence(lambda n: 'conference{0}'.format(n))
name = factory.Faker('catch_phrase')
active = True
is_meeting = True
@factory.post_generation
def admins(self, create, extracted, **kwargs):
self.admins = extracted or [UserFactory()]
class SessionFactory(DjangoModelFactory):
class Meta:
model = models.Session
@classmethod
def _build(cls, target_class, *args, **kwargs):
user = kwargs.pop('user', None)
instance = target_class(*args, **kwargs)
if user:
instance.data['auth_user_username'] = user.username
instance.data['auth_user_id'] = user._primary_key
instance.data['auth_user_fullname'] = user.fullname
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = cls._build(target_class, *args, **kwargs)
instance.save()
return instance
class ArchiveJobFactory(DjangoModelFactory):
class Meta:
model = models.ArchiveJob
class ReviewActionFactory(DjangoModelFactory):
class Meta:
model = models.ReviewAction
trigger = FuzzyChoice(choices=DefaultTriggers.values())
comment = factory.Faker('text')
from_state = FuzzyChoice(choices=DefaultStates.values())
to_state = FuzzyChoice(choices=DefaultStates.values())
target = factory.SubFactory(PreprintFactory)
creator = factory.SubFactory(AuthUserFactory)
is_deleted = False
class ScheduledBannerFactory(DjangoModelFactory):
# Banners are set for 24 hours from start_date if no end date is given
class Meta:
model = models.ScheduledBanner
name = factory.Faker('name')
default_alt_text = factory.Faker('text')
mobile_alt_text = factory.Faker('text')
default_photo = factory.Faker('file_name')
mobile_photo = factory.Faker('file_name')
license = factory.Faker('name')
color = 'white'
start_date = factory.LazyFunction(timezone.now)
end_date = factory.LazyAttribute(lambda o: o.start_date)
class FlagFactory(DjangoModelFactory):
name = factory.Faker('catch_phrase')
everyone = True
note = 'This is a waffle test flag'
class Meta:
model = Flag
class SampleFactory(DjangoModelFactory):
name = factory.Faker('catch_phrase')
percent = 100
note = 'This is a waffle test sample'
class Meta:
model = Sample
class SwitchFactory(DjangoModelFactory):
name = factory.Faker('catch_phrase')
active = True
note = 'This is a waffle test switch'
class Meta:
model = Switch
class NodeRequestFactory(DjangoModelFactory):
class Meta:
model = models.NodeRequest
comment = factory.Faker('text')