# stdlib
import re
import time
# 3p
import pymongo
# project
from checks import AgentCheck
from util import get_hostname
DEFAULT_TIMEOUT = 30
GAUGE = AgentCheck.gauge
RATE = AgentCheck.rate
class MongoDb(AgentCheck):
SERVICE_CHECK_NAME = 'mongodb.can_connect'
SOURCE_TYPE_NAME = 'mongodb'
# METRIC LIST DEFINITION
#
# Format
# ------
# metric_name -> (metric_type, alias)
# or
# metric_name -> metric_type *
# * by default MongoDB metrics are reported under their original metric names
"""
Core metrics collected by default.
"""
BASE_METRICS = {
"asserts.msg": RATE,
"asserts.regular": RATE,
"asserts.rollovers": RATE,
"asserts.user": RATE,
"asserts.warning": RATE,
"backgroundFlushing.average_ms": GAUGE,
"backgroundFlushing.flushes": RATE,
"backgroundFlushing.last_ms": GAUGE,
"backgroundFlushing.total_ms": GAUGE,
"connections.available": GAUGE,
"connections.current": GAUGE,
"connections.totalCreated": GAUGE,
"cursors.timedOut": GAUGE,
"cursors.totalOpen": GAUGE,
"extra_info.heap_usage_bytes": RATE,
"extra_info.page_faults": RATE,
"globalLock.activeClients.readers": GAUGE,
"globalLock.activeClients.total": GAUGE,
"globalLock.activeClients.writers": GAUGE,
"globalLock.currentQueue.readers": GAUGE,
"globalLock.currentQueue.total": GAUGE,
"globalLock.currentQueue.writers": GAUGE,
"globalLock.lockTime": GAUGE,
"globalLock.ratio": GAUGE, # < 2.2
"globalLock.totalTime": GAUGE,
"indexCounters.accesses": RATE,
"indexCounters.btree.accesses": RATE, # < 2.4
"indexCounters.btree.hits": RATE, # < 2.4
"indexCounters.btree.misses": RATE, # < 2.4
"indexCounters.btree.missRatio": GAUGE, # < 2.4
"indexCounters.hits": RATE,
"indexCounters.misses": RATE,
"indexCounters.missRatio": GAUGE,
"indexCounters.resets": RATE,
"mem.bits": GAUGE,
"mem.mapped": GAUGE,
"mem.mappedWithJournal": GAUGE,
"mem.resident": GAUGE,
"mem.virtual": GAUGE,
"metrics.cursor.open.noTimeout": GAUGE,
"metrics.cursor.open.pinned": GAUGE,
"metrics.cursor.open.total": GAUGE,
"metrics.cursor.timedOut": RATE,
"metrics.document.deleted": RATE,
"metrics.document.inserted": RATE,
"metrics.document.returned": RATE,
"metrics.document.updated": RATE,
"metrics.getLastError.wtime.num": RATE,
"metrics.getLastError.wtime.totalMillis": RATE,
"metrics.getLastError.wtimeouts": RATE,
"metrics.operation.fastmod": RATE,
"metrics.operation.idhack": RATE,
"metrics.operation.scanAndOrder": RATE,
"metrics.operation.writeConflicts": RATE,
"metrics.queryExecutor.scanned": RATE,
"metrics.record.moves": RATE,
"metrics.repl.apply.batches.num": RATE,
"metrics.repl.apply.batches.totalMillis": RATE,
"metrics.repl.apply.ops": RATE,
"metrics.repl.buffer.count": GAUGE,
"metrics.repl.buffer.maxSizeBytes": GAUGE,
"metrics.repl.buffer.sizeBytes": GAUGE,
"metrics.repl.network.bytes": RATE,
"metrics.repl.network.getmores.num": RATE,
"metrics.repl.network.getmores.totalMillis": RATE,
"metrics.repl.network.ops": RATE,
"metrics.repl.network.readersCreated": RATE,
"metrics.repl.oplog.insert.num": RATE,
"metrics.repl.oplog.insert.totalMillis": RATE,
"metrics.repl.oplog.insertBytes": RATE,
"metrics.repl.preload.docs.num": RATE,
"metrics.repl.preload.docs.totalMillis": RATE,
"metrics.repl.preload.indexes.num": RATE,
"metrics.repl.preload.indexes.totalMillis": RATE,
"metrics.repl.storage.freelist.search.bucketExhausted": RATE,
"metrics.repl.storage.freelist.search.requests": RATE,
"metrics.repl.storage.freelist.search.scanned": RATE,
"metrics.ttl.deletedDocuments": RATE,
"metrics.ttl.passes": RATE,
"network.bytesIn": RATE,
"network.bytesOut": RATE,
"network.numRequests": RATE,
"opcounters.command": RATE,
"opcounters.delete": RATE,
"opcounters.getmore": RATE,
"opcounters.insert": RATE,
"opcounters.query": RATE,
"opcounters.update": RATE,
"opcountersRepl.command": RATE,
"opcountersRepl.delete": RATE,
"opcountersRepl.getmore": RATE,
"opcountersRepl.insert": RATE,
"opcountersRepl.query": RATE,
"opcountersRepl.update": RATE,
"replSet.health": GAUGE,
"replSet.replicationLag": GAUGE,
"replSet.state": GAUGE,
"stats.avgObjSize": GAUGE,
"stats.collections": GAUGE,
"stats.dataSize": GAUGE,
"stats.fileSize": GAUGE,
"stats.indexes": GAUGE,
"stats.indexSize": GAUGE,
"stats.nsSizeMB": GAUGE,
"stats.numExtents": GAUGE,
"stats.objects": GAUGE,
"stats.storageSize": GAUGE,
"uptime": GAUGE,
}
"""
Journaling-related operations and performance report.
https://docs.mongodb.org/manual/reference/command/serverStatus/#serverStatus.dur
"""
DURABILITY_METRICS = {
"dur.commits": GAUGE,
"dur.commitsInWriteLock": GAUGE,
"dur.compression": GAUGE,
"dur.earlyCommits": GAUGE,
"dur.journaledMB": GAUGE,
"dur.timeMs.dt": GAUGE,
"dur.timeMs.prepLogBuffer": GAUGE,
"dur.timeMs.remapPrivateView": GAUGE,
"dur.timeMs.writeToDataFiles": GAUGE,
"dur.timeMs.writeToJournal": GAUGE,
"dur.writeToDataFilesMB": GAUGE,
# Required version > 3.0.0
"dur.timeMs.commits": GAUGE,
"dur.timeMs.commitsInWriteLock": GAUGE,
}
"""
ServerStatus use of database commands report.
Required version > 3.0.0.
https://docs.mongodb.org/manual/reference/command/serverStatus/#serverStatus.metrics.commands
"""
COMMANDS_METRICS = {
# Required version > 3.0.0
"metrics.commands.count.failed": RATE,
"metrics.commands.count.total": GAUGE,
"metrics.commands.createIndexes.failed": RATE,
"metrics.commands.createIndexes.total": GAUGE,
"metrics.commands.delete.failed": RATE,
"metrics.commands.delete.total": GAUGE,
"metrics.commands.eval.failed": RATE,
"metrics.commands.eval.total": GAUGE,
"metrics.commands.findAndModify.failed": RATE,
"metrics.commands.findAndModify.total": GAUGE,
"metrics.commands.insert.failed": RATE,
"metrics.commands.insert.total": GAUGE,
"metrics.commands.update.failed": RATE,
"metrics.commands.update.total": GAUGE,
}
"""
ServerStatus locks report.
Required version > 3.0.0.
https://docs.mongodb.org/manual/reference/command/serverStatus/#server-status-locks
"""
LOCKS_METRICS = {
"locks.Collection.acquireCount.R": RATE,
"locks.Collection.acquireCount.r": RATE,
"locks.Collection.acquireCount.W": RATE,
"locks.Collection.acquireCount.w": RATE,
"locks.Collection.acquireWaitCount.R": RATE,
"locks.Collection.acquireWaitCount.W": RATE,
"locks.Collection.timeAcquiringMicros.R": RATE,
"locks.Collection.timeAcquiringMicros.W": RATE,
"locks.Database.acquireCount.r": RATE,
"locks.Database.acquireCount.R": RATE,
"locks.Database.acquireCount.w": RATE,
"locks.Database.acquireCount.W": RATE,
"locks.Database.acquireWaitCount.r": RATE,
"locks.Database.acquireWaitCount.R": RATE,
"locks.Database.acquireWaitCount.w": RATE,
"locks.Database.acquireWaitCount.W": RATE,
"locks.Database.timeAcquiringMicros.r": RATE,
"locks.Database.timeAcquiringMicros.R": RATE,
"locks.Database.timeAcquiringMicros.w": RATE,
"locks.Database.timeAcquiringMicros.W": RATE,
"locks.Global.acquireCount.r": RATE,
"locks.Global.acquireCount.R": RATE,
"locks.Global.acquireCount.w": RATE,
"locks.Global.acquireCount.W": RATE,
"locks.Global.acquireWaitCount.r": RATE,
"locks.Global.acquireWaitCount.R": RATE,
"locks.Global.acquireWaitCount.w": RATE,
"locks.Global.acquireWaitCount.W": RATE,
"locks.Global.timeAcquiringMicros.r": RATE,
"locks.Global.timeAcquiringMicros.R": RATE,
"locks.Global.timeAcquiringMicros.w": RATE,
"locks.Global.timeAcquiringMicros.W": RATE,
"locks.Metadata.acquireCount.R": RATE,
"locks.Metadata.acquireCount.W": RATE,
"locks.MMAPV1Journal.acquireCount.r": RATE,
"locks.MMAPV1Journal.acquireCount.w": RATE,
"locks.MMAPV1Journal.acquireWaitCount.r": RATE,
"locks.MMAPV1Journal.acquireWaitCount.w": RATE,
"locks.MMAPV1Journal.timeAcquiringMicros.r": RATE,
"locks.MMAPV1Journal.timeAcquiringMicros.w": RATE,
"locks.oplog.acquireCount.R": RATE,
"locks.oplog.acquireCount.w": RATE,
"locks.oplog.acquireWaitCount.R": RATE,
"locks.oplog.acquireWaitCount.w": RATE,
"locks.oplog.timeAcquiringMicros.R": RATE,
"locks.oplog.timeAcquiringMicros.w": RATE,
}
"""
TCMalloc memory allocator report.
"""
TCMALLOC_METRICS = {
"tcmalloc.generic.current_allocated_bytes": GAUGE,
"tcmalloc.generic.heap_size": GAUGE,
"tcmalloc.tcmalloc.aggressive_memory_decommit": GAUGE,
"tcmalloc.tcmalloc.central_cache_free_bytes": GAUGE,
"tcmalloc.tcmalloc.current_total_thread_cache_bytes": GAUGE,
"tcmalloc.tcmalloc.max_total_thread_cache_bytes": GAUGE,
"tcmalloc.tcmalloc.pageheap_free_bytes": GAUGE,
"tcmalloc.tcmalloc.pageheap_unmapped_bytes": GAUGE,
"tcmalloc.tcmalloc.thread_cache_free_bytes": GAUGE,
"tcmalloc.tcmalloc.transfer_cache_free_bytes": GAUGE,
}
"""
WiredTiger storage engine.
"""
WIREDTIGER_METRICS = {
"wiredTiger.cache.bytes currently in the cache": (GAUGE, "wiredTiger.cache.bytes_currently_in_cache"), # noqa
"wiredTiger.cache.failed eviction of pages that exceeded the in-memory maximum": (RATE, "wiredTiger.cache.failed_eviction_of_pages_exceeding_the_in-memory_maximum"), # noqa
"wiredTiger.cache.in-memory page splits": GAUGE,
"wiredTiger.cache.maximum bytes configured": GAUGE,
"wiredTiger.cache.maximum page size at eviction": GAUGE,
"wiredTiger.cache.pages currently held in the cache": (GAUGE, "wiredTiger.cache.pages_currently_held_in_cache"), # noqa
"wiredTiger.cache.pages evicted because they exceeded the in-memory maximum": (RATE, "wiredTiger.cache.pages_evicted_exceeding_the_in-memory_maximum"), # noqa
"wiredTiger.cache.pages evicted by application threads": RATE,
"wiredTiger.concurrentTransactions.read.available": GAUGE,
"wiredTiger.concurrentTransactions.read.out": GAUGE,
"wiredTiger.concurrentTransactions.read.totalTickets": GAUGE,
"wiredTiger.concurrentTransactions.write.available": GAUGE,
"wiredTiger.concurrentTransactions.write.out": GAUGE,
"wiredTiger.concurrentTransactions.write.totalTickets": GAUGE,
}
"""
Usage statistics for each collection.
https://docs.mongodb.org/v3.0/reference/command/top/
"""
TOP_METRICS = {
"commands.count": GAUGE,
"commands.time": GAUGE,
"getmore.count": GAUGE,
"getmore.time": GAUGE,
"insert.count": GAUGE,
"insert.time": GAUGE,
"queries.count": GAUGE,
"queries.time": GAUGE,
"readLock.count": GAUGE,
"readLock.time": GAUGE,
"remove.count": GAUGE,
"remove.time": GAUGE,
"total.count": GAUGE,
"total.time": GAUGE,
"update.count": GAUGE,
"update.time": GAUGE,
"writeLock.count": GAUGE,
"writeLock.time": GAUGE,
}
"""
Mapping for case-sensitive metric name suffixes.
https://docs.mongodb.org/manual/reference/command/serverStatus/#server-status-locks
"""
CASE_SENSITIVE_METRIC_NAME_SUFFIXES = {
r'\.R\b': ".shared",
r'\.r\b': ".intent_shared",
r'\.W\b': ".exclusive",
r'\.w\b': ".intent_exclusive",
}
"""
Associates with the metric list to collect.
"""
AVAILABLE_METRICS = {
'durability': DURABILITY_METRICS,
'locks': LOCKS_METRICS,
'metrics.commands': COMMANDS_METRICS,
'tcmalloc': TCMALLOC_METRICS,
'wiredtiger': WIREDTIGER_METRICS,
'top': TOP_METRICS,
}
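# Illustrative instance configuration enabling optional metric groups
# (a minimal sketch; the host, credentials and the selected groups below are
# placeholders, not values from this file):
#
#   instances:
#     - server: mongodb://user:pass@localhost:27017/admin
#       additional_metrics:
#         - durability
#         - top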
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self._last_state_by_server = {}
self.metrics_to_collect_by_instance = {}
def get_library_versions(self):
return {"pymongo": pymongo.version}
def check_last_state(self, state, clean_server_name, agentConfig):
if self._last_state_by_server.get(clean_server_name, -1) != state:
self._last_state_by_server[clean_server_name] = state
return self.create_event(state, clean_server_name, agentConfig)
def create_event(self, state, clean_server_name, agentConfig):
"""Create an event with a message describing the replication
state of a mongo node"""
def get_state_description(state):
if state == 0:
return 'Starting Up'
elif state == 1:
return 'Primary'
elif state == 2:
return 'Secondary'
elif state == 3:
return 'Recovering'
elif state == 4:
return 'Fatal'
elif state == 5:
return 'Starting up (forking threads)'
elif state == 6:
return 'Unknown'
elif state == 7:
return 'Arbiter'
elif state == 8:
return 'Down'
elif state == 9:
return 'Rollback'
status = get_state_description(state)
hostname = get_hostname(agentConfig)
msg_title = "%s is %s" % (clean_server_name, status)
msg = "MongoDB %s just reported as %s" % (clean_server_name, status)
self.event({
'timestamp': int(time.time()),
'event_type': 'Mongo',
'api_key': agentConfig.get('api_key', ''),
'msg_title': msg_title,
'msg_text': msg,
'host': hostname
})
def _build_metric_list_to_collect(self, additional_metrics):
"""
Build the metric list to collect based on the instance preferences.
"""
metrics_to_collect = {}
# Default metrics
metrics_to_collect.update(self.BASE_METRICS)
# Additional metrics
for option in additional_metrics:
available_metrics = self.AVAILABLE_METRICS.get(option)
if not available_metrics:
self.log.warning(
u"Failed to extend the list of metrics to collect:"
" unrecognized {option} option".format(
option=option
)
)
continue
self.log.debug(
u"Adding `{option}` corresponding metrics to the list"
" of metrics to collect.".format(
option=option
)
)
metrics_to_collect.update(available_metrics)
return metrics_to_collect
def _get_metrics_to_collect(self, instance_key, additional_metrics):
"""
Return and cache the list of metrics to collect.
"""
if instance_key not in self.metrics_to_collect_by_instance:
self.metrics_to_collect_by_instance[instance_key] = \
self._build_metric_list_to_collect(additional_metrics)
return self.metrics_to_collect_by_instance[instance_key]
def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""):
"""
Return the submit method and the metric name to use.
The metric name is defined as follows:
* If available, the normalized metric name alias
* (Or) the normalized original metric name
"""
submit_method = metrics_to_collect[original_metric_name][0] \
if isinstance(metrics_to_collect[original_metric_name], tuple) \
else metrics_to_collect[original_metric_name]
metric_name = metrics_to_collect[original_metric_name][1] \
if isinstance(metrics_to_collect[original_metric_name], tuple) \
else original_metric_name
return submit_method, self.normalize(metric_name, submit_method, prefix)
def normalize(self, metric_name, submit_method, prefix):
"""
Replace case-sensitive metric name characters, normalize the metric name,
prefix and suffix according to its type.
"""
metric_prefix = "mongodb." if not prefix else "mongodb.{0}.".format(prefix)
metric_suffix = "ps" if submit_method == RATE else ""
# Replace case-sensitive metric name characters
for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():
metric_name = re.compile(pattern).sub(repl, metric_name)
# Normalize, and wrap
return u"{metric_prefix}{normalized_metric_name}{metric_suffix}".format(
normalized_metric_name=super(MongoDb, self).normalize(metric_name.lower()),
metric_prefix=metric_prefix, metric_suffix=metric_suffix
)
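# Illustrative example of the resulting name mangling: with the suffix
# mappings above, a RATE metric such as "locks.Database.acquireCount.R"
# is reported as "mongodb.locks.database.acquirecount.sharedps".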
def check(self, instance):
"""
Returns a dictionary that looks a lot like what's sent back by
db.serverStatus()
"""
if 'server' not in instance:
raise Exception("Missing 'server' in mongo config")
server = instance['server']
ssl_params = {
'ssl': instance.get('ssl', None),
'ssl_keyfile': instance.get('ssl_keyfile', None),
'ssl_certfile': instance.get('ssl_certfile', None),
'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),
'ssl_ca_certs': instance.get('ssl_ca_certs', None)
}
for key, param in ssl_params.items():
if param is None:
del ssl_params[key]
# The server is configured as a URL, e.g. mongodb://user:pass@server/db
parsed = pymongo.uri_parser.parse_uri(server)
username = parsed.get('username')
password = parsed.get('password')
db_name = parsed.get('database')
clean_server_name = server.replace(password, "*" * 5) if password is not None else server
additional_metrics = instance.get('additional_metrics', [])
tags = instance.get('tags', [])
tags.append('server:%s' % clean_server_name)
# Get the list of metrics to collect
collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics
metrics_to_collect = self._get_metrics_to_collect(
server,
additional_metrics
)
# de-dupe tags to avoid a memory leak
tags = list(set(tags))
if not db_name:
self.log.info('No MongoDB database found in URI. Defaulting to admin.')
db_name = 'admin'
service_check_tags = [
"db:%s" % db_name
]
nodelist = parsed.get('nodelist')
if nodelist:
host = nodelist[0][0]
port = nodelist[0][1]
service_check_tags = service_check_tags + [
"host:%s" % host,
"port:%s" % port
]
do_auth = True
if username is None or password is None:
self.log.debug("Mongo: cannot extract username and password from config %s" % server)
do_auth = False
timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000
try:
cli = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=timeout,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,
**ssl_params)
# some commands can only go against the admin DB
admindb = cli['admin']
db = cli[db_name]
except Exception:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags)
raise
if do_auth and not db.authenticate(username, password):
message = "Mongo: cannot connect with config %s" % server
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message=message)
raise Exception(message)
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.OK,
tags=service_check_tags)
status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)
if status['ok'] == 0:
raise Exception(status['errmsg'].__str__())
status['stats'] = db.command('dbstats')
dbstats = {}
dbstats[db_name] = {'stats': status['stats']}
# Handle replica data, if any
# See
# http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa
try:
data = {}
dbnames = []
replSet = admindb.command('replSetGetStatus')
if replSet:
primary = None
current = None
# need a new connection to deal with replica sets
setname = replSet.get('set')
cli = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=timeout,
replicaset=setname,
read_preference=pymongo.ReadPreference.NEAREST,
**ssl_params)
db = cli[db_name]
if do_auth and not db.authenticate(username, password):
message = ("Mongo: cannot connect with config %s" % server)
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message=message)
raise Exception(message)
# find nodes: master and current node (ourself)
for member in replSet.get('members'):
if member.get('self'):
current = member
if int(member.get('state')) == 1:
primary = member
# If we have both we can compute a lag time
if current is not None and primary is not None:
lag = primary['optimeDate'] - current['optimeDate']
# Python 2.7 has this built in; Python < 2.7 doesn't...
if hasattr(lag, 'total_seconds'):
data['replicationLag'] = lag.total_seconds()
else:
data['replicationLag'] = (
lag.microseconds +
(lag.seconds + lag.days * 24 * 3600) * 10**6
) / 10.0**6
if current is not None:
data['health'] = current['health']
data['state'] = replSet['myState']
self.check_last_state(
data['state'],
clean_server_name,
self.agentConfig)
status['replSet'] = data
except Exception as e:
if "OperationFailure" in repr(e) and "replSetGetStatus" in str(e):
pass
else:
raise e
# If these keys exist, remove them for now as they cannot be serialized
try:
status['backgroundFlushing'].pop('last_finished')
except KeyError:
pass
try:
status.pop('localTime')
except KeyError:
pass
dbnames = cli.database_names()
for db_n in dbnames:
db_aux = cli[db_n]
dbstats[db_n] = {'stats': db_aux.command('dbstats')}
# Go through the metrics and save the values
for metric_name in metrics_to_collect:
# each metric is of the form: x.y.z with z optional
# and can be found at status[x][y][z]
value = status
if metric_name.startswith('stats'):
continue
else:
try:
for c in metric_name.split("."):
value = value[c]
except KeyError:
continue
# value is now status[x][y][z]
if not isinstance(value, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead."
.format(metric_name, type(value)))
# Submit the metric
submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)
submit_method(self, metric_name_alias, value, tags=tags)
for st, value in dbstats.iteritems():
for metric_name in metrics_to_collect:
if not metric_name.startswith('stats.'):
continue
try:
val = value['stats'][metric_name.split('.')[1]]
except KeyError:
continue
# val is now the dbstats value for this metric
if not isinstance(val, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead."
.format(metric_name, type(val))
)
# Submit the metric
submit_method, metric_name_alias = \
self._resolve_metric(metric_name, metrics_to_collect)
metrics_tags = tags + ['cluster:db:%s' % st]
submit_method(self, metric_name_alias, val, tags=metrics_tags)
# Report the usage metrics for dbs/collections
if 'top' in additional_metrics:
try:
dbtop = db.command('top')
for ns, ns_metrics in dbtop['totals'].iteritems():
if "." not in ns:
continue
# configure tags for db name and collection name
dbname, collname = ns.split(".", 1)
ns_tags = tags + ["db:%s" % dbname, "collection:%s" % collname]
# iterate over DBTOP metrics
for m in self.TOP_METRICS:
# each metric is of the form: x.y.z with z optional
# and can be found at ns_metrics[x][y][z]
value = ns_metrics
try:
for c in m.split("."):
value = value[c]
except Exception:
continue
# value is now ns_metrics[x][y][z]
if not isinstance(value, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead."
.format(m, type(value))
)
# Submit the metric
submit_method, metric_name_alias = \
self._resolve_metric(m, metrics_to_collect, prefix="usage")
submit_method(self, metric_name_alias, value, tags=ns_tags)
except Exception as e:
self.log.warning('Failed to record `top` metrics %s' % str(e))
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
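# Dense equivalent of this 5x6 SparseTensor (zeros elsewhere; the explicit
# entry at [0, 0] has value 0):
#   [[ 0,  0,  0,  0,  0,  0],
#    [10,  0,  0, 13, 14,  0],
#    [ 0,  0,  0,  0,  0,  0],
#    [ 0,  0, 32, 33,  0,  0],
#    [ 0,  0,  0,  0,  0,  0]]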
return ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self, dtype):
# Includes two entries with the form [1, 1, x] : 150.
ind = np.array([
[0, 0, 1],
[0, 1, 0],
[0, 1, 2],
[1, 0, 3],
[1, 1, 0],
[1, 1, 1],
[1, 1, 2],
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape = np.array([2, 3, 4])
return ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def testInt32(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(dtypes.int32)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testInt64(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testHigherRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x3x4(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12),
(1, 0, 103), (1, 1, 149), (1, 1, 150),
(1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
ind = np.array([
[0, 0],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1]])
# NB: these are not sorted
indices = np.array([0, 13, 10, 14, 32, 33])
values = np.array([-3, 4, 1, 1, 5, 9])
shape = np.array([3, 3])
indices = ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(indices, indices_dtype),
constant_op.constant(shape, dtypes.int64))
values = ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(values, values_dtype),
constant_op.constant(shape, dtypes.int64))
return indices, values
def testInt32AndFloat32(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(dtypes.int32, dtypes.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self.assertAllEqual(
output.indices,
[[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
self.assertAllEqual(
output.values,
[-3, 1, 4, 1, 5, 9])
self.assertAllEqual(
output.shape,
[3, vocab_size])
def testInt64AndFloat32(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(dtypes.int64, dtypes.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self.assertAllEqual(
output.indices,
[[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
self.assertAllEqual(
output.values,
[-3, 1, 4, 1, 5, 9])
self.assertAllEqual(
output.shape,
[3, vocab_size])
def testInt64AndFloat64(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(dtypes.int64, dtypes.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self.assertAllEqual(
output.indices,
[[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
self.assertAllEqual(
output.values,
[-3, 1, 4, 1, 5, 9])
self.assertAllEqual(
output.shape,
[3, vocab_size])
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
self.assertAllEqual(output.values, [0, 14, 32])
self.assertAllEqual(output.shape, [5, 6])
def testRetainNone(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.zeros((6,), dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
self.assertAllEqual(output.values, [])
self.assertAllEqual(output.shape, [5, 6])
def testMismatchedRetainShape(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool)
with self.assertRaises(ValueError):
sparse_ops.sparse_retain(sp_input, to_retain)
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return ops.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
def testFillNumber(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testFillString(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_String5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, ""))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values,
[b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testNoEmptyRows(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
self.assertAllEqual(output.values, [0, 10, 13, 14])
self.assertAllEqual(output.shape, [2, 6])
self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
if __name__ == "__main__":
googletest.main()
"""Support for UpCloud."""
import dataclasses
from datetime import timedelta
import logging
from typing import Dict, List
import requests.exceptions
import upcloud_api
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
STATE_PROBLEM,
)
from homeassistant.core import CALLBACK_TYPE
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import CONFIG_ENTRY_UPDATE_SIGNAL_TEMPLATE, DEFAULT_SCAN_INTERVAL, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_CORE_NUMBER = "core_number"
ATTR_HOSTNAME = "hostname"
ATTR_MEMORY_AMOUNT = "memory_amount"
ATTR_STATE = "state"
ATTR_TITLE = "title"
ATTR_UUID = "uuid"
ATTR_ZONE = "zone"
CONF_SERVERS = "servers"
DATA_UPCLOUD = "data_upcloud"
DEFAULT_COMPONENT_NAME = "UpCloud {}"
DEFAULT_COMPONENT_DEVICE_CLASS = "power"
CONFIG_ENTRY_DOMAINS = {BINARY_SENSOR_DOMAIN, SWITCH_DOMAIN}
SIGNAL_UPDATE_UPCLOUD = "upcloud_update"
STATE_MAP = {"error": STATE_PROBLEM, "started": STATE_ON, "stopped": STATE_OFF}
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
}
)
},
extra=vol.ALLOW_EXTRA,
)
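# Illustrative YAML matching CONFIG_SCHEMA (this top-level config is
# deprecated as of 0.117 in favour of config entries; the username, password
# and interval values below are placeholders):
#
#   upcloud:
#     username: YOUR_USERNAME
#     password: YOUR_PASSWORD
#     scan_interval: 120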
class UpCloudDataUpdateCoordinator(
DataUpdateCoordinator[Dict[str, upcloud_api.Server]]
):
"""UpCloud data update coordinator."""
def __init__(
self,
hass: HomeAssistantType,
*,
cloud_manager: upcloud_api.CloudManager,
update_interval: timedelta,
username: str,
) -> None:
"""Initialize coordinator."""
super().__init__(
hass, _LOGGER, name=f"{username}@UpCloud", update_interval=update_interval
)
self.cloud_manager = cloud_manager
self.unsub_handlers: List[CALLBACK_TYPE] = []
async def async_update_config(self, config_entry: ConfigEntry) -> None:
"""Handle config update."""
self.update_interval = timedelta(
seconds=config_entry.options[CONF_SCAN_INTERVAL]
)
async def _async_update_data(self) -> Dict[str, upcloud_api.Server]:
return {
x.uuid: x
for x in await self.hass.async_add_executor_job(
self.cloud_manager.get_servers
)
}
@dataclasses.dataclass
class UpCloudHassData:
"""Home Assistant UpCloud runtime data."""
coordinators: Dict[str, UpCloudDataUpdateCoordinator] = dataclasses.field(
default_factory=dict
)
scan_interval_migrations: Dict[str, int] = dataclasses.field(default_factory=dict)
async def async_setup(hass: HomeAssistantType, config) -> bool:
"""Set up UpCloud component."""
domain_config = config.get(DOMAIN)
if not domain_config:
return True
_LOGGER.warning(
"Loading upcloud via top level config is deprecated and no longer "
"necessary as of 0.117. Please remove it from your YAML configuration."
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: domain_config[CONF_USERNAME],
CONF_PASSWORD: domain_config[CONF_PASSWORD],
},
)
)
if domain_config[CONF_SCAN_INTERVAL]:
hass.data[DATA_UPCLOUD] = UpCloudHassData()
hass.data[DATA_UPCLOUD].scan_interval_migrations[
domain_config[CONF_USERNAME]
] = domain_config[CONF_SCAN_INTERVAL]
return True
def _config_entry_update_signal_name(config_entry: ConfigEntry) -> str:
"""Get signal name for updates to a config entry."""
return CONFIG_ENTRY_UPDATE_SIGNAL_TEMPLATE.format(config_entry.unique_id)
async def _async_signal_options_update(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> None:
"""Signal config entry options update."""
async_dispatcher_send(
hass, _config_entry_update_signal_name(config_entry), config_entry
)
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:
"""Set up the UpCloud config entry."""
manager = upcloud_api.CloudManager(
config_entry.data[CONF_USERNAME], config_entry.data[CONF_PASSWORD]
)
try:
await hass.async_add_executor_job(manager.authenticate)
except upcloud_api.UpCloudAPIError:
_LOGGER.error("Authentication failed", exc_info=True)
return False
except requests.exceptions.RequestException as err:
_LOGGER.error("Failed to connect", exc_info=True)
raise ConfigEntryNotReady from err
upcloud_data = hass.data.setdefault(DATA_UPCLOUD, UpCloudHassData())
# Handle pre config entry (0.117) scan interval migration to options
migrated_scan_interval = upcloud_data.scan_interval_migrations.pop(
config_entry.data[CONF_USERNAME], None
)
if migrated_scan_interval and (
not config_entry.options.get(CONF_SCAN_INTERVAL)
or config_entry.options[CONF_SCAN_INTERVAL] == DEFAULT_SCAN_INTERVAL.seconds
):
update_interval = migrated_scan_interval
hass.config_entries.async_update_entry(
config_entry,
options={CONF_SCAN_INTERVAL: update_interval.seconds},
)
elif config_entry.options.get(CONF_SCAN_INTERVAL):
update_interval = timedelta(seconds=config_entry.options[CONF_SCAN_INTERVAL])
else:
update_interval = DEFAULT_SCAN_INTERVAL
coordinator = UpCloudDataUpdateCoordinator(
hass,
update_interval=update_interval,
cloud_manager=manager,
username=config_entry.data[CONF_USERNAME],
)
# Call the UpCloud API to refresh data
await coordinator.async_request_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
# Listen to config entry updates
coordinator.unsub_handlers.append(
config_entry.add_update_listener(_async_signal_options_update)
)
coordinator.unsub_handlers.append(
async_dispatcher_connect(
hass,
_config_entry_update_signal_name(config_entry),
coordinator.async_update_config,
)
)
upcloud_data.coordinators[config_entry.data[CONF_USERNAME]] = coordinator
# Forward entry setup
for domain in CONFIG_ENTRY_DOMAINS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, domain)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload the config entry."""
for domain in CONFIG_ENTRY_DOMAINS:
await hass.config_entries.async_forward_entry_unload(config_entry, domain)
coordinator: UpCloudDataUpdateCoordinator = hass.data[
DATA_UPCLOUD
].coordinators.pop(config_entry.data[CONF_USERNAME])
while coordinator.unsub_handlers:
coordinator.unsub_handlers.pop()()
return True
class UpCloudServerEntity(CoordinatorEntity):
"""Entity class for UpCloud servers."""
def __init__(self, coordinator, uuid):
"""Initialize the UpCloud server entity."""
super().__init__(coordinator)
self.uuid = uuid
@property
def _server(self) -> upcloud_api.Server:
return self.coordinator.data[self.uuid]
@property
def unique_id(self) -> str:
"""Return unique ID for the entity."""
return self.uuid
@property
def name(self):
"""Return the name of the component."""
try:
return DEFAULT_COMPONENT_NAME.format(self._server.title)
except (AttributeError, KeyError, TypeError):
return DEFAULT_COMPONENT_NAME.format(self.uuid)
@property
def icon(self):
"""Return the icon of this server."""
return "mdi:server" if self.is_on else "mdi:server-off"
@property
def state(self):
"""Return state of the server."""
try:
return STATE_MAP.get(self._server.state, self._server.state)
except AttributeError:
return None
@property
def is_on(self):
"""Return true if the server is on."""
return self.state == STATE_ON
@property
def device_class(self):
"""Return the class of this server."""
return DEFAULT_COMPONENT_DEVICE_CLASS
@property
def device_state_attributes(self):
"""Return the state attributes of the UpCloud server."""
return {
x: getattr(self._server, x, None)
for x in (
ATTR_UUID,
ATTR_TITLE,
ATTR_HOSTNAME,
ATTR_ZONE,
ATTR_CORE_NUMBER,
ATTR_MEMORY_AMOUNT,
)
}
from JXGServerModule import JXGServerModule
import JXG
import numpy
import os
# Should be changed to something more persistent but must be writable by
# the webserver (usually user www-data)
if 'MPLCONFIGDIR' not in os.environ:
os.environ['MPLCONFIGDIR'] = '/tmp/'
# os.environ['MPLCONFIGDIR'] = 'C:/xampp/tmp'
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import *
from matplotlib.contour import *
import subprocess
import signal
import time
import re
import zlib
import base64
import cStringIO
import cgi
import math
class JXGGeoLociModule(JXGServerModule):
def __init__(self):
############################
#
# Config lines
#
############################
# Command to start cocoa
self.cmd_cocoa = "/share8/opt/cocoa/cocoa"
# If you're using Windows
#cmd_cocoa = r"C:\cocoa\cocoa.bat"
# Shouldn't be changed, except you know what you're doing
self.debug = False
############################
self.debugOutput = cStringIO.StringIO()
JXGServerModule.__init__(self)
return
def init(self, resp):
resp.addHandler(self.lociCoCoA, 'function(data) { }')
return
def lociCoCoA(self, resp, xs, xe, ys, ye, number, polys, sf, rot, transx, transy):
self.output = ''
self.cocoa_process = None
cinput = ""
c = math.cos(rot)
s = math.sin(rot)
tx = 0
# Variable code begins here
# Here indeterminates of polynomial ring have to be adjusted
if number > 0:
cinput += "Use R ::= QQ[u[1..%s],x,y], Xel;" % number
else:
cinput += "Use R ::= QQ[x,y];"
# Of course the polynomials generating the ideal must be adjusted
cinput += "I := Ideal(%s);" % polys
# So have to be the indeterminates to be eliminated
if number > 0:
cinput += "J := Elim(u[1]..u[%s], I); J;" % number
else:
cinput += "J := I; J;"
# and ends here
# Fixed code which doesn't have to be adjusted on each run of this script
cinput += "G := ReducedGBasis(J);"
cinput += "Print \"resultsbegin\", NewLine;"
cinput += "For N := 1 To Len(G) Do\n"
cinput += " B := Factor(G[N]);\n"
cinput += " For M := 1 To Len(B) Do\n"
cinput += " StarPrintFold(B[M][1], -1);"
cinput += " Print NewLine;"
cinput += " EndFor;\n"
cinput += "EndFor;\n"
cinput += "Print \"resultsend\", NewLine;"
#cinput = "Ciao;"
if self.debug:
print >>self.debugOutput, "Starting CoCoA with input<br />"
print >>self.debugOutput, cinput + '<br />'
# The suicide pill for the CoCoA process:
# If not done within the following amount
# of seconds, the subprocess will be terminated
time_left = 30
class TimeoutException(Exception): pass
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException, "Timed out!"
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
#global cocoa_process
#global output
def callCoCoA():
# Global variables aren't that nice, but this time they're useful
#global cocoa_process, output
self.cocoa_process = subprocess.Popen([self.cmd_cocoa], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
self.output = self.cocoa_process.communicate(cinput)[0]
calc_time = time.time()
try:
time_limit(time_left)
callCoCoA()
except TimeoutException, msg:
# This is only tested with linux/unix
# and works ONLY if the cocoa script cd-ing
# to the cocoa dir and starting cocoa executes
# it with
# $ exec ./cocoa_text
# This is NOT YET TESTED WITH WINDOWS! (though
# sharing tests would be nice).
self.cocoa_process.kill()
if self.debug:
print >>self.debugOutput, "Timed out!"
resp.error("Timeout, maybe the system of polynomial is too big or there's an error in it.")
return
calc_time = time.time() - calc_time
resp.addData('exectime', calc_time)
if self.debug:
print >>self.debugOutput, "Reading and Parsing CoCoA output" + '<br />'
print >>self.debugOutput, self.output + '<br />'
# Extract results
if re.search('resultsbegin', self.output) is None:
return
result = re.split('resultsend', re.split('resultsbegin', self.output)[1])[0]
result = re.split('-------------------------------', re.split('-------------------------------', result)[1])[0]
result = result.replace("^", "**")
result = result.replace("\r", "")
polynomials = re.split('\n', result)
if self.debug:
print >>self.debugOutput, "Found the following polynomials:" + '<br />'
for i in range(0,len(polynomials)):
print >>self.debugOutput, "Polynomial ", i+1, ": " + polynomials[i] + '<br />'
datax = []
datay = []
polynomialsReturn = []
for i in range(0,len(polynomials)):
if len(polynomials[i]) == 0:
continue
if ((not "x" in polynomials[i]) and (not "y" in polynomials[i])) or ("W" in polynomials[i]):
continue
polynomialsReturn.append(polynomials[i])
x, y = numpy.meshgrid(numpy.linspace(xs, xe, 500), numpy.linspace(ys, ye, 500))
z = eval(polynomials[i])
C = contour(x, y, z, [0])
if self.debug:
savefig('/tmp/test%s.png' % i)
for j in range(0, len(C.collections[0].get_paths())):
pa = C.collections[0].get_paths()[j].to_polygons()[0]
for k in range(0, len(pa)):
tx = pa[k, 0]
pa[k, 0] = c*pa[k, 0] - s*pa[k, 1]
pa[k, 1] = s*tx + c*pa[k, 1]
datax.append(sf*pa[k, 0] + transx)
datay.append(sf*pa[k, 1] + transy)
datax.append('null')
datay.append('null')
resp.addData('datax', datax)
resp.addData('datay', datay)
resp.addData('polynomial', polynomialsReturn)
if self.debug:
print >>self.debugOutput, ", ".join(map(str, datax)) + '<br />'
print >>self.debugOutput, ", ".join(map(str, datay)) + '<br />'
print "Content-Type: text/plain\n\n"
print
print
print self.debugOutput.getvalue()
self.debugOutput.close()
return
#!/usr/bin/env python
"""
Module task
:Company: SwissTech Consulting
:Author: Patrick Glass <[email protected]>
:Copyright: Copyright 2015 SwissTech Consulting
This software is used for flow development and execution of pipelines
"""
import logging
import warnings
import threading
import multiprocessing
import types
from .task import Task
from .task_state import PENDING, CHECK, RUNABLE, RUNNING, SUCCESS, FAILED, DISABLED
from .message import Message, red, green, blue, yellow, magenta, cyan
# logger = logging.getLogger(__name__)
logger = Message(__name__)
class Worker(object):
def __init__(self, group=None, target=None, name=None, *args, **kwargs):
self.group = group
self.target = target
self.name = name
self.args = args
self.kwargs = kwargs
def run(self):
return self.target(*self.args, **self.kwargs)
def join(self):
pass
class ProcessWorker(multiprocessing.Process):
pass
class ThreadWorker(threading.Thread):
pass
class TaskManager(object):
def __init__(self, task_base=Task, worker_type=Worker):
self.base_class = task_base
self.worker_type = worker_type
self.worker_results = {}
def _target_to_instance(self, target):
# We allow class names to be specified as a string
# Find the actual class from the string
if isinstance(target, basestring):
target = self.base_class.class_by_name(target)
# Check whether the value is a class or instance
# we want to return only instances
if isinstance(target, (type, types.ClassType)):
return target()
else:
return target
def _get_tasks(self, target):
"""
If a target is passed in, get only related tasks;
otherwise get all defined task instances.
"""
if target:
return TaskManager.all_deps(self._target_to_instance(target))
else:
warnings.warn(
"You must specify the target. Discovery is not supported yet",
DeprecationWarning
)
# Get all defined classes and instantiate them.
# Then grab and return all instances.
for task_cls in self.base_class.classes.values():
task_cls()
return self.base_class.instances.values()
# return self.base_class.classes.values()
# def add(self, target):
# logger.debug("Adding target %s to pool." % target)
# warnings.warn(
# "'add' is deprecated. All calls should change to passing target to status, run, clean, and force!",
# DeprecationWarning
# )
# self.targets.append(target)
def status(self, target=None):
tasks = self._get_tasks(target)
for task in tasks:
try:
s = "Task: " + green("'%s'" % task.name)
is_complete = task.complete()
if not is_complete:
s += " needs to be run!\n"
s += yellow(' Inputs:\n')
for i in task.inputs():
s += cyan(" %s\n" % i.to_string(True))
s += yellow(' Outputs:\n')
for i in task.outputs():
s += cyan(" %s\n" % i.to_string(True))
else:
s += " is up to date!"
logger.info(s)
except NotImplementedError:
logger.info("%s is not implemented correctly. Skipping..." % task.name)
def clean(self, target=None, level=0):
"""Clean iterates through all outputs and will call remove() on it if
the level of the file is lower than specified. This allows you to
have different levels of clean.
"""
tasks = self._get_tasks(target)
# go through each task and check the outputs
for task in tasks:
for output in task.outputs():
if output.level <= level:
logging.info("Removing: %s", output)
else:
logging.debug("Preserving: %s", output)
def force(self, target=None):
"""Will force all outputs of specified target to be up to date. This
will have the same effect as `touch`ing the output files.
"""
task = self._target_to_instance(target)
logging.info("Forcing '%s' to be updated!", task)
for output in task.outputs():
logging.debug("Force Update: %s", output)
def run_new_state(self):
for target in self.targets:
for task in TaskManager.all_deps(target):
self.update_state(task)
def update_state(self, task):
if task.state == PENDING:
if len(task.inputs()) == 0:
task.state = CHECK
elif all([t.state for t in task.inputs() if t.state != SUCCESS]):
task.state = CHECK
if task.state == CHECK:
if task.complete():
task.state = SUCCESS
else:
task.state = RUNABLE
elif task.state == RUNNING:
# Check with the results dict if this task has results posted
if task.name in self.worker_results:
result = self.worker_results[task.name]
is_complete = task.complete()
if not result:
logging.warn(
"%s failed with return code: %s",
task.name, result)
task.state = FAILED
elif not is_complete:
task.state = FAILED
logging.error(
"%s failed complete() task check. This should "
"never occur since the task should be able to "
"determine success without relying on job return "
"code. Please report this error!",
task.name)
# raise RuntimeError()
else:
logging.debug(
"%s completed successfully!", task.name)
task.state = SUCCESS
# Final states are SUCCESS, FAILED, DISABLED. Do Nothing
# for state RUNABLE external process will queue the run and
# change it status once completed.
@staticmethod
def all_deps(task):
# logger.debug("Flatten Task: %s", task)
for dependent in task.depends():
# logger.debug("Flatten SubTask: %s", dependent)
for subtask in TaskManager.all_deps(dependent):
yield subtask
# logger.debug("Yield Main incomplete Task: %s", task)
yield task
@staticmethod
def flatten_incomplete_tasks(task):
# logger.debug("Flatten Task: %s", task)
for dependent in task.depends():
# logger.debug("Flatten SubTask: %s", dependent)
for subtask in TaskManager.flatten_incomplete_tasks(dependent):
yield subtask
if not task.state == DISABLED and not task.complete():
# logger.debug("Yield Main incomplete Task: %s", task)
yield task
def run(self, target=None):
target = self._target_to_instance(target)
for task in TaskManager.flatten_incomplete_tasks(target):
logger.info("Running Task: %s", task.name)
worker = self.worker_type(target=task.run, name=task.name)
worker.join()
ret_code = worker.run()
if ret_code and task.complete():
logger.info("\t\t'%s' Completed", task.name)
task.state = SUCCESS
else:
logger.error("*** '%s' FAILED", task.name)
print(task)
task.state = FAILED
raise RuntimeError(
"Task '%s' failed to run or had bad exit code. "
"Exiting run...", task)
# Authors: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Nelle Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import interpolate
from scipy.stats import spearmanr
import warnings
import math
from .base import BaseEstimator, TransformerMixin, RegressorMixin
from .utils import check_array, check_consistent_length
from .utils.validation import _check_sample_weight
from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique
__all__ = ["check_increasing", "isotonic_regression", "IsotonicRegression"]
def check_increasing(x, y):
"""Determine whether y is monotonically correlated with x.
y is found increasing or decreasing with respect to x based on a Spearman
correlation test.
Parameters
----------
x : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
Returns
-------
increasing_bool : boolean
Whether the relationship is increasing or decreasing.
Notes
-----
The Spearman correlation coefficient is estimated from the data, and the
sign of the resulting estimate is used as the result.
In the event that the 95% confidence interval based on Fisher transform
spans zero, a warning is raised.
References
----------
Fisher transformation. Wikipedia.
https://en.wikipedia.org/wiki/Fisher_transformation
"""
# Calculate Spearman rho estimate and set return accordingly.
rho, _ = spearmanr(x, y)
increasing_bool = rho >= 0
# Run Fisher transform to get the rho CI, but handle rho=+/-1
if rho not in [-1.0, 1.0] and len(x) > 3:
F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))
F_se = 1 / math.sqrt(len(x) - 3)
# Use a 95% CI, i.e., +/-1.96 S.E.
# https://en.wikipedia.org/wiki/Fisher_transformation
rho_0 = math.tanh(F - 1.96 * F_se)
rho_1 = math.tanh(F + 1.96 * F_se)
# Warn if the CI spans zero.
if np.sign(rho_0) != np.sign(rho_1):
warnings.warn(
"Confidence interval of the Spearman "
"correlation coefficient spans zero. "
"Determination of ``increasing`` may be "
"suspect."
)
return increasing_bool
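# Illustrative usage (a minimal sketch; the inputs are arbitrary example data):
#
#   increasing = check_increasing([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
#   # -> truthy, since the Spearman correlation is positive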
def isotonic_regression(
y, *, sample_weight=None, y_min=None, y_max=None, increasing=True
):
"""Solve the isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : array-like of shape (n_samples,)
The data.
sample_weight : array-like of shape (n_samples,), default=None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool, default=True
Whether to compute ``y_`` as increasing (if set to True) or decreasing
(if set to False).
Returns
-------
y_ : list of floats
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
"""
order = np.s_[:] if increasing else np.s_[::-1]
y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32])
y = np.array(y[order], dtype=y.dtype)
sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True)
sample_weight = np.ascontiguousarray(sample_weight[order])
_inplace_contiguous_isotonic_regression(y, sample_weight)
if y_min is not None or y_max is not None:
# Older versions of np.clip don't accept None as a bound, so use np.inf
if y_min is None:
y_min = -np.inf
if y_max is None:
y_max = np.inf
np.clip(y, y_min, y_max, y)
return y[order]
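# Illustrative usage (a minimal sketch; the input values are arbitrary):
#
#   isotonic_regression([3.0, 1.0, 2.0, 4.0])
#   # -> approximately [2.0, 2.0, 2.0, 4.0] (the first three values are pooled)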
class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
"""Isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
.. versionadded:: 0.13
Parameters
----------
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool or 'auto', default=True
Determines whether the predictions should be constrained to increase
or decrease with `X`. 'auto' will decide based on the Spearman
correlation estimate's sign.
out_of_bounds : {'nan', 'clip', 'raise'}, default='nan'
Determines how `X` values outside of the training domain are handled
during prediction.
- 'nan', predictions will be NaN.
- 'clip', predictions will be set to the value corresponding to
the nearest train interval endpoint.
- 'raise', a `ValueError` is raised.
Attributes
----------
X_min_ : float
Minimum value of input array `X_` for left bound.
X_max_ : float
Maximum value of input array `X_` for right bound.
X_thresholds_ : ndarray of shape (n_thresholds,)
Unique ascending `X` values used to interpolate
the y = f(X) monotonic function.
.. versionadded:: 0.24
y_thresholds_ : ndarray of shape (n_thresholds,)
De-duplicated `y` values suitable to interpolate the y = f(X)
monotonic function.
.. versionadded:: 0.24
f_ : function
The stepwise interpolating function that covers the input domain ``X``.
increasing_ : bool
Inferred value for ``increasing``.
See Also
--------
sklearn.linear_model.LinearRegression : Ordinary least squares Linear
Regression.
sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that
is a non-parametric model accepting monotonicity constraints.
isotonic_regression : Function to solve the isotonic regression model.
Notes
-----
Ties are broken using the secondary method from de Leeuw, 1977.
References
----------
Isotonic Median Regression: A Linear Programming Approach
Nilotpal Chakravarti
Mathematics of Operations Research
Vol. 14, No. 2 (May, 1989), pp. 303-308
Isotone Optimization in R : Pool-Adjacent-Violators
Algorithm (PAVA) and Active Set Methods
de Leeuw, Hornik, Mair
Journal of Statistical Software 2009
Correctness of Kruskal's algorithms for monotone regression with ties
de Leeuw, Psychometrica, 1977
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.isotonic import IsotonicRegression
>>> X, y = make_regression(n_samples=10, n_features=1, random_state=41)
>>> iso_reg = IsotonicRegression().fit(X, y)
>>> iso_reg.predict([.1, .2])
array([1.8628..., 3.7256...])
"""
def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"):
self.y_min = y_min
self.y_max = y_max
self.increasing = increasing
self.out_of_bounds = out_of_bounds
def _check_input_data_shape(self, X):
if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)):
msg = (
"Isotonic regression input X should be a 1d array or "
"2d array with 1 feature"
)
raise ValueError(msg)
def _build_f(self, X, y):
"""Build the f_ interp1d function."""
# Handle the out_of_bounds argument by setting bounds_error
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError(
"The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}".format(self.out_of_bounds)
)
bounds_error = self.out_of_bounds == "raise"
if len(y) == 1:
# single y, constant prediction
self.f_ = lambda x: y.repeat(x.shape)
else:
self.f_ = interpolate.interp1d(
X, y, kind="linear", bounds_error=bounds_error
)
def _build_y(self, X, y, sample_weight, trim_duplicates=True):
"""Build the y_ IsotonicRegression."""
self._check_input_data_shape(X)
X = X.reshape(-1) # use 1d view
# Determine increasing if auto-determination requested
if self.increasing == "auto":
self.increasing_ = check_increasing(X, y)
else:
self.increasing_ = self.increasing
        # If sample_weight is passed, remove zero-weight values and clean
        # the order
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
mask = sample_weight > 0
X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
order = np.lexsort((y, X))
X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]
unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)
X = unique_X
y = isotonic_regression(
unique_y,
sample_weight=unique_sample_weight,
y_min=self.y_min,
y_max=self.y_max,
increasing=self.increasing_,
)
# Handle the left and right bounds on X
self.X_min_, self.X_max_ = np.min(X), np.max(X)
if trim_duplicates:
# Remove unnecessary points for faster prediction
keep_data = np.ones((len(y),), dtype=bool)
# Aside from the 1st and last point, remove points whose y values
# are equal to both the point before and the point after it.
keep_data[1:-1] = np.logical_or(
np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:])
)
return X[keep_data], y[keep_data]
else:
            # The ability to turn off trim_duplicates is only used to make it
            # easier to unit test that removing duplicates in y does not have
            # any impact on the resulting interpolation function (besides
            # prediction speed).
return X, y
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,) or (n_samples, 1)
Training data.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
"""
check_params = dict(accept_sparse=False, ensure_2d=False)
X = check_array(
X, input_name="X", dtype=[np.float64, np.float32], **check_params
)
y = check_array(y, input_name="y", dtype=X.dtype, **check_params)
check_consistent_length(X, y, sample_weight)
# Transform y by running the isotonic regression algorithm and
# transform X accordingly.
X, y = self._build_y(X, y, sample_weight)
# It is necessary to store the non-redundant part of the training set
# on the model to make it possible to support model persistence via
# the pickle module as the object built by scipy.interp1d is not
# picklable directly.
self.X_thresholds_, self.y_thresholds_ = X, y
# Build the interpolation function
self._build_f(X, y)
return self
def transform(self, T):
"""Transform new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,) or (n_samples, 1)
Data to transform.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The transformed data.
"""
if hasattr(self, "X_thresholds_"):
dtype = self.X_thresholds_.dtype
else:
dtype = np.float64
T = check_array(T, dtype=dtype, ensure_2d=False)
self._check_input_data_shape(T)
T = T.reshape(-1) # use 1d view
# Handle the out_of_bounds argument by clipping if needed
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError(
"The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}".format(self.out_of_bounds)
)
if self.out_of_bounds == "clip":
T = np.clip(T, self.X_min_, self.X_max_)
res = self.f_(T)
# on scipy 0.17, interp1d up-casts to float64, so we cast back
res = res.astype(T.dtype)
return res
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,) or (n_samples, 1)
Data to transform.
Returns
-------
y_pred : ndarray of shape (n_samples,)
Transformed data.
"""
return self.transform(T)
# We implement get_feature_names_out here instead of using
    # `_ClassNamePrefixFeaturesOutMixin` because `input_features` are ignored.
# `input_features` are ignored because `IsotonicRegression` accepts 1d
# arrays and the semantics of `feature_names_in_` are not clear for 1d arrays.
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Ignored.
Returns
-------
feature_names_out : ndarray of str objects
An ndarray with one string i.e. ["isotonicregression0"].
"""
class_name = self.__class__.__name__.lower()
return np.asarray([f"{class_name}0"], dtype=object)
def __getstate__(self):
"""Pickle-protocol - return state of the estimator."""
state = super().__getstate__()
# remove interpolation method
state.pop("f_", None)
return state
def __setstate__(self, state):
"""Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.
"""
super().__setstate__(state)
if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"):
self._build_f(self.X_thresholds_, self.y_thresholds_)
def _more_tags(self):
return {"X_types": ["1darray"]}
|
|
# -*- coding: utf-8 -*-
import json
from urllib.parse import unquote
from scrapy.utils.misc import load_object
from scrapy.utils.serialize import ScrapyJSONEncoder
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.web import resource, server
from twisted.web.error import Error, UnsupportedMethod
from . import log
from .conf import app_settings
from .utils import extract_scrapy_request_args, to_bytes
class AdaptedScrapyJSONEncoder(ScrapyJSONEncoder):
def default(self, o):
if isinstance(o, bytes):
return o.decode('utf8')
else:
return super().default(o)
# XXX super() calls won't work without an object mixin in Python 2
# maybe this can be removed at some point?
class ServiceResource(resource.Resource, object):
json_encoder = AdaptedScrapyJSONEncoder()
def __init__(self, root=None):
resource.Resource.__init__(self)
self.root = root
def render(self, request):
try:
result = resource.Resource.render(self, request)
except Exception as e:
result = self.handle_error(e, request)
if not isinstance(result, Deferred):
return self.render_object(result, request)
# deferred result - add appropriate callbacks and errbacks
result.addErrback(self.handle_error, request)
def finish_request(obj):
request.write(self.render_object(obj, request))
request.finish()
result.addCallback(finish_request)
return server.NOT_DONE_YET
def handle_error(self, exception_or_failure, request):
"""Override this method to add custom exception handling.
:param request: twisted.web.server.Request
:param exception_or_failure: Exception or
twisted.python.failure.Failure
:return: dict which will be converted to JSON error response
"""
failure = None
if isinstance(exception_or_failure, Exception):
exception = exception_or_failure
elif isinstance(exception_or_failure, Failure):
exception = exception_or_failure.value
failure = exception_or_failure
else:
raise TypeError(
'Expected Exception or {} instances, got {}'.format(
Failure,
exception_or_failure.__class__
))
if request.code == 200:
# Default code - means that error wasn't handled
if isinstance(exception, UnsupportedMethod):
request.setResponseCode(405)
elif isinstance(exception, Error):
code = int(exception.status)
request.setResponseCode(code)
else:
request.setResponseCode(500)
if request.code == 500:
log.err(failure)
return self.format_error_response(exception, request)
def format_error_response(self, exception, request):
        # Python exceptions don't have a 'message' attribute in Python 3+ anymore.
        # Twisted HTTP Error objects still have a 'message' attribute even in 3+,
        # and they fail on a str(exception) call.
msg = exception.message if hasattr(
exception, 'message') else str(exception)
return {
"status": "error",
"message": msg,
"code": request.code
}
def render_object(self, obj, request):
r = self.json_encoder.encode(obj) + "\n"
request.setHeader('Content-Type', 'application/json')
request.setHeader('Access-Control-Allow-Origin', '*')
request.setHeader('Access-Control-Allow-Methods',
', '.join(getattr(self, 'allowedMethods', [])))
request.setHeader('Access-Control-Allow-Headers', 'X-Requested-With')
request.setHeader('Content-Length', str(len(r)))
return r.encode("utf8")
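# An illustrative subclass (not part of scrapyrt) showing how handle_error()
# can be customized, as its docstring above suggests. The class name and the
# extra "error_type" field are assumptions made for this sketch.
class TaggedErrorResource(ServiceResource):
    def handle_error(self, exception_or_failure, request):
        response = super(TaggedErrorResource, self).handle_error(
            exception_or_failure, request)
        # Expose the exception class name so clients can branch on it.
        if isinstance(exception_or_failure, Failure):
            response["error_type"] = exception_or_failure.type.__name__
        else:
            response["error_type"] = type(exception_or_failure).__name__
        return response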
class RealtimeApi(ServiceResource):
def __init__(self, **kwargs):
super(RealtimeApi, self).__init__(self)
for route, resource_path in app_settings.RESOURCES.items():
resource_cls = load_object(resource_path)
route = to_bytes(route)
self.putChild(route, resource_cls(self, **kwargs))
class CrawlResource(ServiceResource):
isLeaf = True
allowedMethods = ['GET', 'POST']
def render_GET(self, request, **kwargs):
"""Request querysting must contain following keys: url, spider_name.
At the moment kwargs for scrapy request are not supported in GET.
They are supported in POST handler.
"""
api_params = dict(
(name.decode('utf-8'), value[0].decode('utf-8'))
for name, value in request.args.items()
)
scrapy_request_args = extract_scrapy_request_args(api_params,
raise_error=False)
self.validate_options(scrapy_request_args, api_params)
return self.prepare_crawl(api_params, scrapy_request_args, **kwargs)
def render_POST(self, request, **kwargs):
"""
:param request:
body should contain JSON
Required keys in JSON posted:
:spider_name: string
name of spider to be scheduled.
:request: json object
request to be scheduled with spider.
Note: request must contain url for spider.
It may contain kwargs to scrapy request.
"""
request_body = request.content.getvalue()
try:
api_params = json.loads(request_body)
except Exception as e:
message = "Invalid JSON in POST body. {}"
message = message.format(e)
# TODO should be integer not string?
raise Error('400', message=message)
log.msg("{}".format(api_params))
if api_params.get("start_requests"):
# start requests passed so 'request' argument is optional
_request = api_params.get("request", {})
else:
# no start_requests, 'request' is required
_request = self.get_required_argument(api_params, "request")
try:
scrapy_request_args = extract_scrapy_request_args(
_request, raise_error=True
)
except ValueError as e:
raise Error('400', str(e))
self.validate_options(scrapy_request_args, api_params)
return self.prepare_crawl(api_params, scrapy_request_args, **kwargs)
def validate_options(self, scrapy_request_args, api_params):
url = scrapy_request_args.get("url")
start_requests = api_params.get("start_requests")
if not url and not start_requests:
raise Error('400',
"'url' is required if start_requests are disabled")
def get_required_argument(self, api_params, name, error_msg=None):
"""Get required API key from dict-like object.
:param dict api_params:
dictionary with names and values of parameters supplied to API.
:param str name:
required key that must be found in api_params
:return: value of required param
:raises Error: Bad Request response
"""
if error_msg is None:
error_msg = 'Missing required parameter: {}'.format(repr(name))
try:
value = api_params[name]
except KeyError:
raise Error('400', message=error_msg)
if not value:
raise Error('400', message=error_msg)
return value
def prepare_crawl(self, api_params, scrapy_request_args, *args, **kwargs):
"""Schedule given spider with CrawlManager.
:param dict api_params:
arguments needed to find spider and set proper api parameters
for crawl (max_requests for example)
:param dict scrapy_request_args:
should contain positional and keyword arguments for Scrapy
Request object that will be created
"""
spider_name = self.get_required_argument(api_params, 'spider_name')
start_requests = api_params.get("start_requests", False)
try:
max_requests = api_params['max_requests']
except (KeyError, IndexError):
max_requests = None
crawl_args = api_params.get("crawl_args")
if isinstance(crawl_args, str):
try:
crawl_args = json.loads(unquote(crawl_args))
except Exception as e:
msg = "crawl_args must be valid url encoded JSON"
msg += " this string cannot be decoded with JSON"
msg += f' {str(e)}'
raise Error('400', message=msg)
dfd = self.run_crawl(
spider_name, scrapy_request_args, max_requests,
start_requests=start_requests,
crawl_args=crawl_args,
*args,
**kwargs)
dfd.addCallback(
self.prepare_response, request_data=api_params, *args, **kwargs)
return dfd
def run_crawl(self, spider_name, scrapy_request_args,
max_requests=None, crawl_args=None,
start_requests=False, *args, **kwargs):
crawl_manager_cls = load_object(app_settings.CRAWL_MANAGER)
manager = crawl_manager_cls(
spider_name, scrapy_request_args, max_requests, start_requests=start_requests)
if crawl_args:
kwargs.update(crawl_args)
dfd = manager.crawl(*args, **kwargs)
return dfd
def prepare_response(self, result, *args, **kwargs):
items = result.get("items")
response = {
"status": "ok",
"items": items,
"items_dropped": result.get("items_dropped", []),
"stats": result.get("stats"),
"spider_name": result.get("spider_name"),
}
errors = result.get("errors")
if errors:
response["errors"] = errors
return response
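# A hedged client-side sketch (not part of scrapyrt) of the POST contract
# handled by CrawlResource above. The host, port and "crawl.json" route are
# assumptions about a typical deployment; the spider name and URL are
# placeholders. Requires the third-party `requests` package.
if __name__ == "__main__":  # pragma: no cover - illustration only
    import requests

    payload = {
        "spider_name": "example_spider",            # hypothetical spider
        "max_requests": 5,
        "request": {"url": "http://example.com/"},  # kwargs for the scrapy Request
    }
    resp = requests.post("http://localhost:9080/crawl.json", json=payload)
    data = resp.json()
    print(data["status"], len(data.get("items", [])))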
|
|
#!/usr/bin/python
# coding=utf-8
# original code "https://gist.github.com/rygwdn/394885"
################################################################################
import os
import sys
import curses
import getopt
from curses import wrapper
################################################################################
__version__='0.1.5'
ESC = 27
result = ''
start = '.'
show_hidden = False
################################################################################
def pad(data, width):
# ToDo: this won't work with UTF-8
return data + ' ' * (width - len(data))
################################################################################
def list_dir_only(d):
sds = []
for sd in os.listdir(d):
if os.path.isdir(os.path.join(d, sd)):
if not show_hidden and sd not in ('.', '..') and sd[0] == '.':
continue
sds.append(sd)
return sorted(sds)
################################################################################
class Dir(object):
# ==========================================================================
def __init__(self, name):
# File.__init__(self, name)
self.name = name
try:
self.kidnames = list_dir_only(name)
        except OSError:
self.kidnames = None # probably permission denied
self.kids = None
self.expanded = False
# ==========================================================================
def render(self, depth, width):
return pad('%s%s %s' % (' ' * 4 * depth, self.icon(),
os.path.basename(self.name)), width)
# ==========================================================================
def children(self):
if self.kidnames is None: return []
if self.kids is None:
self.kids = [factory(os.path.join(self.name, kid))
for kid in self.kidnames]
return self.kids
# ==========================================================================
def icon(self):
if self.expanded:
return '[-]'
elif self.kidnames is None:
return '[?]'
elif self.children():
return '[+]'
else:
return '[ ]'
# ==========================================================================
def expand(self):
self.expanded = True
# ==========================================================================
def collapse(self):
self.expanded = False
# ==========================================================================
def traverse(self):
yield self, 0
if not self.expanded: return
for child in self.children():
for kid, depth in child.traverse():
yield kid, depth + 1
################################################################################
def factory(name):
return Dir(name)
################################################################################
def c_main(stdscr):
cargo_cult_routine(stdscr)
stdscr.nodelay(0)
mydir = factory(start)
mydir.expand()
curidx = 3
pending_action = None
pending_save = False
while True:
stdscr.clear()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
line = 0
offset = max(0, curidx - curses.LINES + 3)
for data, depth in mydir.traverse():
if line == curidx:
stdscr.attrset(curses.color_pair(1) | curses.A_BOLD)
if pending_action:
getattr(data, pending_action)()
pending_action = None
elif pending_save:
global result
result = data.name
return
else:
stdscr.attrset(curses.color_pair(0))
if 0 <= line - offset < curses.LINES - 1:
stdscr.addstr(line - offset, 0,
data.render(depth, curses.COLS))
line += 1
stdscr.refresh()
ch = stdscr.getch()
if ch == curses.KEY_UP:
curidx -= 1
elif ch == curses.KEY_DOWN:
curidx += 1
elif ch == curses.KEY_PPAGE:
curidx -= curses.LINES
if curidx < 0: curidx = 0
elif ch == curses.KEY_NPAGE:
curidx += curses.LINES
if curidx >= line: curidx = line - 1
elif ch == curses.KEY_RIGHT:
pending_action = 'expand'
elif ch == curses.KEY_LEFT:
pending_action = 'collapse'
elif ch == ESC:
return
elif ch == ord('\n'):
pending_save = True
curidx %= line
################################################################################
def cargo_cult_routine(win):
win.clear()
win.refresh()
curses.nl()
curses.noecho()
win.timeout(0)
################################################################################
def open_tty():
saved_stdin = os.dup(0)
saved_stdout = os.dup(1)
os.close(0)
os.close(1)
stdin = os.open('/dev/tty', os.O_RDONLY)
stdout = os.open('/dev/tty', os.O_RDWR)
return saved_stdin, saved_stdout
################################################################################
def restore_stdio(saved_stdin, saved_stdout):
os.close(0)
os.close(1)
os.dup(saved_stdin)
os.dup(saved_stdout)
################################################################################
def usage(msg=None):
"""
    usage for this directory chooser
:param msg:
:return:
"""
if msg:
sys.stderr.write('%s\n' % msg)
print('''
usage: {0} [options] [start_dir]
interactively choose a directory and print the selected path
options are:
-h, --help : show this message
-s, --show_hidden : show hidden directories
'''.format(sys.argv[0]))
sys.exit(1)
################################################################################
def main():
global start
global show_hidden
saved_fds = (os.dup(0), os.dup(1))
try:
opts, args = getopt.getopt(
sys.argv[1:], "hs",
["help", "show_hidden"]
)
for o, a in opts:
if o in ("-h", "--help"):
usage()
elif o in ("-s", "--show_hidden"):
show_hidden = True
if len(args) > 0:
start = args[0]
if not os.path.isdir(start):
sys.stderr.write('Error: directory needed!\n')
sys.exit(9)
saved_fds = open_tty()
wrapper(c_main)
restore_stdio(*saved_fds)
print(result)
except Exception as e:
restore_stdio(*saved_fds)
usage(str(e))
################################################################################
if __name__ == '__main__':
main()
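################################################################################
# Illustration only: the picker prints the chosen directory on stdout, so it
# can be driven from another script. A hedged sketch (the file name
# "dirtree.py" is an assumption):
#
#   import subprocess, sys
#   chosen = subprocess.run(
#       [sys.executable, "dirtree.py", "-s", "."],
#       capture_output=True, text=True).stdout.strip()
#   print("selected:", chosen)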
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import signal
import sys
import time
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
from ray.tests.cluster_utils import Cluster
from ray.tests.utils import (run_string_as_driver_nonblocking,
RayTestTimeoutException)
# This test checks that when a worker dies in the middle of a get, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Not working with new GCS API.")
def test_dying_worker_get(ray_start_2_cpus):
@ray.remote
def sleep_forever():
time.sleep(10**6)
@ray.remote
def get_worker_pid():
return os.getpid()
x_id = sleep_forever.remote()
time.sleep(0.01) # Try to wait for the sleep task to get scheduled.
# Get the PID of the other worker.
worker_pid = ray.get(get_worker_pid.remote())
@ray.remote
def f(id_in_a_list):
ray.get(id_in_a_list[0])
# Have the worker wait in a get call.
result_id = f.remote([x_id])
time.sleep(1)
# Make sure the task hasn't finished.
ready_ids, _ = ray.wait([result_id], timeout=0)
assert len(ready_ids) == 0
# Kill the worker.
os.kill(worker_pid, signal.SIGKILL)
time.sleep(0.1)
# Make sure the sleep task hasn't finished.
ready_ids, _ = ray.wait([x_id], timeout=0)
assert len(ready_ids) == 0
# Seal the object so the store attempts to notify the worker that the
# get has been fulfilled.
ray.worker.global_worker.put_object(x_id, 1)
time.sleep(0.1)
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
# This test checks that when a driver dies in the middle of a get, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Not working with new GCS API.")
def test_dying_driver_get(ray_start_regular):
# Start the Ray processes.
address_info = ray_start_regular
@ray.remote
def sleep_forever():
time.sleep(10**6)
x_id = sleep_forever.remote()
driver = """
import ray
ray.init("{}")
ray.get(ray.ObjectID(ray.utils.hex_to_binary("{}")))
""".format(address_info["redis_address"], x_id.hex())
p = run_string_as_driver_nonblocking(driver)
# Make sure the driver is running.
time.sleep(1)
assert p.poll() is None
# Kill the driver process.
p.kill()
p.wait()
time.sleep(0.1)
# Make sure the original task hasn't finished.
ready_ids, _ = ray.wait([x_id], timeout=0)
assert len(ready_ids) == 0
# Seal the object so the store attempts to notify the worker that the
# get has been fulfilled.
ray.worker.global_worker.put_object(x_id, 1)
time.sleep(0.1)
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
# This test checks that when a worker dies in the middle of a wait, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Not working with new GCS API.")
def test_dying_worker_wait(ray_start_2_cpus):
@ray.remote
def sleep_forever():
time.sleep(10**6)
@ray.remote
def get_pid():
return os.getpid()
x_id = sleep_forever.remote()
# Get the PID of the worker that block_in_wait will run on (sleep a little
# to make sure that sleep_forever has already started).
time.sleep(0.1)
worker_pid = ray.get(get_pid.remote())
@ray.remote
def block_in_wait(object_id_in_list):
ray.wait(object_id_in_list)
# Have the worker wait in a wait call.
block_in_wait.remote([x_id])
time.sleep(0.1)
# Kill the worker.
os.kill(worker_pid, signal.SIGKILL)
time.sleep(0.1)
# Create the object.
ray.worker.global_worker.put_object(x_id, 1)
time.sleep(0.1)
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
# This test checks that when a driver dies in the middle of a wait, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Not working with new GCS API.")
def test_dying_driver_wait(ray_start_regular):
# Start the Ray processes.
address_info = ray_start_regular
@ray.remote
def sleep_forever():
time.sleep(10**6)
x_id = sleep_forever.remote()
driver = """
import ray
ray.init("{}")
ray.wait([ray.ObjectID(ray.utils.hex_to_binary("{}"))])
""".format(address_info["redis_address"], x_id.hex())
p = run_string_as_driver_nonblocking(driver)
# Make sure the driver is running.
time.sleep(1)
assert p.poll() is None
# Kill the driver process.
p.kill()
p.wait()
time.sleep(0.1)
# Make sure the original task hasn't finished.
ready_ids, _ = ray.wait([x_id], timeout=0)
assert len(ready_ids) == 0
# Seal the object so the store attempts to notify the worker that the
# wait can return.
ray.worker.global_worker.put_object(x_id, 1)
time.sleep(0.1)
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
@pytest.fixture(params=[(1, 4), (4, 4)])
def ray_start_workers_separate_multinode(request):
num_nodes = request.param[0]
num_initial_workers = request.param[1]
# Start the Ray processes.
cluster = Cluster()
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_initial_workers)
ray.init(address=cluster.address)
yield num_nodes, num_initial_workers
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_worker_failed(ray_start_workers_separate_multinode):
num_nodes, num_initial_workers = (ray_start_workers_separate_multinode)
@ray.remote
def get_pids():
time.sleep(0.25)
return os.getpid()
start_time = time.time()
pids = set()
while len(pids) < num_nodes * num_initial_workers:
new_pids = ray.get([
get_pids.remote()
for _ in range(2 * num_nodes * num_initial_workers)
])
for pid in new_pids:
pids.add(pid)
if time.time() - start_time > 60:
raise RayTestTimeoutException(
"Timed out while waiting to get worker PIDs.")
@ray.remote
def f(x):
time.sleep(0.5)
return x
# Submit more tasks than there are workers so that all workers and
# cores are utilized.
object_ids = [f.remote(i) for i in range(num_initial_workers * num_nodes)]
object_ids += [f.remote(object_id) for object_id in object_ids]
# Allow the tasks some time to begin executing.
time.sleep(0.1)
# Kill the workers as the tasks execute.
for pid in pids:
os.kill(pid, signal.SIGKILL)
time.sleep(0.1)
# Make sure that we either get the object or we get an appropriate
# exception.
for object_id in object_ids:
try:
ray.get(object_id)
except (ray.exceptions.RayTaskError, ray.exceptions.RayWorkerError):
pass
def _test_component_failed(cluster, component_type):
"""Kill a component on all worker nodes and check workload succeeds."""
# Submit many tasks with many dependencies.
@ray.remote
def f(x):
return x
@ray.remote
def g(*xs):
return 1
# Kill the component on all nodes except the head node as the tasks
# execute. Do this in a loop while submitting tasks between each
# component failure.
time.sleep(0.1)
worker_nodes = cluster.list_all_nodes()[1:]
assert len(worker_nodes) > 0
for node in worker_nodes:
process = node.all_processes[component_type][0].process
# Submit a round of tasks with many dependencies.
x = 1
for _ in range(1000):
x = f.remote(x)
xs = [g.remote(1)]
for _ in range(100):
xs.append(g.remote(*xs))
xs.append(g.remote(1))
# Kill a component on one of the nodes.
process.terminate()
time.sleep(1)
process.kill()
process.wait()
        assert process.poll() is not None
# Make sure that we can still get the objects after the
# executing tasks died.
ray.get(x)
ray.get(xs)
def check_components_alive(cluster, component_type, check_component_alive):
"""Check that a given component type is alive on all worker nodes."""
worker_nodes = cluster.list_all_nodes()[1:]
assert len(worker_nodes) > 0
for node in worker_nodes:
process = node.all_processes[component_type][0].process
if check_component_alive:
assert process.poll() is None
else:
print("waiting for " + component_type + " with PID " +
str(process.pid) + "to terminate")
process.wait()
print("done waiting for " + component_type + " with PID " +
str(process.pid) + "to terminate")
assert not process.poll() is None
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 8,
"num_nodes": 4,
"_internal_config": json.dumps({
"num_heartbeats_timeout": 100
}),
}],
indirect=True)
def test_raylet_failed(ray_start_cluster):
cluster = ray_start_cluster
# Kill all raylets on worker nodes.
_test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET)
# The plasma stores should still be alive on the worker nodes.
check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE,
True)
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 8,
"num_nodes": 2,
"_internal_config": json.dumps({
"num_heartbeats_timeout": 100
}),
}],
indirect=True)
def test_plasma_store_failed(ray_start_cluster):
cluster = ray_start_cluster
# Kill all plasma stores on worker nodes.
_test_component_failed(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE)
# No processes should be left alive on the worker nodes.
check_components_alive(cluster, ray_constants.PROCESS_TYPE_PLASMA_STORE,
False)
check_components_alive(cluster, ray_constants.PROCESS_TYPE_RAYLET, False)
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 4,
"num_nodes": 3,
"do_init": True
}],
indirect=True)
def test_actor_creation_node_failure(ray_start_cluster):
# TODO(swang): Refactor test_raylet_failed, etc to reuse the below code.
cluster = ray_start_cluster
@ray.remote
class Child(object):
def __init__(self, death_probability):
self.death_probability = death_probability
def ping(self):
# Exit process with some probability.
exit_chance = np.random.rand()
if exit_chance < self.death_probability:
sys.exit(-1)
num_children = 50
# Children actors will die about half the time.
death_probability = 0.5
children = [Child.remote(death_probability) for _ in range(num_children)]
while len(cluster.list_all_nodes()) > 1:
for j in range(2):
# Submit some tasks on the actors. About half of the actors will
# fail.
children_out = [child.ping.remote() for child in children]
# Wait a while for all the tasks to complete. This should trigger
# reconstruction for any actor creation tasks that were forwarded
# to nodes that then failed.
ready, _ = ray.wait(
children_out, num_returns=len(children_out), timeout=5 * 60.0)
assert len(ready) == len(children_out)
# Replace any actors that died.
for i, out in enumerate(children_out):
try:
ray.get(out)
except ray.exceptions.RayActorError:
children[i] = Child.remote(death_probability)
# Remove a node. Any actor creation tasks that were forwarded to this
# node must be reconstructed.
cluster.remove_node(cluster.list_all_nodes()[-1])
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
def test_driver_lives_sequential(ray_start_regular):
ray.worker._global_node.kill_raylet()
ray.worker._global_node.kill_plasma_store()
ray.worker._global_node.kill_log_monitor()
ray.worker._global_node.kill_monitor()
ray.worker._global_node.kill_raylet_monitor()
# If the driver can reach the tearDown method, then it is still alive.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with new GCS API.")
def test_driver_lives_parallel(ray_start_regular):
all_processes = ray.worker._global_node.all_processes
process_infos = (all_processes[ray_constants.PROCESS_TYPE_PLASMA_STORE] +
all_processes[ray_constants.PROCESS_TYPE_RAYLET] +
all_processes[ray_constants.PROCESS_TYPE_LOG_MONITOR] +
all_processes[ray_constants.PROCESS_TYPE_MONITOR] +
all_processes[ray_constants.PROCESS_TYPE_RAYLET_MONITOR])
assert len(process_infos) == 5
# Kill all the components in parallel.
for process_info in process_infos:
process_info.process.terminate()
time.sleep(0.1)
for process_info in process_infos:
process_info.process.kill()
for process_info in process_infos:
process_info.process.wait()
# If the driver can reach the tearDown method, then it is still alive.
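# Illustration only (not part of the Ray test suite): these tests are meant to
# be collected and run by pytest rather than executed directly, e.g.
#   pytest -q test_failure.py::test_worker_failed
# (the file name is an assumption inferred from the imports above).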
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.distribute import values as distributed_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
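# Worked illustration (not part of TensorFlow): for values=[[1.], [2.], [3.]]
# and indices=[0, 2, 0], the helper above returns summed_values=[[4.], [3.]]
# and unique_indices=[0, 2], i.e. slices that share an index are summed.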
@six.add_metaclass(abc.ABCMeta)
@keras_export("keras.optimizers.Optimizer")
class OptimizerV2(trackable.Trackable):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
# In graph mode, returns op that minimizes the loss by updating the listed
# variables.
opt_op = opt.minimize(loss, var_list=[var1, var2])
opt_op.run()
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `tf.GradientTape`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# Compute the gradients for a list of variables.
with tf.GradientTape() as tape:
loss = <call_loss_function>
vars = <list_of_variables>
grads = tape.gradient(loss, vars)
processed_grads = [process_gradient(g) for g in grads]
grads_and_vars = zip(processed_grads, var_list)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Use with `tf.distribute.Strategy`.
This optimizer class is `tf.distribute.Strategy` aware, which means it
automatically sums gradients across all replicas. To average gradients,
you divide your loss by the global batch size, which is done automatically
if you use a member of `tf.keras.losses` or `tf.losses`. See the
  `reduction` argument of your loss, which should be set to
  `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or
  `tf.keras.losses.Reduction.SUM` for summing.
If you are not using these and you want to average gradients, you should use
`tf.math.reduce_sum` to add up your per-example losses and then divide by the
global batch size. Note that when using `tf.distribute.Strategy`, the first
component of a tensor's shape is the *replica-local* batch size, which is off
by a factor equal to the number of replicas being used to compute a single
step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
resulting in gradients that can be many times too big.
### Variable Constraint
  All Keras optimizers respect variable constraints. If a constraint function is
  passed to any variable, the constraint will be applied to the variable after
  the gradient has been applied to the variable.
  Important: If the gradient is a sparse tensor, variable constraints are not supported.
### Thread Compatibility
The entire optimizer is currently thread compatible, not thread-safe. The user
needs to perform synchronization if necessary.
### Slots
  Many optimizer subclasses, such as `Adam` and `Adagrad`, allocate and manage
  additional variables associated with the variables to train. These are called
  <i>Slots</i>. Slots have names and you can ask the optimizer for the names of
  the slots that it uses. Once you have a slot name you can ask the optimizer
  for the variable it created to hold the slot value.
  This can be useful if you want to log or debug a training algorithm, report
  stats about the slots, etc.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
Hyper parameters can be overwritten through user code:
Example:
```python
# Create an optimizer with the desired parameters.
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# `loss` is a callable that takes no argument and returns the value
# to minimize.
loss = lambda: 3 * var1 + 2 * var2
# In eager mode, simply call minimize to update the list of variables.
opt.minimize(loss, var_list=[var1, var2])
# update learning rate
opt.learning_rate = 0.05
opt.minimize(loss, var_list=[var1, var2])
```
### Write a customized optimizer.
If you intend to create your own optimization algorithm, simply inherit from
this class and override the following methods:
  - _resource_apply_dense (update variable given gradient tensor is dense)
  - _resource_apply_sparse (update variable given gradient tensor is sparse)
  - _create_slots (if your optimizer algorithm requires additional variables)
  - get_config (serialization of the optimizer, include all hyper parameters)
"""
def __init__(self, name, **kwargs):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
    Note that Optimizer instances should not bind to a single graph,
    and so shouldn't keep Tensors as member variables. Generally
    you should be able to use the _set_hyper()/_get_hyper()
    facility instead.
    This class is stateful and thread-compatible.
Args:
name: A non-empty string. The name to use for accumulators created
for the optimizer.
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
        `decay`}. `clipnorm` clips gradients by norm; `clipvalue` clips
        gradients by value; `decay` is included for backward compatibility to
        allow time-inverse decay of the learning rate. `lr` is included for
        backward compatibility; it is recommended to use `learning_rate` instead.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError("Unexpected keyword argument "
"passed to optimizer: " + str(k))
# checks that all keyword arguments are non-negative.
if kwargs[k] < 0:
raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k]))
self._use_locking = True
self._name = name
self._hyper = {}
# dict: {variable name : {slot name : variable}}
self._slots = {}
self._slot_names = []
self._weights = []
self._iterations = None
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
decay = kwargs.pop("decay", 0.0)
if decay < 0.:
raise ValueError("decay cannot be less than 0: {}".format(decay))
self._initial_decay = decay
if "clipnorm" in kwargs:
self.clipnorm = kwargs.pop("clipnorm")
if "clipvalue" in kwargs:
self.clipvalue = kwargs.pop("clipvalue")
self._hypers_created = False
def minimize(self, loss, var_list, grad_loss=None, name=None):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply computes gradient using `tf.GradientTape` and calls
`apply_gradients()`. If you want to process the gradient before applying
then call `tf.GradientTape` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A callable taking no arguments which returns the value to minimize.
var_list: list or tuple of `Variable` objects to update to minimize
`loss`.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
name: Optional name for the returned operation.
    Returns:
      An Operation that updates the variables in `var_list`. The optimizer's
      `iterations` counter is incremented as part of the returned operation.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes no arguments and computes the value to be minimized. Minimization (and
gradient computation) is done with respect to the elements of `var_list`.
`grad_loss` is ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self._compute_gradients(
loss, var_list=var_list, grad_loss=grad_loss)
return self.apply_gradients(grads_and_vars, name=name)
def _compute_gradients(self, loss, var_list, grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A callable taking no arguments which returns the value to minimize.
      var_list: List or tuple of `tf.Variable` to update to minimize
        `loss`.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid, or var_list is None.
"""
var_list = nest.flatten(var_list)
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
with backprop.GradientTape() as tape:
tape.watch(var_list)
loss_value = loss()
grads = tape.gradient(loss_value, var_list, grad_loss)
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return grads_and_vars
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
with backend.get_graph().as_default():
grads = gradients.gradients(loss, params)
if None in grads:
raise ValueError("An operation has `None` for gradient. "
"Please make sure that all of your ops have a "
"gradient defined (i.e. are differentiable). "
"Common ops without gradient: "
"K.argmax, K.round, K.eval.")
if hasattr(self, "clipnorm"):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def apply_gradients(self, grads_and_vars, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
      An `Operation` that applies the specified gradients. The optimizer's
      `iterations` counter is incremented as part of the returned operation.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
grads_and_vars = _filter_grads(grads_and_vars)
var_list = [v for (_, v) in grads_and_vars]
# Create iteration if necessary.
_ = self.iterations
self._create_hypers()
with ops.init_scope():
self._create_slots(var_list)
self._prepare(var_list)
return distribute_ctx.get_replica_context().merge_call(
self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name})
def _distributed_apply(self, distribution, grads_and_vars, name):
"""`apply_gradients` using a `DistributionStrategy`."""
reduced_grads = distribution.extended.batch_reduce_to(
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
def apply_grad_to_update_var(var, grad):
"""Apply gradient to variable."""
if isinstance(var, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", var)
if isinstance(grad, ops.IndexedSlices):
if var.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return self._resource_apply_sparse_duplicate_indices(
grad.values, var, grad.indices)
update_op = self._resource_apply_dense(grad, var)
if var.constraint is not None:
with ops.control_dependencies([update_op]):
return var.assign(var.constraint(var))
else:
return update_op
update_ops = []
with ops.name_scope(name, self._name) as name:
for grad, var in grads_and_vars:
scope_name = ("" if ops.executing_eagerly_outside_functions() else
"_" + var.op.name)
with ops.name_scope("update" + scope_name):
update_ops.extend(
distribution.extended.update(
var, apply_grad_to_update_var, args=(grad,), group=False))
any_symbolic = any(isinstance(i, ops.Operation) or
tf_utils.is_symbolic_tensor(i) for i in update_ops)
if not context.executing_eagerly() or any_symbolic:
# If the current context is graph mode or any of the update ops are
# symbolic then the step update should be carried out under a graph
# context. (eager updates execute immediately)
with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access
with ops.control_dependencies(update_ops):
return self._iterations.assign_add(1).op
return self._iterations.assign_add(1)
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
grads_and_vars = list(zip(grads, params))
self._assert_valid_dtypes([
v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource
])
return [self.apply_gradients(grads_and_vars)]
def _set_hyper(self, name, value):
"""set hyper `name` to value. value can be callable, tensor, numeric."""
if name not in self._hyper:
self._hyper[name] = value
else:
prev_value = self._hyper[name]
if (callable(prev_value)
or isinstance(prev_value,
(ops.Tensor, int, float,
learning_rate_schedule.LearningRateSchedule))
or isinstance(value, learning_rate_schedule.LearningRateSchedule)):
self._hyper[name] = value
else:
backend.set_value(self._hyper[name], value)
def _get_hyper(self, name, dtype=None):
if not self._hypers_created:
self._create_hypers()
value = self._hyper[name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return value
if callable(value):
value = value()
if dtype:
return math_ops.cast(value, dtype)
else:
return value
def __getattribute__(self, name):
"""Overridden to support hyperparameter access."""
try:
return super(OptimizerV2, self).__getattribute__(name)
except AttributeError as e:
# Needed to avoid infinite recursion with __setattr__.
if name == "_hyper":
raise e
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if name in self._hyper:
return self._get_hyper(name)
raise e
def __setattr__(self, name, value):
"""Override setattr to support dynamic hyperparameter setting."""
# Backwards compatibility with Keras optimizers.
if name == "lr":
name = "learning_rate"
if hasattr(self, "_hyper") and name in self._hyper:
self._set_hyper(name, value)
else:
super(OptimizerV2, self).__setattr__(name, value)
def get_slot_names(self):
"""A list of names for this optimizer's slots."""
return self._slot_names
def add_slot(self, var, slot_name, initializer="zeros"):
"""Add a new slot variable for `var`."""
if slot_name not in self._slot_names:
self._slot_names.append(slot_name)
var_key = _var_key(var)
slot_dict = self._slots.setdefault(var_key, {})
weight = slot_dict.get(slot_name, None)
if weight is None:
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
initial_value = functools.partial(
initializer, shape=var.shape, dtype=var.dtype)
else:
initial_value = initializer
strategy = distribute_ctx.get_strategy()
with strategy.colocate_vars_with(var):
weight = tf_variables.Variable(
name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access
dtype=var.dtype,
trainable=False,
initial_value=initial_value)
backend.track_variable(weight)
slot_dict[slot_name] = weight
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=weight)
self._weights.append(weight)
return weight
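  # Illustration (not part of TensorFlow): subclasses typically call add_slot()
  # from their _create_slots() hook, along the lines of
  #   def _create_slots(self, var_list):
  #     for var in var_list:
  #       self.add_slot(var, "momentum")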
def get_slot(self, var, slot_name):
var_key = _var_key(var)
slot_dict = self._slots[var_key]
return slot_dict[slot_name]
def _prepare(self, var_list):
pass
def _create_hypers(self):
if self._hypers_created:
return
# Iterate hyper values deterministically.
for name, value in sorted(self._hyper.items()):
if isinstance(value, ops.Tensor) or callable(value):
continue
else:
self._hyper[name] = self.add_weight(
name,
shape=[],
trainable=False,
initializer=value,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._hypers_created = True
@property
def iterations(self):
"""Variable. The number of training steps this Optimizer has run."""
if self._iterations is None:
self._iterations = self.add_weight(
"iter",
shape=[],
dtype=dtypes.int64,
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._iterations)
return self._iterations
@iterations.setter
def iterations(self, variable):
if self._iterations is not None:
raise RuntimeError("Cannot set `iterations` to a new Variable after"
"the Optimizer weights have been created")
self._iterations = variable
self._weights.append(self._iterations)
def _decayed_lr(self, var_dtype):
"""Get decayed learning rate as a Tensor with dtype=var_dtype."""
lr_t = self._get_hyper("learning_rate", var_dtype)
if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):
local_step = math_ops.cast(self.iterations, var_dtype)
lr_t = math_ops.cast(lr_t(local_step), var_dtype)
if self._initial_decay > 0.:
local_step = math_ops.cast(self.iterations, var_dtype)
decay_t = self._get_hyper("decay", var_dtype)
lr_t = lr_t / (1. + decay_t * local_step)
return lr_t
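  # Worked illustration (not part of TensorFlow): with the inverse-time decay
  # above, learning_rate=0.1 and decay=0.5 give
  #   step 0: 0.1 / (1 + 0.5 * 0) = 0.1
  #   step 2: 0.1 / (1 + 0.5 * 2) = 0.05
  #   step 8: 0.1 / (1 + 0.5 * 8) = 0.02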
@abc.abstractmethod
def get_config(self):
"""Returns the config of the optimimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
config = {"name": self._name}
if hasattr(self, "clipnorm"):
config["clipnorm"] = self.clipnorm
if hasattr(self, "clipvalue"):
config["clipvalue"] = self.clipvalue
return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
if "lr" in config:
config["learning_rate"] = config.pop("lr")
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"])
return cls(**config)
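  # Illustration (not part of TensorFlow): get_config()/from_config()
  # round-trip an optimizer's hyperparameters (but not its slot variables), e.g.
  #   opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
  #   clone = tf.keras.optimizers.SGD.from_config(opt.get_config())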
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
value = self._hyper[hyperparameter_name]
if isinstance(value, learning_rate_schedule.LearningRateSchedule):
return learning_rate_schedule.serialize(value)
if callable(value):
return value()
if isinstance(value, (ops.Tensor, tf_variables.Variable,
distributed_values.TPUMirroredVariable,
distributed_values.DistributedVariable)):
return backend.get_value(value)
return value
def variables(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
@property
def weights(self):
"""Returns variables of this Optimizer based on the order created."""
return self._weights
def get_weights(self):
params = self.weights
return backend.batch_get_value(params)
# TODO(tanzheny): Maybe share this logic with base_layer.
def set_weights(self, weights):
params = self.weights
if len(params) != len(weights):
raise ValueError(
"You called `set_weights(weights)` on optimizer " + self._name +
" with a weight list of length " + str(len(weights)) +
", but the optimizer was expecting " + str(len(params)) +
" weights. Provided weights: " + str(weights)[:50] + "...")
if not params:
return
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError("Optimizer weight shape " + str(pv.shape) +
" not compatible with "
"provided weight shape " + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def add_weight(self,
name,
shape,
dtype=None,
initializer="zeros",
trainable=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE):
if dtype is None:
dtype = dtypes.float32
if isinstance(initializer, six.string_types) or callable(initializer):
initializer = initializers.get(initializer)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
getter=base_layer_utils.make_variable,
overwrite=True,
initializer=initializer,
dtype=dtype,
trainable=trainable,
use_resource=True,
synchronization=synchronization,
aggregation=aggregation)
backend.track_variable(variable)
return variable
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError("Invalid type %r for %s, expected: %s." %
(dtype, t.name, [v for v in valid_dtypes]))
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
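# Illustrative sketch of the deduplication step (the helper
# `_deduplicate_indexed_slices` is assumed from the surrounding module): rows of
# `grad` that share an index are summed before the per-index update, e.g.
#
#   indices = [0, 2, 0]      ->  unique_indices = [0, 2]
#   grad    = [g0, g2, g0']  ->  summed_grad    = [g0 + g0', g2]
#
# so subclasses overriding `_resource_apply_sparse` may assume unique indices.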
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable to be
updated.
indices: a `Tensor` of integral type representing the indices for which
the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_scatter_update(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_update(x.handle, i, v)]):
return x.value()
# ---------------
# For implementing the trackable interface
# ---------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `trackable._CheckpointPosition` object
indicating the slot variable `Trackable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
variable_key = _var_key(variable)
slot_dict = self._slots.get(variable_key, {})
slot_variable = slot_dict.get(slot_name, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = trackable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.add_slot(
var=variable,
initializer=initializer,
slot_name=slot_name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def _filter_grads(grads_and_vars):
"""Filter out iterable with grad equal to None."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients does not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
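# Illustrative example (hypothetical names): variables that did not contribute
# to the loss have `None` gradients and are dropped here.
#
#   _filter_grads([(g_w, w), (None, b)])  ->  ((g_w, w),)
#   # logs a warning naming `b` as a variable with no gradient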
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if getattr(var, "_distributed_container", None) is not None:
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
def _get_slot_key_from_var(var, slot_name):
"""Get the slot key for the variable: var_name/slot_name."""
name = _var_key(var)
return name + "/" + slot_name
|
|
"""
This module implements the "new" binary OpenEphys format.
In this format channels are interleaved in one file.
See
https://open-ephys.github.io/gui-docs/User-Manual/Recording-data/Binary-format.html
Author: Julia Sprenger and Samuel Garcia
"""
import os
import re
import json
from pathlib import Path
import numpy as np
from .baserawio import (BaseRawIO, _signal_channel_dtype, _signal_stream_dtype,
_spike_channel_dtype, _event_channel_dtype)
class OpenEphysBinaryRawIO(BaseRawIO):
"""
Handle several Blocks and several Segments.
# Correspondences
Neo OpenEphys
block[n-1] experiment[n] New device start/stop
segment[s-1] recording[s] New recording start/stop
This IO handles several signal streams.
Special event (npy) data are represented as array_annotations.
The current implementation does not handle spiking data; this will be added upon user request
"""
extensions = []
rawmode = 'one-dir'
def __init__(self, dirname=''):
BaseRawIO.__init__(self)
self.dirname = dirname
def _source_name(self):
return self.dirname
def _parse_header(self):
all_streams, nb_block, nb_segment_per_block = explore_folder(self.dirname)
sig_stream_names = sorted(list(all_streams[0][0]['continuous'].keys()))
event_stream_names = sorted(list(all_streams[0][0]['events'].keys()))
# first loop to reassign streams by "stream_index" instead of "stream_name"
self._sig_streams = {}
self._evt_streams = {}
for block_index in range(nb_block):
self._sig_streams[block_index] = {}
self._evt_streams[block_index] = {}
for seg_index in range(nb_segment_per_block[block_index]):
self._sig_streams[block_index][seg_index] = {}
self._evt_streams[block_index][seg_index] = {}
for stream_index, stream_name in enumerate(sig_stream_names):
d = all_streams[block_index][seg_index]['continuous'][stream_name]
d['stream_name'] = stream_name
self._sig_streams[block_index][seg_index][stream_index] = d
for i, stream_name in enumerate(event_stream_names):
d = all_streams[block_index][seg_index]['events'][stream_name]
d['stream_name'] = stream_name
self._evt_streams[block_index][seg_index][i] = d
# signals zone
# create signals channel map: several channel per stream
signal_channels = []
for stream_index, stream_name in enumerate(sig_stream_names):
# stream_index is the index in the vector of stream names
stream_id = str(stream_index)
d = self._sig_streams[0][0][stream_index]
new_channels = []
for chan_info in d['channels']:
chan_id = chan_info['channel_name']
new_channels.append((chan_info['channel_name'],
chan_id, float(d['sample_rate']), d['dtype'], chan_info['units'],
chan_info['bit_volts'], 0., stream_id))
signal_channels.extend(new_channels)
signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)
signal_streams = []
for stream_index, stream_name in enumerate(sig_stream_names):
stream_id = str(stream_index)
signal_streams.append((stream_name, stream_id))
signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
# create memmap for signals
for block_index in range(nb_block):
for seg_index in range(nb_segment_per_block[block_index]):
for stream_index, d in self._sig_streams[block_index][seg_index].items():
num_channels = len(d['channels'])
memmap_sigs = np.memmap(d['raw_filename'], d['dtype'],
order='C', mode='r').reshape(-1, num_channels)
d['memmap'] = memmap_sigs
# events zone
# channel map: one channel one stream
event_channels = []
for stream_ind, stream_name in enumerate(event_stream_names):
d = self._evt_streams[0][0][stream_ind]
event_channels.append((d['channel_name'], stream_ind, 'event'))
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
# create memmap
for stream_ind, stream_name in enumerate(event_stream_names):
# inject memmap loaded into main dict structure
d = self._evt_streams[0][0][stream_ind]
for name in _possible_event_stream_names:
if name + '_npy' in d:
data = np.load(d[name + '_npy'], mmap_mode='r')
d[name] = data
# check that events have timestamps
assert 'timestamps' in d
# for event the neo "label" will change depending the nature
# of event (ttl, text, binary)
# and this is transform into unicode
# all theses data are put in event array annotations
if 'text' in d:
# text case
d['labels'] = d['text'].astype('U')
elif 'metadata' in d:
# binary case
d['labels'] = d['channels'].astype('U')
elif 'channels' in d:
# ttl case use channels
d['labels'] = d['channels'].astype('U')
else:
raise ValueError(f'There are no possible labels for this event: {stream_name}')
# no spike read yet
# can be implemented on user demand
spike_channels = np.array([], dtype=_spike_channel_dtype)
# loop over segments to compute t_start/t_stop by browsing all objects
self._t_start_segments = {}
self._t_stop_segments = {}
for block_index in range(nb_block):
self._t_start_segments[block_index] = {}
self._t_stop_segments[block_index] = {}
for seg_index in range(nb_segment_per_block[block_index]):
global_t_start = None
global_t_stop = None
# loop over signals
for stream_index, d in self._sig_streams[block_index][seg_index].items():
t_start = d['t_start']
dur = d['memmap'].shape[0] / float(d['sample_rate'])
t_stop = t_start + dur
if global_t_start is None or global_t_start > t_start:
global_t_start = t_start
if global_t_stop is None or global_t_stop < t_stop:
global_t_stop = t_stop
# loop over events
for stream_index, stream_name in enumerate(event_stream_names):
d = self._evt_streams[0][0][stream_index]
if d['timestamps'].size == 0:
continue
t_start = d['timestamps'][0] / d['sample_rate']
t_stop = d['timestamps'][-1] / d['sample_rate']
if global_t_start is None or global_t_start > t_start:
global_t_start = t_start
if global_t_stop is None or global_t_stop < t_stop:
global_t_stop = t_stop
self._t_start_segments[block_index][seg_index] = global_t_start
self._t_stop_segments[block_index][seg_index] = global_t_stop
# main header
self.header = {}
self.header['nb_block'] = nb_block
self.header['nb_segment'] = nb_segment_per_block
self.header['signal_streams'] = signal_streams
self.header['signal_channels'] = signal_channels
self.header['spike_channels'] = spike_channels
self.header['event_channels'] = event_channels
# Annotate some objects from continuous files
self._generate_minimal_annotations()
for block_index in range(nb_block):
bl_ann = self.raw_annotations['blocks'][block_index]
for seg_index in range(nb_segment_per_block[block_index]):
seg_ann = bl_ann['segments'][seg_index]
# array annotations for signal channels
for stream_index, stream_name in enumerate(sig_stream_names):
sig_ann = seg_ann['signals'][stream_index]
d = self._sig_streams[0][0][stream_index]
for k in ('identifier', 'history', 'source_processor_index',
'recorded_processor_index'):
if k in d['channels'][0]:
values = np.array([chan_info[k] for chan_info in d['channels']])
sig_ann['__array_annotations__'][k] = values
# array annotations for event channels
# use other possible data in _possible_event_stream_names
for stream_index, stream_name in enumerate(event_stream_names):
ev_ann = seg_ann['events'][stream_index]
d = self._evt_streams[0][0][stream_index]
for k in _possible_event_stream_names:
if k in ('timestamps', ):
continue
if k in d:
# split custom dtypes into separate annotations
if d[k].dtype.names:
for name in d[k].dtype.names:
ev_ann['__array_annotations__'][name] = d[k][name].flatten()
else:
ev_ann['__array_annotations__'][k] = d[k]
def _segment_t_start(self, block_index, seg_index):
return self._t_start_segments[block_index][seg_index]
def _segment_t_stop(self, block_index, seg_index):
return self._t_stop_segments[block_index][seg_index]
def _channels_to_group_id(self, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
channels = self.header['signal_channels']
group_ids = channels[channel_indexes]['group_id']
assert np.unique(group_ids).size == 1
group_id = group_ids[0]
return group_id
def _get_signal_size(self, block_index, seg_index, stream_index):
sigs = self._sig_streams[block_index][seg_index][stream_index]['memmap']
return sigs.shape[0]
def _get_signal_t_start(self, block_index, seg_index, stream_index):
t_start = self._sig_streams[block_index][seg_index][stream_index]['t_start']
return t_start
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
stream_index, channel_indexes):
sigs = self._sig_streams[block_index][seg_index][stream_index]['memmap']
sigs = sigs[i_start:i_stop, :]
if channel_indexes is not None:
sigs = sigs[:, channel_indexes]
return sigs
def _spike_count(self, block_index, seg_index, unit_index):
pass
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
pass
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
pass
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
pass
def _event_count(self, block_index, seg_index, event_channel_index):
d = self._evt_streams[0][0][event_channel_index]
return d['timestamps'].size
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
d = self._evt_streams[0][0][event_channel_index]
timestamps = d['timestamps']
durations = None
labels = d['labels']
# slice it if needed
if t_start is not None:
ind_start = int(t_start * d['sample_rate'])
mask = timestamps >= ind_start
timestamps = timestamps[mask]
labels = labels[mask]
if t_stop is not None:
ind_stop = int(t_stop * d['sample_rate'])
mask = timestamps < ind_stop
timestamps = timestamps[mask]
labels = labels[mask]
return timestamps, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype, event_channel_index):
d = self._evt_streams[0][0][event_channel_index]
event_times = event_timestamps.astype(dtype) / float(d['sample_rate'])
return event_times
def _rescale_epoch_duration(self, raw_duration, dtype):
pass
_possible_event_stream_names = ('timestamps', 'channels', 'text',
'full_word', 'channel_states', 'data_array', 'metadata')
def explore_folder(dirname):
"""
Explore the OpenEphys folder structure and the structure.oebin files.
Returns a nested dictionary structure:
[block_index][seg_index][stream_type][stream_information]
where
- node_name is the open ephys node id
- block_index is the neo Block index
- seg_index is the neo Segment index
- stream_type can be 'continuous'/'events'/'spikes'
- stream_information is a dictionary containing e.g. the sampling rate
Parameters
----------
dirname (str): Root folder of the dataset
Returns
-------
nested dictionaries containing structure and stream information
"""
nb_block = 0
nb_segment_per_block = []
# nested dictionary: block_index > seg_index > data_type > stream_name
all_streams = {}
for root, dirs, files in os.walk(dirname):
for file in files:
if not file == 'structure.oebin':
continue
root = Path(root)
node_name = root.parents[1].stem
if not node_name.startswith('Record'):
# before version 5.x.x there was no multi Node recording
# so no node_name
node_name = ''
block_index = int(root.parents[0].stem.replace('experiment', '')) - 1
if block_index not in all_streams:
all_streams[block_index] = {}
if block_index >= nb_block:
nb_block = block_index + 1
nb_segment_per_block.append(0)
seg_index = int(root.stem.replace('recording', '')) - 1
if seg_index not in all_streams[block_index]:
all_streams[block_index][seg_index] = {
'continuous': {},
'events': {},
'spikes': {},
}
if seg_index >= nb_segment_per_block[block_index]:
nb_segment_per_block[block_index] = seg_index + 1
# metadata
with open(root / 'structure.oebin', encoding='utf8', mode='r') as f:
structure = json.load(f)
if (root / 'continuous').exists() and len(structure['continuous']) > 0:
for d in structure['continuous']:
# when there are multiple Record Nodes the stream name also
# contains the node name to make it unique
stream_name = node_name + '#' + d['folder_name']
raw_filename = root / 'continuous' / d['folder_name'] / 'continuous.dat'
timestamp_file = root / 'continuous' / d['folder_name'] / 'timestamps.npy'
timestamps = np.load(str(timestamp_file), mmap_mode='r')
timestamp0 = timestamps[0]
t_start = timestamp0 / d['sample_rate']
# TODO for later : gap checking
signal_stream = d.copy()
signal_stream['raw_filename'] = str(raw_filename)
signal_stream['dtype'] = 'int16'
signal_stream['timestamp0'] = timestamp0
signal_stream['t_start'] = t_start
all_streams[block_index][seg_index]['continuous'][stream_name] = signal_stream
if (root / 'events').exists() and len(structure['events']) > 0:
for d in structure['events']:
stream_name = node_name + '#' + d['folder_name']
event_stream = d.copy()
for name in _possible_event_stream_names:
npz_filename = root / 'events' / d['folder_name'] / f'{name}.npy'
if npz_filename.is_file():
event_stream[f'{name}_npy'] = str(npz_filename)
all_streams[block_index][seg_index]['events'][stream_name] = event_stream
# TODO for later: check stream / channel consistency across segment
return all_streams, nb_block, nb_segment_per_block
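# Minimal usage sketch (the directory path is hypothetical); `parse_header` and
# `get_analogsignal_chunk` are the public wrappers exposed by neo's BaseRawIO
# around the private methods defined above:
#
#   reader = OpenEphysBinaryRawIO(dirname='/data/my_openephys_session')
#   reader.parse_header()
#   raw_chunk = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
#                                             i_start=0, i_stop=1024,
#                                             stream_index=0,
#                                             channel_indexes=None)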
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dataflow client utility functions."""
import codecs
import getpass
import json
import logging
import os
import re
import time
from StringIO import StringIO
from datetime import datetime
from apitools.base.py import encoding
from apitools.base.py import exceptions
from apache_beam import utils
from apache_beam.internal.gcp.auth import get_service_credentials
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.io.gcp.internal.clients import storage
from apache_beam.runners.dataflow.internal import dependency
from apache_beam.runners.dataflow.internal.clients import dataflow
from apache_beam.runners.dataflow.internal.dependency import get_required_container_version
from apache_beam.runners.dataflow.internal.dependency import get_sdk_name_and_version
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.transforms import cy_combiners
from apache_beam.transforms.display import DisplayData
from apache_beam.utils import retry
from apache_beam.utils.pipeline_options import DebugOptions
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import StandardOptions
from apache_beam.utils.pipeline_options import WorkerOptions
class Step(object):
"""Wrapper for a dataflow Step protobuf."""
def __init__(self, step_kind, step_name, additional_properties=None):
self.step_kind = step_kind
self.step_name = step_name
self.proto = dataflow.Step(kind=step_kind, name=step_name)
self.proto.properties = {}
self._additional_properties = []
if additional_properties is not None:
for (n, v, t) in additional_properties:
self.add_property(n, v, t)
def add_property(self, name, value, with_type=False):
self._additional_properties.append((name, value, with_type))
self.proto.properties.additionalProperties.append(
dataflow.Step.PropertiesValue.AdditionalProperty(
key=name, value=to_json_value(value, with_type=with_type)))
def _get_outputs(self):
"""Returns a list of all output labels for a step."""
outputs = []
for p in self.proto.properties.additionalProperties:
if p.key == PropertyNames.OUTPUT_INFO:
for entry in p.value.array_value.entries:
for entry_prop in entry.object_value.properties:
if entry_prop.key == PropertyNames.OUTPUT_NAME:
outputs.append(entry_prop.value.string_value)
return outputs
def __reduce__(self):
"""Reduce hook for pickling the Step class more easily."""
return (Step, (self.step_kind, self.step_name, self._additional_properties))
def get_output(self, tag=None):
"""Returns name if it is one of the outputs or first output if name is None.
Args:
tag: tag of the output as a string or None if we want to get the
name of the first output.
Returns:
The name of the output associated with the tag or the first output
if tag was None.
Raises:
ValueError: if the tag does not exist within outputs.
"""
outputs = self._get_outputs()
if tag is None:
return outputs[0]
else:
name = '%s_%s' % (PropertyNames.OUT, tag)
if name not in outputs:
raise ValueError(
'Cannot find named output: %s in %s.' % (name, outputs))
return name
class Environment(object):
"""Wrapper for a dataflow Environment protobuf."""
def __init__(self, packages, options, environment_version):
self.standard_options = options.view_as(StandardOptions)
self.google_cloud_options = options.view_as(GoogleCloudOptions)
self.worker_options = options.view_as(WorkerOptions)
self.debug_options = options.view_as(DebugOptions)
self.proto = dataflow.Environment()
self.proto.clusterManagerApiService = GoogleCloudOptions.COMPUTE_API_SERVICE
self.proto.dataset = '{}/cloud_dataflow'.format(
GoogleCloudOptions.BIGQUERY_API_SERVICE)
self.proto.tempStoragePrefix = (
self.google_cloud_options.temp_location.replace(
'gs:/',
GoogleCloudOptions.STORAGE_API_SERVICE))
# User agent information.
self.proto.userAgent = dataflow.Environment.UserAgentValue()
self.local = 'localhost' in self.google_cloud_options.dataflow_endpoint
if self.google_cloud_options.service_account_email:
self.proto.serviceAccountEmail = (
self.google_cloud_options.service_account_email)
sdk_name, version_string = get_sdk_name_and_version()
self.proto.userAgent.additionalProperties.extend([
dataflow.Environment.UserAgentValue.AdditionalProperty(
key='name',
value=to_json_value(sdk_name)),
dataflow.Environment.UserAgentValue.AdditionalProperty(
key='version', value=to_json_value(version_string))])
# Version information.
self.proto.version = dataflow.Environment.VersionValue()
if self.standard_options.streaming:
job_type = 'PYTHON_STREAMING'
else:
job_type = 'PYTHON_BATCH'
self.proto.version.additionalProperties.extend([
dataflow.Environment.VersionValue.AdditionalProperty(
key='job_type',
value=to_json_value(job_type)),
dataflow.Environment.VersionValue.AdditionalProperty(
key='major', value=to_json_value(environment_version))])
# Experiments
if self.debug_options.experiments:
for experiment in self.debug_options.experiments:
self.proto.experiments.append(experiment)
# Worker pool(s) information.
package_descriptors = []
for package in packages:
package_descriptors.append(
dataflow.Package(
location='%s/%s' % (
self.google_cloud_options.staging_location.replace(
'gs:/', GoogleCloudOptions.STORAGE_API_SERVICE),
package),
name=package))
pool = dataflow.WorkerPool(
kind='local' if self.local else 'harness',
packages=package_descriptors,
taskrunnerSettings=dataflow.TaskRunnerSettings(
parallelWorkerSettings=dataflow.WorkerSettings(
baseUrl=GoogleCloudOptions.DATAFLOW_ENDPOINT,
servicePath=self.google_cloud_options.dataflow_endpoint)))
pool.autoscalingSettings = dataflow.AutoscalingSettings()
# Set worker pool options received through command line.
if self.worker_options.num_workers:
pool.numWorkers = self.worker_options.num_workers
if self.worker_options.max_num_workers:
pool.autoscalingSettings.maxNumWorkers = (
self.worker_options.max_num_workers)
if self.worker_options.autoscaling_algorithm:
values_enum = dataflow.AutoscalingSettings.AlgorithmValueValuesEnum
pool.autoscalingSettings.algorithm = {
'NONE': values_enum.AUTOSCALING_ALGORITHM_NONE,
'THROUGHPUT_BASED': values_enum.AUTOSCALING_ALGORITHM_BASIC,
}.get(self.worker_options.autoscaling_algorithm)
if self.worker_options.machine_type:
pool.machineType = self.worker_options.machine_type
if self.worker_options.disk_size_gb:
pool.diskSizeGb = self.worker_options.disk_size_gb
if self.worker_options.disk_type:
pool.diskType = self.worker_options.disk_type
if self.worker_options.zone:
pool.zone = self.worker_options.zone
if self.worker_options.network:
pool.network = self.worker_options.network
if self.worker_options.worker_harness_container_image:
pool.workerHarnessContainerImage = (
self.worker_options.worker_harness_container_image)
else:
# Default to using the worker harness container image for the current SDK
# version.
pool.workerHarnessContainerImage = (
'dataflow.gcr.io/v1beta3/python:%s' %
get_required_container_version())
if self.worker_options.use_public_ips is not None:
if self.worker_options.use_public_ips:
pool.ipConfiguration = (
dataflow.WorkerPool
.IpConfigurationValueValuesEnum.WORKER_IP_PUBLIC)
else:
pool.ipConfiguration = (
dataflow.WorkerPool
.IpConfigurationValueValuesEnum.WORKER_IP_PRIVATE)
if self.standard_options.streaming:
# Use separate data disk for streaming.
disk = dataflow.Disk()
if self.local:
disk.diskType = 'local'
# TODO(ccy): allow customization of disk.
pool.dataDisks.append(disk)
self.proto.workerPools.append(pool)
sdk_pipeline_options = options.get_all_options()
if sdk_pipeline_options:
self.proto.sdkPipelineOptions = (
dataflow.Environment.SdkPipelineOptionsValue())
options_dict = {k: v
for k, v in sdk_pipeline_options.iteritems()
if v is not None}
options_dict['_options_id'] = options._options_id
self.proto.sdkPipelineOptions.additionalProperties.append(
dataflow.Environment.SdkPipelineOptionsValue.AdditionalProperty(
key='options', value=to_json_value(options_dict)))
dd = DisplayData.create_from_options(options)
items = [item.get_dict() for item in dd.items]
self.proto.sdkPipelineOptions.additionalProperties.append(
dataflow.Environment.SdkPipelineOptionsValue.AdditionalProperty(
key='display_data', value=to_json_value(items)))
class Job(object):
"""Wrapper for a dataflow Job protobuf."""
def __str__(self):
def encode_shortstrings(input_buffer, errors='strict'):
"""Encoder (from Unicode) that suppresses long base64 strings."""
original_len = len(input_buffer)
if original_len > 150:
if self.base64_str_re.match(input_buffer):
input_buffer = '<string of %d bytes>' % original_len
input_buffer = input_buffer.encode('ascii', errors=errors)
else:
matched = self.coder_str_re.match(input_buffer)
if matched:
input_buffer = '%s<string of %d bytes>' % (
matched.group(1), matched.end(2) - matched.start(2))
input_buffer = input_buffer.encode('ascii', errors=errors)
return input_buffer, original_len
def decode_shortstrings(input_buffer, errors='strict'):
"""Decoder (to Unicode) that suppresses long base64 strings."""
shortened, length = encode_shortstrings(input_buffer, errors)
return unicode(shortened), length
def shortstrings_registerer(encoding_name):
if encoding_name == 'shortstrings':
return codecs.CodecInfo(name='shortstrings',
encode=encode_shortstrings,
decode=decode_shortstrings)
return None
codecs.register(shortstrings_registerer)
# Use json "dump string" method to get readable formatting;
# further modify it to not output too-long strings, aimed at the
# 10,000+ character hex-encoded "serialized_fn" values.
return json.dumps(
json.loads(encoding.MessageToJson(self.proto), encoding='shortstrings'),
indent=2, sort_keys=True)
@staticmethod
def _build_default_job_name(user_name):
"""Generates a default name for a job.
user_name is lowercased, and any characters outside of [-a-z0-9]
are removed. If necessary, the user_name is truncated to shorten
the job name to 63 characters."""
user_name = re.sub('[^-a-z0-9]', '', user_name.lower())
date_component = datetime.utcnow().strftime('%m%d%H%M%S-%f')
app_user_name = 'beamapp-{}'.format(user_name)
job_name = '{}-{}'.format(app_user_name, date_component)
if len(job_name) > 63:
job_name = '{}-{}'.format(app_user_name[:-(len(job_name) - 63)],
date_component)
return job_name
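# Illustrative example (the timestamp component is made up):
#
#   Job._build_default_job_name('John_Smith')
#   -> 'beamapp-johnsmith-0714103045-123456'
#
# The user name is lowercased, stripped to [-a-z0-9], and truncated if needed so
# the whole job name stays within 63 characters.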
@staticmethod
def default_job_name(job_name):
if job_name is None:
job_name = Job._build_default_job_name(getpass.getuser())
return job_name
def __init__(self, options):
self.options = options
self.google_cloud_options = options.view_as(GoogleCloudOptions)
if not self.google_cloud_options.job_name:
self.google_cloud_options.job_name = self.default_job_name(
self.google_cloud_options.job_name)
required_google_cloud_options = ['project', 'job_name', 'temp_location']
missing = [
option for option in required_google_cloud_options
if not getattr(self.google_cloud_options, option)]
if missing:
raise ValueError(
'Missing required configuration parameters: %s' % missing)
if not self.google_cloud_options.staging_location:
logging.info('Defaulting to the temp_location as staging_location: %s',
self.google_cloud_options.temp_location)
(self.google_cloud_options
.staging_location) = self.google_cloud_options.temp_location
# Make the staging and temp locations job name and time specific. This is
# needed to avoid clashes between job submissions using the same staging
# area or team members using same job names. This method is not entirely
# foolproof since two job submissions with same name can happen at exactly
# the same time. However the window is extremely small given that
# time.time() has at least microseconds granularity. We add the suffix only
# for GCS staging locations where the potential for such clashes is high.
if self.google_cloud_options.staging_location.startswith('gs://'):
path_suffix = '%s.%f' % (self.google_cloud_options.job_name, time.time())
self.google_cloud_options.staging_location = utils.path.join(
self.google_cloud_options.staging_location, path_suffix)
self.google_cloud_options.temp_location = utils.path.join(
self.google_cloud_options.temp_location, path_suffix)
self.proto = dataflow.Job(name=self.google_cloud_options.job_name)
if self.options.view_as(StandardOptions).streaming:
self.proto.type = dataflow.Job.TypeValueValuesEnum.JOB_TYPE_STREAMING
else:
self.proto.type = dataflow.Job.TypeValueValuesEnum.JOB_TYPE_BATCH
self.base64_str_re = re.compile(r'^[A-Za-z0-9+/]*=*$')
self.coder_str_re = re.compile(r'^([A-Za-z]+\$)([A-Za-z0-9+/]*=*)$')
def json(self):
return encoding.MessageToJson(self.proto)
def __reduce__(self):
"""Reduce hook for pickling the Job class more easily."""
return (Job, (self.options,))
class DataflowApplicationClient(object):
"""A Dataflow API client used by application code to create and query jobs."""
def __init__(self, options, environment_version):
"""Initializes a Dataflow API client object."""
self.standard_options = options.view_as(StandardOptions)
self.google_cloud_options = options.view_as(GoogleCloudOptions)
self.environment_version = environment_version
if self.google_cloud_options.no_auth:
credentials = None
else:
credentials = get_service_credentials()
self._client = dataflow.DataflowV1b3(
url=self.google_cloud_options.dataflow_endpoint,
credentials=credentials,
get_credentials=(not self.google_cloud_options.no_auth))
self._storage_client = storage.StorageV1(
url='https://www.googleapis.com/storage/v1',
credentials=credentials,
get_credentials=(not self.google_cloud_options.no_auth))
# TODO(silviuc): Refactor so that retry logic can be applied.
@retry.no_retries # Using no_retries marks this as an integration point.
def _gcs_file_copy(self, from_path, to_path):
to_folder, to_name = os.path.split(to_path)
with open(from_path, 'rb') as f:
self.stage_file(to_folder, to_name, f)
def stage_file(self, gcs_or_local_path, file_name, stream,
mime_type='application/octet-stream'):
"""Stages a file at a GCS or local path with stream-supplied contents."""
if not gcs_or_local_path.startswith('gs://'):
local_path = os.path.join(gcs_or_local_path, file_name)
logging.info('Staging file locally to %s', local_path)
with open(local_path, 'wb') as f:
f.write(stream.read())
return
gcs_location = gcs_or_local_path + '/' + file_name
bucket, name = gcs_location[5:].split('/', 1)
request = storage.StorageObjectsInsertRequest(
bucket=bucket, name=name)
logging.info('Starting GCS upload to %s...', gcs_location)
upload = storage.Upload(stream, mime_type)
try:
response = self._storage_client.objects.Insert(request, upload=upload)
except exceptions.HttpError as e:
reportable_errors = {
403: 'access denied',
404: 'bucket not found',
}
if e.status_code in reportable_errors:
raise IOError(('Could not upload to GCS path %s: %s. Please verify '
'that credentials are valid and that you have write '
'access to the specified path. Stale credentials can be '
'refreshed by executing "gcloud auth login".') %
(gcs_or_local_path, reportable_errors[e.status_code]))
raise
logging.info('Completed GCS upload to %s', gcs_location)
return response
# TODO(silviuc): Refactor so that retry logic can be applied.
@retry.no_retries # Using no_retries marks this as an integration point.
def create_job(self, job):
"""Creates job description. May stage and/or submit for remote execution."""
self.create_job_description(job)
# Stage and submit the job when necessary
dataflow_job_file = job.options.view_as(DebugOptions).dataflow_job_file
template_location = (
job.options.view_as(GoogleCloudOptions).template_location)
job_location = template_location or dataflow_job_file
if job_location:
gcs_or_local_path = os.path.dirname(job_location)
file_name = os.path.basename(job_location)
self.stage_file(gcs_or_local_path, file_name, StringIO(job.json()))
if not template_location:
return self.submit_job_description(job)
else:
return None
def create_job_description(self, job):
"""Creates a job described by the workflow proto."""
resources = dependency.stage_job_resources(
job.options, file_copy=self._gcs_file_copy)
job.proto.environment = Environment(
packages=resources, options=job.options,
environment_version=self.environment_version).proto
# TODO(silviuc): Remove the debug logging eventually.
logging.info('JOB: %s', job)
@retry.with_exponential_backoff(num_retries=3, initial_delay_secs=3)
def get_job_metrics(self, job_id):
request = dataflow.DataflowProjectsLocationsJobsGetMetricsRequest()
request.jobId = job_id
request.location = self.google_cloud_options.region
request.projectId = self.google_cloud_options.project
try:
response = self._client.projects_locations_jobs.GetMetrics(request)
except exceptions.BadStatusCodeError as e:
logging.error('HTTP status %d. Unable to query metrics',
e.response.status)
raise
return response
def submit_job_description(self, job):
"""Creates and excutes a job request."""
request = dataflow.DataflowProjectsLocationsJobsCreateRequest()
request.projectId = self.google_cloud_options.project
request.location = self.google_cloud_options.region
request.job = job.proto
try:
response = self._client.projects_locations_jobs.Create(request)
except exceptions.BadStatusCodeError as e:
logging.error('HTTP status %d trying to create job'
' at dataflow service endpoint %s',
e.response.status,
self.google_cloud_options.dataflow_endpoint)
logging.fatal('details of server error: %s', e)
raise
logging.info('Create job: %s', response)
# The response is a Job proto with the id for the new job.
logging.info('Created job with id: [%s]', response.id)
logging.info(
'To access the Dataflow monitoring console, please navigate to '
'https://console.developers.google.com/project/%s/dataflow/job/%s',
self.google_cloud_options.project, response.id)
return response
@retry.with_exponential_backoff() # Using retry defaults from utils/retry.py
def modify_job_state(self, job_id, new_state):
"""Modify the run state of the job.
Args:
job_id: The id of the job.
new_state: A string representing the new desired state. It could be set to
either 'JOB_STATE_DONE', 'JOB_STATE_CANCELLED' or 'JOB_STATE_DRAINING'.
Returns:
True if the job was modified successfully.
"""
if new_state == 'JOB_STATE_DONE':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DONE
elif new_state == 'JOB_STATE_CANCELLED':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_CANCELLED
elif new_state == 'JOB_STATE_DRAINING':
new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DRAINING
else:
# Other states could only be set by the service.
return False
request = dataflow.DataflowProjectsLocationsJobsUpdateRequest()
request.jobId = job_id
request.projectId = self.google_cloud_options.project
request.location = self.google_cloud_options.region
request.job = dataflow.Job(requestedState=new_state)
self._client.projects_locations_jobs.Update(request)
return True
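# Usage sketch (the job id below is hypothetical): draining a streaming job
# after submission.
#
#   client = DataflowApplicationClient(options, environment_version)
#   client.modify_job_state('2017-01-01_00_00_00-1234567890', 'JOB_STATE_DRAINING')
#
# Any string other than JOB_STATE_DONE / JOB_STATE_CANCELLED / JOB_STATE_DRAINING
# returns False, since other states can only be set by the service.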
@retry.with_exponential_backoff() # Using retry defaults from utils/retry.py
def get_job(self, job_id):
"""Gets the job status for a submitted job.
Args:
job_id: A string representing the job_id for the workflow as returned
by a create_job() request.
Returns:
A Job proto. See below for interesting fields.
The Job proto returned from a get_job() request contains some interesting
fields:
currentState: An object representing the current state of the job. The
string representation of the object (str() result) has the following
possible values: JOB_STATE_UNKNOWN, JOB_STATE_STOPPED,
JOB_STATE_RUNNING, JOB_STATE_DONE, JOB_STATE_FAILED,
JOB_STATE_CANCELLED.
createTime: UTC time when the job was created
(e.g. '2015-03-10T00:01:53.074Z')
currentStateTime: UTC time for the current state of the job.
"""
request = dataflow.DataflowProjectsLocationsJobsGetRequest()
request.jobId = job_id
request.projectId = self.google_cloud_options.project
request.location = self.google_cloud_options.region
response = self._client.projects_locations_jobs.Get(request)
return response
@retry.with_exponential_backoff() # Using retry defaults from utils/retry.py
def list_messages(
self, job_id, start_time=None, end_time=None, page_token=None,
minimum_importance=None):
"""List messages associated with the execution of a job.
Args:
job_id: A string representing the job_id for the workflow as returned
by a create_job() request.
start_time: If specified, only messages generated after the start time
will be returned, otherwise all messages since job started will be
returned. The value is a string representing UTC time
(e.g., '2015-08-18T21:03:50.644Z')
end_time: If specified, only messages generated before the end time
will be returned, otherwise all messages up to current time will be
returned. The value is a string representing UTC time
(e.g., '2015-08-18T21:03:50.644Z')
page_token: A string to be used as next page token if the list call
returned paginated results.
minimum_importance: Filter for messages based on importance. The possible
string values in increasing order of importance are: JOB_MESSAGE_DEBUG,
JOB_MESSAGE_DETAILED, JOB_MESSAGE_BASIC, JOB_MESSAGE_WARNING,
JOB_MESSAGE_ERROR. For example, a filter set on warning will allow only
warnings and errors and exclude all others.
Returns:
A tuple consisting of a list of JobMessage instances and a
next page token string.
Raises:
RuntimeError: if an unexpected value for the message_importance argument
is used.
The JobMessage objects returned by the call contain the following fields:
id: A unique string identifier for the message.
time: A string representing the UTC time of the message
(e.g., '2015-08-18T21:03:50.644Z')
messageImportance: An enumeration value for the message importance. The
value if converted to string will have the following possible values:
JOB_MESSAGE_DEBUG, JOB_MESSAGE_DETAILED, JOB_MESSAGE_BASIC,
JOB_MESSAGE_WARNING, JOB_MESSAGE_ERROR.
messageText: A message string.
"""
request = dataflow.DataflowProjectsLocationsJobsMessagesListRequest(
jobId=job_id, location=self.google_cloud_options.region,
projectId=self.google_cloud_options.project)
if page_token is not None:
request.pageToken = page_token
if start_time is not None:
request.startTime = start_time
if end_time is not None:
request.endTime = end_time
if minimum_importance is not None:
if minimum_importance == 'JOB_MESSAGE_DEBUG':
request.minimumImportance = (
dataflow.DataflowProjectsLocationsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_DEBUG)
elif minimum_importance == 'JOB_MESSAGE_DETAILED':
request.minimumImportance = (
dataflow.DataflowProjectsLocationsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_DETAILED)
elif minimum_importance == 'JOB_MESSAGE_BASIC':
request.minimumImportance = (
dataflow.DataflowProjectsLocationsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_BASIC)
elif minimum_importance == 'JOB_MESSAGE_WARNING':
request.minimumImportance = (
dataflow.DataflowProjectsLocationsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_WARNING)
elif minimum_importance == 'JOB_MESSAGE_ERROR':
request.minimumImportance = (
dataflow.DataflowProjectsLocationsJobsMessagesListRequest
.MinimumImportanceValueValuesEnum
.JOB_MESSAGE_ERROR)
else:
raise RuntimeError(
'Unexpected value for minimum_importance argument: %r' %
minimum_importance)
response = self._client.projects_locations_jobs_messages.List(request)
return response.jobMessages, response.nextPageToken
class MetricUpdateTranslators(object):
"""Translators between accumulators and dataflow metric updates."""
@staticmethod
def translate_boolean(accumulator, metric_update_proto):
metric_update_proto.boolean = accumulator.value
@staticmethod
def translate_scalar_mean_int(accumulator, metric_update_proto):
if accumulator.count:
metric_update_proto.integerMean = dataflow.IntegerMean()
metric_update_proto.integerMean.sum = to_split_int(accumulator.sum)
metric_update_proto.integerMean.count = to_split_int(accumulator.count)
else:
metric_update_proto.nameAndKind.kind = None
@staticmethod
def translate_scalar_mean_float(accumulator, metric_update_proto):
if accumulator.count:
metric_update_proto.floatingPointMean = dataflow.FloatingPointMean()
metric_update_proto.floatingPointMean.sum = accumulator.sum
metric_update_proto.floatingPointMean.count = to_split_int(
accumulator.count)
else:
metric_update_proto.nameAndKind.kind = None
@staticmethod
def translate_scalar_counter_int(accumulator, metric_update_proto):
metric_update_proto.integer = to_split_int(accumulator.value)
@staticmethod
def translate_scalar_counter_float(accumulator, metric_update_proto):
metric_update_proto.floatingPoint = accumulator.value
def to_split_int(n):
res = dataflow.SplitInt64()
res.lowBits = n & 0xffffffff
res.highBits = n >> 32
return res
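# Illustrative example: a 64-bit counter value is split into 32-bit halves for
# the SplitInt64 proto.
#
#   s = to_split_int((8 << 32) + 7)
#   (s.highBits, s.lowBits)  ->  (8, 7)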
def translate_distribution(distribution_update, metric_update_proto):
"""Translate metrics DistributionUpdate to dataflow distribution update."""
dist_update_proto = dataflow.DistributionUpdate()
dist_update_proto.min = to_split_int(distribution_update.min)
dist_update_proto.max = to_split_int(distribution_update.max)
dist_update_proto.count = to_split_int(distribution_update.count)
dist_update_proto.sum = to_split_int(distribution_update.sum)
metric_update_proto.distribution = dist_update_proto
def translate_value(value, metric_update_proto):
metric_update_proto.integer = to_split_int(value)
def translate_scalar(accumulator, metric_update):
metric_update.scalar = to_json_value(accumulator.value, with_type=True)
def translate_mean(accumulator, metric_update):
if accumulator.count:
metric_update.meanSum = to_json_value(accumulator.sum, with_type=True)
metric_update.meanCount = to_json_value(accumulator.count, with_type=True)
else:
# A denominator of 0 will raise an error in the service.
# What it means is we have nothing to report yet, so don't.
metric_update.kind = None
# To enable a counter on the service, add it to this dictionary.
metric_translations = {
cy_combiners.CountCombineFn: ('sum', translate_scalar),
cy_combiners.SumInt64Fn: ('sum', translate_scalar),
cy_combiners.MinInt64Fn: ('min', translate_scalar),
cy_combiners.MaxInt64Fn: ('max', translate_scalar),
cy_combiners.MeanInt64Fn: ('mean', translate_mean),
cy_combiners.SumFloatFn: ('sum', translate_scalar),
cy_combiners.MinFloatFn: ('min', translate_scalar),
cy_combiners.MaxFloatFn: ('max', translate_scalar),
cy_combiners.MeanFloatFn: ('mean', translate_mean),
cy_combiners.AllCombineFn: ('and', translate_scalar),
cy_combiners.AnyCombineFn: ('or', translate_scalar),
}
counter_translations = {
cy_combiners.CountCombineFn: (
dataflow.NameAndKind.KindValueValuesEnum.SUM,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.SumInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.SUM,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.MinInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.MIN,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.MaxInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.MAX,
MetricUpdateTranslators.translate_scalar_counter_int),
cy_combiners.MeanInt64Fn: (
dataflow.NameAndKind.KindValueValuesEnum.MEAN,
MetricUpdateTranslators.translate_scalar_mean_int),
cy_combiners.SumFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.SUM,
MetricUpdateTranslators.translate_scalar_counter_float),
cy_combiners.MinFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.MIN,
MetricUpdateTranslators.translate_scalar_counter_float),
cy_combiners.MaxFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.MAX,
MetricUpdateTranslators.translate_scalar_counter_float),
cy_combiners.MeanFloatFn: (
dataflow.NameAndKind.KindValueValuesEnum.MEAN,
MetricUpdateTranslators.translate_scalar_mean_float),
cy_combiners.AllCombineFn: (
dataflow.NameAndKind.KindValueValuesEnum.AND,
MetricUpdateTranslators.translate_boolean),
cy_combiners.AnyCombineFn: (
dataflow.NameAndKind.KindValueValuesEnum.OR,
MetricUpdateTranslators.translate_boolean),
}
|
|
import logging
from lib import constant_pool
from lib import instruction
from lib import read_bytes
SAME = 0
SAME_LOCALS_1_STACK_ITEM = 64
SAME_LOCALS_1_STACK_ITEM_EXTENDED = 247
CHOP = 248
SAME_FRAME_EXTENDED = 251
APPEND = 252
FULL_FRAME = 255
def parse(fd, class_file):
'''Parse attributes
'''
count = read_bytes.read_u2_int(fd)
attributes = []
for _ in range(count):
attr = parse_attr(fd, class_file)
attributes.append(attr)
return (count, attributes)
def parse_attr(fd, class_file):
name_index = read_bytes.read_u2_int(fd)
length = read_bytes.read_u4_int(fd)
name_constant = class_file.constant_pool[name_index]
assert type(name_constant) == constant_pool.ConstantUtf8, 'Attribute name constant is not CONSTANT_Utf8_info.'
attribute_type = {
'ConstantValue': ConstantValueAttribute,
'Code': CodeAttribute,
'StackMapTable': StackMapTableAttribute,
'Exceptions': ExceptionsAttribute,
'BootstrapMethods': BootstrapMethodsAttribute
}.get(name_constant.value(), Attribute)
attr = attribute_type(name_constant.value(), length)
attr.parse_info(fd, class_file)
return attr
class Attribute(object):
def __init__(self, name, length):
self.name = name
self.length = length
def parse_info(self, fd, class_file):
self.info = fd.read(self.length)
def debug_info(self, prefix=''):
logging.debug(prefix + 'Attribute name:' + str(self.name))
logging.debug(prefix + 'Attribute length:' + str(self.length))
'''Five attributes critical to the correct interpretation of the class file.'''
class ConstantValueAttribute(Attribute):
pass
class CodeAttribute(Attribute):
def code_to_instructions(self):
self.instructions = [None for _ in range(self.code_length)]
pos = 0
while pos < self.code_length:
opcode = self.code[pos]
if opcode not in instruction.OPCODES:
logging.warning(
'Unrecognized instruction 0x{:02X} at pos {pos}; ignoring the rest of the code.'.format(
opcode, pos=pos
)
)
break
inst = instruction.OPCODES[opcode](pos)
operands_start = pos + 1
operands_end = operands_start + inst.len_of_operand()
operands = bytes(self.code[operands_start:operands_end])
inst.put_operands(operands)
self.instructions[pos] = inst
pos = operands_end
def parse_info(self, fd, class_file):
self.max_stack = read_bytes.read_u2_int(fd)
self.max_locals = read_bytes.read_u2_int(fd)
self.code_length = read_bytes.read_u4_int(fd)
self.code = fd.read(self.code_length)
self.exception_table_length = read_bytes.read_u2_int(fd)
self.exception_table = []
for _ in range(self.exception_table_length):
start_pc = read_bytes.read_u2_int(fd)
end_pc = read_bytes.read_u2_int(fd)
handler_pc = read_bytes.read_u2_int(fd)
catch_type = read_bytes.read_u2_int(fd)
self.exception_table.append(
(start_pc, end_pc, handler_pc, catch_type))
(self.attributes_count, self.attributes) = parse(fd, class_file)
self.code_to_instructions()
def debug_info(self, prefix=''):
super().debug_info(prefix)
logging.debug(prefix + 'max stack:' + str(self.max_stack))
logging.debug(prefix + 'max locals:' + str(self.max_locals))
logging.debug(prefix + 'code length:' + str(self.code_length))
logging.debug(prefix + 'code: ' + ' '.join('0x{:02X}'.format(i) for i in self.code))
logging.debug(prefix + 'attribute count:' + str(self.attributes_count))
for attr in self.attributes:
attr.debug_info(prefix + ' - ')
class StackMapTableAttribute(Attribute):
'''A StackMapTable attribute is used during the process of
verification by type checking, which means it can likely be
ignored by this parser for now.
'''
def parse_info(self, fd, class_file):
self.number_of_entries = read_bytes.read_u2_int(fd)
self.stack_map_frame_entries = []
for _ in range(self.number_of_entries):
frame = parse_stack_map_frame(fd)
self.stack_map_frame_entries.append(frame)
def debug_info(self, prefix=''):
super().debug_info(prefix)
logging.debug(prefix + 'Num of entries:' + str(self.number_of_entries))
for frame in self.stack_map_frame_entries:
frame.debug_info(prefix + ' - ')
class ExceptionsAttribute(Attribute):
pass
class BootstrapMethodsAttribute(Attribute):
pass
class StackMapFrame(object):
def __init__(self, frame_type):
self.frame_type = frame_type
self.offset_delta = 0
def parse(self, fd):
raise NotImplementedError('Parse not implemented for generic stack map frame.')
def debug_info(self, prefix):
logging.debug(prefix + type(self).__name__ + ', offset delta {offset}'.format(offset=self.offset_delta))
class SameFrame(StackMapFrame):
def __init__(self, frame_type):
'''The offset_delta value for the frame is the value
of the tag item, frame_type.
'''
super().__init__(SAME)
self.offset_delta = frame_type
def parse(self, fd):
pass
class SameLocals1StackItemFrame(StackMapFrame):
def __init__(self, frame_type):
'''The offset_delta value for the frame is given by
the formula frame_type - 64
'''
super().__init__(SAME_LOCALS_1_STACK_ITEM)
self.offset_delta = frame_type - 64
def parse(self, fd):
self.verification_type_info = parse_verification_type_info(fd)
class SameLocals1StackItemFrameExtended(StackMapFrame):
def __init__(self, frame_type):
super().__init__(SAME_LOCALS_1_STACK_ITEM_EXTENDED)
def parse(self, fd):
self.offset_delta = read_bytes.read_u2_int(fd)
self.verification_type_info = parse_verification_type_info(fd)
class ChopFrame(StackMapFrame):
def __init__(self, frame_type):
super().__init__(CHOP)
self.num_of_absent = 251 - frame_type
def parse(self, fd):
self.offset_delta = read_bytes.read_u2_int(fd)
class SameFrameExtended(StackMapFrame):
def __init__(self, frame_type):
super().__init__(SAME_FRAME_EXTENDED)
def parse(self, fd):
self.offset_delta = read_bytes.read_u2_int(fd)
class AppendFrame(StackMapFrame):
def __init__(self, frame_type):
super().__init__(APPEND)
self.locals = []
self.num_of_additional = frame_type - 251
def parse(self, fd):
self.offset_delta = read_bytes.read_u2_int(fd)
for _ in range(self.num_of_additional):
v_type_info = parse_verification_type_info(fd)
self.locals.append(v_type_info)
class FullFrame(StackMapFrame):
def __init__(self, frame_type):
super().__init__(FULL_FRAME)
self.locals = []
self.stack = []
def parse(self, fd):
self.offset_delta = read_bytes.read_u2_int(fd)
self.number_of_locals = read_bytes.read_u2_int(fd)
for _ in range(self.number_of_locals):
v_type_info = parse_verification_type_info(fd)
self.locals.append(v_type_info)
self.number_of_stack_items = read_bytes.read_u2_int(fd)
for _ in range(self.number_of_stack_items):
v_type_info = parse_verification_type_info(fd)
self.stack.append(v_type_info)
_frame_type = {
SAME: SameFrame,
SAME_LOCALS_1_STACK_ITEM: SameLocals1StackItemFrame,
SAME_LOCALS_1_STACK_ITEM_EXTENDED: SameLocals1StackItemFrameExtended,
CHOP: ChopFrame,
SAME_FRAME_EXTENDED: SameFrameExtended,
APPEND: AppendFrame,
FULL_FRAME: FullFrame
}
ITEM_Top = 0
ITEM_Integer = 1
ITEM_Float = 2
ITEM_Null = 5
ITEM_UninitializedThis = 6
ITEM_Object = 7
ITEM_Uninitialized = 8
ITEM_Long = 4
ITEM_Double = 3
def _convert_frame_type(frame_type):
if frame_type in range(0, 64):
return SAME
if frame_type in range(64, 128):
return SAME_LOCALS_1_STACK_ITEM
if frame_type == 247:
return SAME_LOCALS_1_STACK_ITEM_EXTENDED
if frame_type in range(248, 251):
return CHOP
if frame_type == 251:
return SAME_FRAME_EXTENDED
if frame_type in range(252, 255):
return APPEND
if frame_type == 255:
return FULL_FRAME
raise ValueError('Invalid frame type value {0}'.format(frame_type))
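# Illustrative examples of the frame_type ranges handled above:
#
#   _convert_frame_type(0)    -> SAME                      (types 0-63)
#   _convert_frame_type(100)  -> SAME_LOCALS_1_STACK_ITEM  (types 64-127)
#   _convert_frame_type(249)  -> CHOP                      (types 248-250)
#   _convert_frame_type(253)  -> APPEND                    (types 252-254)
#   _convert_frame_type(200)  -> raises ValueError         (reserved range)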
def parse_stack_map_frame(fd):
frame_type = read_bytes.read_u1_int(fd)
cvt_type = _convert_frame_type(frame_type)
frame_class = _frame_type[cvt_type]
frame = frame_class(frame_type)
frame.parse(fd)
return frame
def parse_verification_type_info(fd):
tag = read_bytes.read_u1_int(fd)
if tag in (
ITEM_Top,
ITEM_Integer,
ITEM_Float,
ITEM_Null,
ITEM_UninitializedThis,
ITEM_Long,
ITEM_Double
):
return (tag, None)
if tag == ITEM_Object:
cpool_index = read_bytes.read_u2_int(fd)
return (tag, cpool_index)
if tag == ITEM_Uninitialized:
offset = read_bytes.read_u2_int(fd)
return (tag, offset)
raise ValueError('Invalid verification_type_info tag value {0}'.format(tag))
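# Illustrative return values: most tags carry no extra payload, while Object and
# Uninitialized entries read a two-byte operand from the stream.
#
#   (ITEM_Integer, None)           # primitive entry
#   (ITEM_Object, cpool_index)     # index into the constant pool
#   (ITEM_Uninitialized, offset)   # bytecode offset of the `new` instruction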
|
|
"""Class to run redmapper on a single pixel, for distributed runs.
"""
import os
import numpy as np
import glob
from ..configuration import Configuration
from ..utilities import make_lockfile
from ..run_firstpass import RunFirstPass
from ..run_likelihoods import RunLikelihoods
from ..run_percolation import RunPercolation
from ..run_randoms_zmask import RunRandomsZmask
from ..run_zscan import RunZScan
# RunCatalog is needed by RuncatPixelTask below; assumed to live in ..runcat.
from ..runcat import RunCatalog
from ..utilities import getMemoryString
class RunRedmapperPixelTask(object):
"""
Class to run redmapper on a single healpix pixel, for distributed runs.
"""
def __init__(self, configfile, pixel, nside, path=None):
"""
Instantiate a RunRedmapperPixelTask.
Parameters
----------
configfile: `str`
Configuration yaml filename.
pixel: `int`
Healpix pixel to run on.
nside: `int`
Healpix nside associated with pixel.
path: `str`, optional
Output path. Default is None, use same absolute
path as configfile.
"""
if path is None:
outpath = os.path.dirname(os.path.abspath(configfile))
else:
outpath = path
        self.config = Configuration(configfile, outpath=outpath)
self.pixel = pixel
self.nside = nside
def run(self):
"""
Run redmapper on a single healpix pixel.
This method will check if files already exist, and will
skip any steps that already exist. The border radius
will automatically be calculated based on the richest
possible cluster at the lowest possible redshift.
All files will be placed in self.config.outpath (see
self.__init__)
"""
# need to think about outpath
# Make sure all files are here and okay...
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)
# Compute the border size
self.config.border = self.config.compute_border()
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
# Do the run
self.config.start_file_logging()
self.config.logger.info("Running redMaPPer on pixel %d" % (self.pixel))
firstpass = RunFirstPass(self.config)
if not os.path.isfile(firstpass.filename):
firstpass.run()
firstpass.output(savemembers=False, withversion=False)
else:
self.config.logger.info("Firstpass file %s already present. Skipping..." % (firstpass.filename))
self.config.catfile = firstpass.filename
# Clear out the firstpass memory
del firstpass
like = RunLikelihoods(self.config)
if not os.path.isfile(like.filename):
like.run()
like.output(savemembers=False, withversion=False)
else:
self.config.logger.info("Likelihood file %s already present. Skipping..." % (like.filename))
self.config.catfile = like.filename
# Clear out the likelihood memory
del like
perc = RunPercolation(self.config)
if not os.path.isfile(perc.filename):
perc.run()
perc.output(savemembers=True, withversion=False)
else:
self.config.logger.info("Percolation file %s already present. Skipping..." % (perc.filename))
self.config.stop_file_logging()
class RuncatPixelTask(object):
"""
Class to run richness computation (runcat) on a single healpix pixel, for
distributed runs.
"""
def __init__(self, configfile, pixel, nside, path=None):
"""
Instantiate a RuncatPixelTask.
Parameters
----------
configfile: `str`
Configuration yaml filename.
pixel: `int`
Healpix pixel to run on.
nside: `int`
Healpix nside associated with pixel.
path: `str`, optional
Output path. Default is None, use same absolute
path as configfile.
        Notes
        -----
        Percolation masking for the richness computation is controlled by the
        ``runcat_percolation_masking`` option in the configuration.
"""
if path is None:
outpath = os.path.dirname(os.path.abspath(configfile))
else:
outpath = path
        self.config = Configuration(configfile, outpath=outpath)
self.pixel = pixel
self.nside = nside
def run(self):
"""
Run runcat on a single healpix pixel.
All files will be placed in self.config.outpath (see
self.__init__)
"""
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)
# Compute the border size
self.config.border = self.config.compute_border()
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
# Do the run
self.config.start_file_logging()
self.config.logger.info("Running runcat on pixel %d" % (self.pixel))
runcat = RunCatalog(self.config)
if not os.path.isfile(runcat.filename):
runcat.run(do_percolation_masking=self.config.runcat_percolation_masking)
runcat.output(savemembers=True, withversion=True)
self.config.stop_file_logging()
class RunZmaskPixelTask(object):
"""
Class to run redmapper zmask randoms on a single healpix pixel, for
distributed runs.
"""
def __init__(self, configfile, pixel, nside, path=None):
"""
Instantiate a RunZmaskPixelTask.
Parameters
----------
configfile: `str`
Configuration yaml filename.
pixel: `int`
Healpix pixel to run on.
nside: `int`
Healpix nside associated with pixel.
path: `str`, optional
Output path. Default is None, use same absolute
path as configfile.
"""
if path is None:
outpath = os.path.dirname(os.path.abspath(configfile))
else:
outpath = path
        self.config = Configuration(configfile, outpath=outpath)
self.pixel = pixel
self.nside = nside
def run(self):
"""
Run zmask on a single healpix pixel.
This method will check if files already exist, and will
skip any steps that already exist. The border radius
will automatically be calculated based on the richest
possible cluster at the lowest possible redshift.
All files will be placed in self.config.outpath (see
self.__init__)
"""
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
self.config.check_files(check_zredfile=False, check_bkgfile=True,
check_parfile=True, check_randfile=True)
# Compute the border size
self.config.border = self.config.compute_border()
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
self.config.start_file_logging()
self.config.logger.info("Running zmask on pixel %d" % (self.pixel))
rand_zmask = RunRandomsZmask(self.config)
if not os.path.isfile(rand_zmask.filename):
rand_zmask.run()
rand_zmask.output(savemembers=False, withversion=False)
# All done
self.config.stop_file_logging()
class RunZScanPixelTask(object):
"""Class to run redshift-scanning (zscan) on a single healpix pixel, for
distributed runs.
"""
def __init__(self, configfile, pixel, nside, path=None):
"""Instantiate a RunZScanPixelTask.
Parameters
----------
configfile: `str`
Configuration yaml filename.
pixel: `int`
Healpix pixel to run on.
nside: `int`
Healpix nside associated with pixel.
path: `str`, optional
Output path. Default is None, use same absolute
path as configfile.
"""
if path is None:
outpath = os.path.dirname(os.path.abspath(configfile))
else:
outpath = path
        self.config = Configuration(configfile, outpath=outpath)
self.pixel = pixel
self.nside = nside
def run(self):
"""Run zscan on a single healpix pixel.
All files will be placed in self.config.outpath (see
self.__init__)
"""
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)
# Compute the border size
self.config.border = self.config.compute_border()
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
# Do the run
self.config.start_file_logging()
self.config.logger.info("Running zscan on pixel %d" % (self.pixel))
runzscan = RunZScan(self.config)
if not os.path.isfile(runzscan.filename):
runzscan.run()
runzscan.output(savemembers=True, withversion=True)
self.config.stop_file_logging()
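# Minimal driver sketch (assumptions: "run.yml" is a valid redmapper
# configuration in the working directory with a pixelized galaxy file, and the
# pixel/nside values below are illustrative only).
if __name__ == '__main__':
    task = RunRedmapperPixelTask('run.yml', pixel=1234, nside=8)
    task.run()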
|
|
import datetime
from rest_framework import generics, status
from rest_framework.response import Response
from openbudgets.apps.international.utilities import translated_fields
from openbudgets.apps.sheets import serializers
from openbudgets.apps.sheets import models
from openbudgets.apps.accounts.models import Account
from openbudgets.apps.sheets.serializers import SheetTimeline
class TemplateList(generics.ListAPIView):
"""API endpoint that represents a list of templates."""
model = models.Template
queryset = model.objects.related_map_min()
serializer_class = serializers.TemplateMin
ordering = ['id', 'name', 'period_start', 'created_on', 'last_modified']
search_fields = ['name', 'description'] + translated_fields(model)
def get_queryset(self):
queryset = super(TemplateList, self).get_queryset()
### FILTERS
domains = self.request.QUERY_PARAMS.get('domains', None)
divisions = self.request.QUERY_PARAMS.get('divisions', None)
entities = self.request.QUERY_PARAMS.get('entities', None)
# DOMAINS: return templates used in the given domain(s).
if domains:
domains = domains.split(',')
queryset = queryset.filter(divisions__domain__in=domains).distinct()
# DIVISIONS: return templates used in the given division(s).
if divisions:
divisions = divisions.split(',')
queryset = queryset.filter(divisions__in=divisions)
# ENTITIES: return templates used by the given entity(-ies).
if entities:
entities = entities.split(',')
queryset = queryset.filter(sheets__entity__in=entities)
# DEFAULT: We just want to return "official" templates, unless a
# specific filter requires otherwise
if not self.request.QUERY_PARAMS:
queryset = queryset.exclude(divisions=None)
return queryset
class TemplateDetail(generics.RetrieveAPIView):
"""API endpoint that represents a single template."""
model = models.Template
queryset = model.objects.related_map()
serializer_class = serializers.Template
class TemplateNodeList(generics.ListAPIView):
"""API endpoint that represents a list of template nodes."""
model = models.TemplateNode
queryset = model.objects.related_map()
serializer_class = serializers.TemplateNode
ordering = ['id', 'name', 'description', 'created_on', 'last_modified']
search_fields = ['name', 'description'] + translated_fields(model)
def get_queryset(self):
queryset = super(TemplateNodeList, self).get_queryset()
### FILTERS
templates = self.request.QUERY_PARAMS.get('templates', None)
entities = self.request.QUERY_PARAMS.get('entities', None)
parents = self.request.QUERY_PARAMS.get('parents', None)
# for latest query only:
entity = self.request.QUERY_PARAMS.get('entity', None)
latest = self.request.QUERY_PARAMS.get('latest', None)
# TEMPLATES: return template nodes used in the given template(s).
if templates:
templates = templates.split(',')
queryset = queryset.filter(templates__in=templates)
# ENTITIES: return template nodes of templates used by the given entity(-ies).
if entities:
entities = entities.split(',')
queryset = queryset.filter(sheets__entity__in=entities)
# PARENTS: return nodes that are children of given parent(s).
if parents and parents == 'none':
queryset = queryset.filter(parent__isnull=True)
elif parents:
parents = parents.split(',')
queryset = queryset.filter(parent__in=parents)
        # TODO: revisit. This was implemented for the timeline view; it could
        # probably be done more efficiently elsewhere.
if entity is not None:
if latest:
queryset = models.Template.objects.latest_of(entity=entity).nodes
else:
pass
return queryset
class TemplateNodeDetail(generics.RetrieveAPIView):
"""API endpoint that represents a single template node."""
model = models.TemplateNode
queryset = model.objects.related_map()
serializer_class = serializers.TemplateNode
class SheetList(generics.ListAPIView):
"""API endpoint that represents a list of budget sheets."""
model = models.Sheet
queryset = model.objects.related_map()
serializer_class = serializers.SheetMin
ordering = ['id', 'entity__name', 'period_start', 'created_on', 'last_modified']
search_fields = ['entity__name', 'description', 'period_start',
'period_end'] + translated_fields(model)
def get_queryset(self):
queryset = super(SheetList, self).get_queryset()
### FILTERS
entities = self.request.QUERY_PARAMS.get('entities', None)
divisions = self.request.QUERY_PARAMS.get('divisions', None)
templates = self.request.QUERY_PARAMS.get('templates', None)
budget_gt = self.request.QUERY_PARAMS.get('budget_gt', None)
budget_gte = self.request.QUERY_PARAMS.get('budget_gte', None)
        budget_lt = self.request.QUERY_PARAMS.get('budget_lt', None)
        budget_lte = self.request.QUERY_PARAMS.get('budget_lte', None)
actual_gt = self.request.QUERY_PARAMS.get('actual_gt', None)
actual_gte = self.request.QUERY_PARAMS.get('actual_gte', None)
        actual_lt = self.request.QUERY_PARAMS.get('actual_lt', None)
        actual_lte = self.request.QUERY_PARAMS.get('actual_lte', None)
latest = self.request.QUERY_PARAMS.get('latest', None)
periods = self.request.QUERY_PARAMS.get('periods', None)
# ENTITIES: return sheets that belong to the given entity(-ies).
if entities:
entities = entities.split(',')
queryset = queryset.filter(entity__in=entities)
# DIVISIONS: return sheets that are under the given division(s).
if divisions:
divisions = divisions.split(',')
queryset = queryset.filter(entity__division_id__in=divisions)
# TEMPLATES: return sheets that use the given template(s).
if templates:
templates = templates.split(',')
queryset = queryset.filter(template__in=templates)
        # BUDGET_GT: return sheets with a budget amount greater than the
        # given amount.
        if budget_gt:
            queryset = queryset.filter(budget__gt=budget_gt)
        # BUDGET_LT: return sheets with a budget amount less than the
        # given amount.
if budget_lt:
queryset = queryset.filter(budget__lt=budget_lt)
# BUDGET_GTE: return sheets with a budget amount greater than or
# equal to the given amount.
if budget_gte:
queryset = queryset.filter(budget__gte=budget_gte)
# BUDGET_LTE: return sheets with a budget amount less than or
# equal to the given amount.
if budget_lte:
queryset = queryset.filter(budget__lte=budget_lte)
# ACTUAL_GT: return sheets with an actual amount greater than the
# given amount.
if actual_gt:
queryset = queryset.filter(actual__gt=actual_gt)
# ACTUAL_LT: return sheets with an actual amount less than the
# given amount.
if actual_lt:
            queryset = queryset.filter(actual__lt=actual_lt)
# ACTUAL_GTE: return sheets with an actual amount greater than or
# equal to the given amount.
if actual_gte:
            queryset = queryset.filter(actual__gte=actual_gte)
# ACTUAL_LTE: return sheets with an actual amount less than or
# equal to the given amount.
if actual_lte:
            queryset = queryset.filter(actual__lte=actual_lte)
        # PERIODS: return sheets matching the given period(s).
        if periods:
            periods = [datetime.date(int(p), 1, 1) for p in periods.split(',')]
            queryset = queryset.filter(period_start__in=periods)
        # LATEST: return only the latest sheet matching the rest of the query.
        # Applied last with an ordered slice, since a sliced queryset cannot be
        # filtered further and .latest() would return an instance, not a queryset.
        if latest == 'true':
            queryset = queryset.order_by('-period_start')[:1]
        return queryset
class SheetDetail(generics.RetrieveAPIView):
"""API endpoint that represents a single budget."""
model = models.Sheet
queryset = model.objects.related_map()
serializer_class = serializers.Sheet
class SheetItemList(generics.ListAPIView):
"""API endpoint that represents a list of budget items."""
model = models.SheetItem
queryset = model.objects.related_map()
serializer_class = serializers.SheetItem
ordering = ['id', 'sheet__entity__name', 'node__code', 'created_on',
'last_modified']
search_fields = ['sheet__entity__name', 'node__code', 'node__name',
'description'] + translated_fields(model)
def get_queryset(self):
queryset = super(SheetItemList, self).get_queryset()
### FILTERS
has_comments = self.request.QUERY_PARAMS.get('has_comments', None)
sheets = self.request.QUERY_PARAMS.get('sheets', None)
entities = self.request.QUERY_PARAMS.get('entities', None)
divisions = self.request.QUERY_PARAMS.get('divisions', None)
parents = self.request.QUERY_PARAMS.get('parents', None)
nodes = self.request.QUERY_PARAMS.get('nodes', None)
node_parents = self.request.QUERY_PARAMS.get('node_parents', None)
direction = self.request.QUERY_PARAMS.get('direction', None)
codes = self.request.QUERY_PARAMS.get('codes', None)
budget_gt = self.request.QUERY_PARAMS.get('budget_gt', None)
budget_gte = self.request.QUERY_PARAMS.get('budget_gte', None)
budget_lt = self.request.QUERY_PARAMS.get('budget_lt', None)
budget_lte = self.request.QUERY_PARAMS.get('budget_lte', None)
actual_gt = self.request.QUERY_PARAMS.get('actual_gt', None)
actual_gte = self.request.QUERY_PARAMS.get('actual_gte', None)
actual_lt = self.request.QUERY_PARAMS.get('actual_lt', None)
actual_lte = self.request.QUERY_PARAMS.get('actual_lte', None)
periods = self.request.QUERY_PARAMS.get('periods', None)
# HAS_COMMENTS: return sheet items that have user discussion.
matches = []
if has_comments == 'true':
for obj in queryset:
if obj.discussion.all():
matches.append(obj.pk)
queryset = queryset.filter(pk__in=matches)
elif has_comments == 'false':
for obj in queryset:
if not obj.discussion.all():
matches.append(obj.pk)
queryset = queryset.filter(pk__in=matches)
# SHEETS: return sheet items that belong to the given entity(-ies).
if sheets:
sheets = sheets.split(',')
queryset = queryset.filter(sheet__in=sheets)
# ENTITIES: return sheet items that belong to the given entity(-ies).
if entities:
entities = entities.split(',')
queryset = queryset.filter(sheet__entity__in=entities)
# DIVISIONS: return sheet items that are under the given division(s).
if divisions:
divisions = divisions.split(',')
queryset = queryset.filter(sheet__entity__division_id__in=divisions)
# DIRECTION: return sheet items in the given direction.
if direction:
direction = direction.upper()
queryset = queryset.filter(node__direction=direction)
# CODES: return sheet items that match the given code(s).
if codes:
codes = codes.split(',')
queryset = queryset.filter(node__code__in=codes)
# PARENTS: return items that are children of given parent(s).
if parents and parents == 'none':
queryset = queryset.filter(parent__isnull=True)
elif parents:
parents = parents.split(',')
queryset = queryset.filter(parent__pk__in=parents)
# NODES: return sheet items that belong to the given node(s).
if nodes:
nodes = nodes.split(',')
queryset = queryset.filter(node__in=nodes)
# NODE PARENTS: return items that are children of given node parent(s).
if node_parents and node_parents == 'none':
queryset = queryset.filter(node__parent__isnull=True)
elif node_parents:
node_parents = node_parents.split(',')
queryset = queryset.filter(node__parent__pk__in=node_parents)
# BUDGET_GT: return sheet items with a budget amount greater than the
# given amount.
if budget_gt:
queryset = queryset.filter(budget__gt=budget_gt)
# BUDGET_LT: return sheet items with a budget amount less than the
# given amount.
if budget_lt:
queryset = queryset.filter(budget__lt=budget_lt)
# BUDGET_GTE: return sheet items with a budget amount greater than or
# equal to the given amount.
if budget_gte:
queryset = queryset.filter(budget__gte=budget_gte)
# BUDGET_LTE: return sheet items with a budget amount less than or
# equal to the given amount.
if budget_lte:
queryset = queryset.filter(budget__lte=budget_lte)
# ACTUAL_GT: return sheet items with an actual amount greater than the
# given amount.
if actual_gt:
queryset = queryset.filter(actual__gt=actual_gt)
# ACTUAL_LT: return sheet items with an actual amount less than the
# given amount.
if actual_lt:
            queryset = queryset.filter(actual__lt=actual_lt)
# ACTUAL_GTE: return sheet items with an actual amount greater than or
# equal to the given amount.
if actual_gte:
            queryset = queryset.filter(actual__gte=actual_gte)
# ACTUAL_LTE: return sheet items with an actual amount less than or
# equal to the given amount.
if actual_lte:
            queryset = queryset.filter(actual__lte=actual_lte)
        # PERIODS: return sheet items matching the given period(s).
if periods:
periods = [datetime.date(int(p), 1, 1) for p in periods.split(',')]
queryset = queryset.filter(sheet__period_start__in=periods)
return queryset
class SheetItemDetail(generics.RetrieveAPIView):
"""API endpoint that represents a single budget item."""
model = models.SheetItem
queryset = model.objects.related_map()
serializer_class = serializers.SheetItem
class SheetItemTimeline(generics.ListAPIView):
"""API endpoint that retrieves a timeline of sheet items.
The timeline is created according to the given entity, node(s)
"""
def get(self, request, entity_pk, *args, **kwargs):
"""GET handler for retrieving all budget items and actual items of the node's timeline, filtered by entity"""
nodes = self.request.QUERY_PARAMS.get('nodes', None)
if nodes:
nodes = nodes.split(',')
else:
# Provide a sensible default.
# If there is no node query param, let's return the top level nodes,
# as used in the latest Sheet.
#nodes = [for models.Sheet.objects.latest_of(entity_pk).shee]
#TODO: handle case of no nodes specified
pass
items = models.SheetItem.objects.timeline(nodes, entity_pk)
serialized_timeline = SheetTimeline(items, many=True).data
return Response(serialized_timeline)
class SheetItemCommentEmbeddedList(generics.ListCreateAPIView):
"""
Called via an API endpoint using GET it represents a list of SheetItemComments.
Called via an API endpoint using POST it creates a new of SheetItemComment.
"""
model = models.SheetItemComment
queryset = model.objects.related_map()
# serializer_class = serializers.SheetItemCommentBaseSerializer
search_fields = ['user__first_name', 'user__last_name', 'comment']
def get_serializer_class(self):
if self.request.method == 'POST':
# base serializer for creating SheetItemComment
return serializers.SheetItemCommentEmbed
# SheetItemComment list/retrieve serializer
return serializers.SheetItemCommentRead
def get_queryset(self):
return self.model.objects.by_item(self.kwargs.get('pk'))
def pre_save(self, obj):
obj.user = Account.objects.get(uuid=self.request.DATA.get('user'))
obj.item = models.SheetItem.objects.get(id=self.kwargs.get('pk'))
#TODO: this is an ugly hack and awaiting a response here: https://groups.google.com/forum/?fromgroups=#!topic/django-rest-framework/JrYdE3p6QZE
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.DATA, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
# here we step in and override current serializer used for create
# with a different serializer used for retrieve
serializer = serializers.SheetItemCommentRead(self.object)
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SheetItemCommentList(generics.ListAPIView):
"""API endpoint that represents a list of sheet item comments."""
model = models.SheetItemComment
queryset = model.objects.related_map()
serializer_class = serializers.SheetItemCommentMin
class SheetItemCommentDetail(generics.RetrieveAPIView):
"""API endpoint that represents a single sheet item comment item."""
model = models.SheetItemComment
queryset = model.objects.related_map()
serializer_class = serializers.SheetItemCommentMin
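# Illustrative queries against the list endpoints above (the URL prefixes and
# filter values are assumptions; the actual routes depend on the project's
# urlconf):
#
#     GET /sheets/?entities=1,2&latest=true
#     GET /sheets/items/?divisions=3&direction=EXPENDITURE&budget_gte=1000
#     GET /templates/nodes/?templates=5&parents=none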
|
|
from pandac.PandaModules import Point3, CollisionSphere, CollisionNode, CollisionHandlerEvent, NodePath, TextNode
from direct.distributed.ClockDelta import globalClockDelta
from direct.interval.IntervalGlobal import Wait, LerpFunctionInterval, LerpHprInterval, Sequence, Parallel, Func, SoundInterval, ActorInterval, ProjectileInterval, Track, LerpScaleInterval, WaitInterval, LerpPosHprInterval
from direct.gui.DirectGui import DirectLabel
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.showbase import RandomNumGen
from direct.task import Task
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownTimer
from toontown.minigame import CogThiefGameToonSD
from toontown.minigame.OrthoDrive import OrthoDrive
from toontown.minigame.OrthoWalk import OrthoWalk
from toontown.minigame import CogThiefGameGlobals
from toontown.minigame import CogThief
from toontown.minigame.DistributedMinigame import DistributedMinigame
from toontown.minigame import Trajectory
from toontown.minigame import MinigameGlobals
from toontown.minigame import CogThiefWalk
CTGG = CogThiefGameGlobals
class DistributedCogThiefGame(DistributedMinigame):
notify = directNotify.newCategory('DistributedCogThiefGame')
ToonSpeed = CTGG.ToonSpeed
StageHalfWidth = 200.0
StageHalfHeight = 100.0
BarrelScale = 0.25
TOON_Z = 0
UPDATE_SUITS_TASK = 'CogThiefGameUpdateSuitsTask'
REWARD_COUNTDOWN_TASK = 'cogThiefGameRewardCountdown'
ControlKeyLimitTime = 1.0
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedCogThiefGame', [State.State('off', self.enterOff, self.exitOff, ['play']), State.State('play', self.enterPlay, self.exitPlay, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
self.addChildGameFSM(self.gameFSM)
self.cameraTopView = (0, 0, 55, 0, -90.0, 0)
self.barrels = []
self.cogInfo = {}
self.lastTimeControlPressed = 0
self.stolenBarrels = []
self.useOrthoWalk = base.config.GetBool('cog-thief-ortho', 1)
self.resultIval = None
self.gameIsEnding = False
self.__textGen = TextNode('cogThiefGame')
self.__textGen.setFont(ToontownGlobals.getSignFont())
self.__textGen.setAlign(TextNode.ACenter)
return
def getTitle(self):
return TTLocalizer.CogThiefGameTitle
def getInstructions(self):
return TTLocalizer.CogThiefGameInstructions
def getMaxDuration(self):
return 0
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
self.music = base.loadMusic('phase_4/audio/bgm/MG_CogThief.ogg')
self.initCogInfo()
for barrelIndex in xrange(CTGG.NumBarrels):
barrel = loader.loadModel('phase_4/models/minigames/cogthief_game_gagTank')
barrel.setPos(CTGG.BarrelStartingPositions[barrelIndex])
barrel.setScale(self.BarrelScale)
barrel.reparentTo(render)
barrel.setTag('barrelIndex', str(barrelIndex))
collSphere = CollisionSphere(0, 0, 0, 4)
collSphere.setTangible(0)
name = 'BarrelSphere-%d' % barrelIndex
collSphereName = self.uniqueName(name)
collNode = CollisionNode(collSphereName)
collNode.setFromCollideMask(CTGG.BarrelBitmask)
collNode.addSolid(collSphere)
colNp = barrel.attachNewNode(collNode)
handler = CollisionHandlerEvent()
handler.setInPattern('barrelHit-%fn')
base.cTrav.addCollider(colNp, handler)
self.accept('barrelHit-' + collSphereName, self.handleEnterBarrel)
nodeToHide = '**/gagMoneyTen'
if barrelIndex % 2:
nodeToHide = '**/gagMoneyFive'
iconToHide = barrel.find(nodeToHide)
if not iconToHide.isEmpty():
iconToHide.hide()
self.barrels.append(barrel)
self.gameBoard = loader.loadModel('phase_4/models/minigames/cogthief_game')
self.gameBoard.find('**/floor_TT').hide()
self.gameBoard.find('**/floor_DD').hide()
self.gameBoard.find('**/floor_DG').hide()
self.gameBoard.find('**/floor_MM').hide()
self.gameBoard.find('**/floor_BR').hide()
self.gameBoard.find('**/floor_DL').hide()
zone = self.getSafezoneId()
if zone == ToontownGlobals.ToontownCentral:
self.gameBoard.find('**/floor_TT').show()
elif zone == ToontownGlobals.DonaldsDock:
self.gameBoard.find('**/floor_DD').show()
elif zone == ToontownGlobals.DaisyGardens:
self.gameBoard.find('**/floor_DG').show()
elif zone == ToontownGlobals.MinniesMelodyland:
self.gameBoard.find('**/floor_MM').show()
elif zone == ToontownGlobals.TheBrrrgh:
self.gameBoard.find('**/floor_BR').show()
elif zone == ToontownGlobals.DonaldsDreamland:
self.gameBoard.find('**/floor_DL').show()
else:
self.gameBoard.find('**/floor_TT').show()
self.gameBoard.setPosHpr(0, 0, 0, 0, 0, 0)
self.gameBoard.setScale(1.0)
self.toonSDs = {}
avId = self.localAvId
toonSD = CogThiefGameToonSD.CogThiefGameToonSD(avId, self)
self.toonSDs[avId] = toonSD
toonSD.load()
self.loadCogs()
self.toonHitTracks = {}
self.toonPieTracks = {}
self.sndOof = base.loadSfx('phase_4/audio/sfx/MG_cannon_hit_dirt.ogg')
self.sndRewardTick = base.loadSfx('phase_3.5/audio/sfx/tick_counter.ogg')
self.sndPerfect = base.loadSfx('phase_4/audio/sfx/ring_perfect.ogg')
self.timer = ToontownTimer.ToontownTimer()
self.timer.posInTopRightCorner()
self.timer.hide()
purchaseModels = loader.loadModel('phase_4/models/gui/purchase_gui')
self.jarImage = purchaseModels.find('**/Jar')
self.jarImage.reparentTo(hidden)
self.rewardPanel = DirectLabel(parent=hidden, relief=None, pos=(-0.173, 0.0, -0.55), scale=0.65, text='', text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_pos=(0, -.13), text_font=ToontownGlobals.getSignFont(), image=self.jarImage)
self.rewardPanelTitle = DirectLabel(parent=self.rewardPanel, relief=None, pos=(0, 0, 0.06), scale=0.08, text=TTLocalizer.CannonGameReward, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1))
return
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
del self.music
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
self.gameBoard.removeNode()
del self.gameBoard
for barrel in self.barrels:
barrel.removeNode()
del self.barrels
for avId in self.toonSDs.keys():
toonSD = self.toonSDs[avId]
toonSD.unload()
del self.toonSDs
self.timer.destroy()
del self.timer
self.rewardPanel.destroy()
del self.rewardPanel
self.jarImage.removeNode()
del self.jarImage
del self.sndRewardTick
def onstage(self):
self.notify.debug('onstage')
DistributedMinigame.onstage(self)
self.gameBoard.reparentTo(render)
lt = base.localAvatar
lt.reparentTo(render)
self.__placeToon(self.localAvId)
lt.setSpeed(0, 0)
self.moveCameraToTop()
toonSD = self.toonSDs[self.localAvId]
toonSD.enter()
toonSD.fsm.request('normal')
self.stopGameWalk()
for cogIndex in xrange(self.getNumCogs()):
suit = self.cogInfo[cogIndex]['suit'].suit
pos = self.cogInfo[cogIndex]['pos']
suit.reparentTo(self.gameBoard)
suit.setPos(pos)
suit.nametag.setNametag2d(None)
suit.nametag.setNametag3d(None)
for avId in self.avIdList:
self.toonHitTracks[avId] = Wait(0.1)
self.toonRNGs = []
for i in xrange(self.numPlayers):
self.toonRNGs.append(RandomNumGen.RandomNumGen(self.randomNumGen))
self.sndTable = {'hitBySuit': [None] * self.numPlayers,
'falling': [None] * self.numPlayers}
for i in xrange(self.numPlayers):
self.sndTable['hitBySuit'][i] = base.loadSfx('phase_4/audio/sfx/MG_Tag_C.ogg')
self.sndTable['falling'][i] = base.loadSfx('phase_4/audio/sfx/MG_cannon_whizz.ogg')
base.playMusic(self.music, looping=1, volume=0.8)
self.introTrack = self.getIntroTrack()
self.introTrack.start()
return
def offstage(self):
self.notify.debug('offstage')
self.gameBoard.hide()
self.music.stop()
for barrel in self.barrels:
barrel.hide()
for avId in self.toonSDs.keys():
self.toonSDs[avId].exit()
for avId in self.avIdList:
av = self.getAvatar(avId)
if av:
av.resetLOD()
self.timer.reparentTo(hidden)
self.rewardPanel.reparentTo(hidden)
if self.introTrack.isPlaying():
self.introTrack.finish()
del self.introTrack
DistributedMinigame.offstage(self)
def handleDisabledAvatar(self, avId):
self.notify.debug('handleDisabledAvatar')
self.notify.debug('avatar ' + str(avId) + ' disabled')
self.toonSDs[avId].exit(unexpectedExit=True)
del self.toonSDs[avId]
DistributedMinigame.handleDisabledAvatar(self, avId)
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
if toon:
toon.reparentTo(render)
self.__placeToon(avId)
toon.useLOD(1000)
toonSD = CogThiefGameToonSD.CogThiefGameToonSD(avId, self)
self.toonSDs[avId] = toonSD
toonSD.load()
toonSD.enter()
toonSD.fsm.request('normal')
toon.startSmooth()
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
if not base.config.GetBool('cog-thief-endless', 0):
self.timer.show()
self.timer.countdown(CTGG.GameTime, self.__gameTimerExpired)
self.clockStopTime = None
self.rewardPanel.reparentTo(base.a2dTopRight)
self.scoreMult = MinigameGlobals.getScoreMult(self.cr.playGame.hood.id)
self.__startRewardCountdown()
if self.introTrack.isPlaying():
self.introTrack.finish()
self.gameFSM.request('play')
return
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterPlay(self):
self.notify.debug('enterPlay')
self.startGameWalk()
self.spawnUpdateSuitsTask()
self.accept(base.JUMP, self.controlKeyPressed)
self.pieHandler = CollisionHandlerEvent()
self.pieHandler.setInPattern('pieHit-%fn')
def exitPlay(self):
self.ignore(base.JUMP)
if self.resultIval and self.resultIval.isPlaying():
self.resultIval.finish()
self.resultIval = None
return
def enterCleanup(self):
self.__killRewardCountdown()
if hasattr(self, 'jarIval'):
self.jarIval.finish()
del self.jarIval
for key in self.toonHitTracks:
ival = self.toonHitTracks[key]
if ival.isPlaying():
ival.finish()
self.toonHitTracks = {}
for key in self.toonPieTracks:
ival = self.toonPieTracks[key]
if ival.isPlaying():
ival.finish()
self.toonPieTracks = {}
for key in self.cogInfo:
cogThief = self.cogInfo[key]['suit']
cogThief.cleanup()
self.removeUpdateSuitsTask()
self.notify.debug('enterCleanup')
def exitCleanup(self):
pass
def __placeToon(self, avId):
toon = self.getAvatar(avId)
if toon:
index = self.avIdList.index(avId)
toon.setPos(CTGG.ToonStartingPositions[index])
toon.setHpr(0, 0, 0)
def moveCameraToTop(self):
camera.reparentTo(render)
p = self.cameraTopView
camera.setPosHpr(p[0], p[1], p[2], p[3], p[4], p[5])
base.camLens.setMinFov(46/(4./3.))
camera.setZ(camera.getZ() + base.config.GetFloat('cog-thief-z-camera-adjust', 0.0))
def destroyGameWalk(self):
self.notify.debug('destroyOrthoWalk')
if self.useOrthoWalk:
self.gameWalk.destroy()
del self.gameWalk
else:
self.notify.debug('TODO destroyGameWalk')
def initGameWalk(self):
self.notify.debug('startOrthoWalk')
if self.useOrthoWalk:
def doCollisions(oldPos, newPos, self = self):
x = bound(newPos[0], CTGG.StageHalfWidth, -CTGG.StageHalfWidth)
y = bound(newPos[1], CTGG.StageHalfHeight, -CTGG.StageHalfHeight)
newPos.setX(x)
newPos.setY(y)
return newPos
orthoDrive = OrthoDrive(self.ToonSpeed, customCollisionCallback=doCollisions, instantTurn=True)
self.gameWalk = OrthoWalk(orthoDrive, broadcast=not self.isSinglePlayer())
else:
self.gameWalk = CogThiefWalk.CogThiefWalk('walkDone')
forwardSpeed = self.ToonSpeed / 2.0
base.mouseInterfaceNode.setForwardSpeed(forwardSpeed)
multiplier = forwardSpeed / ToontownGlobals.ToonForwardSpeed
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed * 4)
def initCogInfo(self):
for cogIndex in xrange(self.getNumCogs()):
self.cogInfo[cogIndex] = {'pos': Point3(CTGG.CogStartingPositions[cogIndex]),
'goal': CTGG.NoGoal,
'goalId': CTGG.InvalidGoalId,
'suit': None}
return
def loadCogs(self):
        suitTypes = ['ds', 'ac', 'bc', 'ms']
for suitIndex in xrange(self.getNumCogs()):
st = self.randomNumGen.choice(suitTypes)
suit = CogThief.CogThief(suitIndex, st, self, self.getCogSpeed())
self.cogInfo[suitIndex]['suit'] = suit
def handleEnterSphere(self, colEntry):
if self.gameIsEnding:
return
intoName = colEntry.getIntoNodePath().getName()
fromName = colEntry.getFromNodePath().getName()
debugInto = intoName.split('/')
debugFrom = fromName.split('/')
self.notify.debug('handleEnterSphere gametime=%s %s into %s' % (self.getCurrentGameTime(), debugFrom[-1], debugInto[-1]))
intoName = colEntry.getIntoNodePath().getName()
if 'CogThiefSphere' in intoName:
parts = intoName.split('-')
suitNum = int(parts[1])
self.localToonHitBySuit(suitNum)
def localToonHitBySuit(self, suitNum):
self.notify.debug('localToonHitBySuit %d' % suitNum)
timestamp = globalClockDelta.localToNetworkTime(globalClock.getFrameTime(), bits=32)
pos = self.cogInfo[suitNum]['suit'].suit.getPos()
self.sendUpdate('hitBySuit', [self.localAvId,
timestamp,
suitNum,
pos[0],
pos[1],
pos[2]])
self.showToonHitBySuit(self.localAvId, timestamp)
self.makeSuitRespondToToonHit(timestamp, suitNum)
def hitBySuit(self, avId, timestamp, suitNum, x, y, z):
if not self.hasLocalToon:
return
if self.gameFSM.getCurrentState().getName() not in ['play']:
self.notify.warning('ignoring msg: av %s hit by suit' % avId)
return
if self.gameIsEnding:
return
self.notify.debug('avatar ' + `avId` + ' hit by a suit')
if avId != self.localAvId:
self.showToonHitBySuit(avId, timestamp)
self.makeSuitRespondToToonHit(timestamp, suitNum)
def showToonHitBySuit(self, avId, timestamp):
toon = self.getAvatar(avId)
if toon == None:
return
rng = self.toonRNGs[self.avIdList.index(avId)]
curPos = toon.getPos(render)
oldTrack = self.toonHitTracks[avId]
if oldTrack.isPlaying():
oldTrack.finish()
toon.setPos(curPos)
toon.setZ(self.TOON_Z)
parentNode = render.attachNewNode('mazeFlyToonParent-' + `avId`)
parentNode.setPos(toon.getPos())
toon.reparentTo(parentNode)
toon.setPos(0, 0, 0)
startPos = parentNode.getPos()
dropShadow = toon.dropShadow.copyTo(parentNode)
dropShadow.setScale(toon.dropShadow.getScale(render))
trajectory = Trajectory.Trajectory(0, Point3(0, 0, 0), Point3(0, 0, 50), gravMult=1.0)
oldFlyDur = trajectory.calcTimeOfImpactOnPlane(0.0)
trajectory = Trajectory.Trajectory(0, Point3(0, 0, 0), Point3(0, 0, 50), gravMult=0.55)
flyDur = trajectory.calcTimeOfImpactOnPlane(0.0)
avIndex = self.avIdList.index(avId)
endPos = CTGG.ToonStartingPositions[avIndex]
def flyFunc(t, trajectory, startPos = startPos, endPos = endPos, dur = flyDur, moveNode = parentNode, flyNode = toon):
u = t / dur
moveNode.setX(startPos[0] + u * (endPos[0] - startPos[0]))
moveNode.setY(startPos[1] + u * (endPos[1] - startPos[1]))
flyNode.setPos(trajectory.getPos(t))
flyTrack = Sequence(LerpFunctionInterval(flyFunc, fromData=0.0, toData=flyDur, duration=flyDur, extraArgs=[trajectory]), name=toon.uniqueName('hitBySuit-fly'))
geomNode = toon.getGeomNode()
startHpr = geomNode.getHpr()
destHpr = Point3(startHpr)
hRot = rng.randrange(1, 8)
if rng.choice([0, 1]):
hRot = -hRot
destHpr.setX(destHpr[0] + hRot * 360)
spinHTrack = Sequence(LerpHprInterval(geomNode, flyDur, destHpr, startHpr=startHpr), Func(geomNode.setHpr, startHpr), name=toon.uniqueName('hitBySuit-spinH'))
parent = geomNode.getParent()
rotNode = parent.attachNewNode('rotNode')
geomNode.reparentTo(rotNode)
rotNode.setZ(toon.getHeight() / 2.0)
oldGeomNodeZ = geomNode.getZ()
geomNode.setZ(-toon.getHeight() / 2.0)
startHpr = rotNode.getHpr()
destHpr = Point3(startHpr)
pRot = rng.randrange(1, 3)
if rng.choice([0, 1]):
pRot = -pRot
destHpr.setY(destHpr[1] + pRot * 360)
spinPTrack = Sequence(LerpHprInterval(rotNode, flyDur, destHpr, startHpr=startHpr), Func(rotNode.setHpr, startHpr), name=toon.uniqueName('hitBySuit-spinP'))
i = self.avIdList.index(avId)
soundTrack = Sequence(Func(base.playSfx, self.sndTable['hitBySuit'][i]), Wait(flyDur * (2.0 / 3.0)), SoundInterval(self.sndTable['falling'][i], duration=flyDur * (1.0 / 3.0)), name=toon.uniqueName('hitBySuit-soundTrack'))
def preFunc(self = self, avId = avId, toon = toon, dropShadow = dropShadow):
forwardSpeed = toon.forwardSpeed
rotateSpeed = toon.rotateSpeed
if avId == self.localAvId:
self.stopGameWalk()
else:
toon.stopSmooth()
if forwardSpeed or rotateSpeed:
toon.setSpeed(forwardSpeed, rotateSpeed)
toon.dropShadow.hide()
def postFunc(self = self, avId = avId, oldGeomNodeZ = oldGeomNodeZ, dropShadow = dropShadow, parentNode = parentNode):
if avId == self.localAvId:
base.localAvatar.setPos(endPos)
if hasattr(self, 'gameWalk'):
toon = base.localAvatar
toon.setSpeed(0, 0)
self.startGameWalk()
dropShadow.removeNode()
del dropShadow
toon = self.getAvatar(avId)
if toon:
toon.dropShadow.show()
geomNode = toon.getGeomNode()
rotNode = geomNode.getParent()
baseNode = rotNode.getParent()
geomNode.reparentTo(baseNode)
rotNode.removeNode()
del rotNode
geomNode.setZ(oldGeomNodeZ)
if toon:
toon.reparentTo(render)
toon.setPos(endPos)
parentNode.removeNode()
del parentNode
if avId != self.localAvId:
if toon:
toon.startSmooth()
preFunc()
slipBack = Parallel(Sequence(ActorInterval(toon, 'slip-backward', endFrame=24), Wait(CTGG.LyingDownDuration - (flyDur - oldFlyDur)), ActorInterval(toon, 'slip-backward', startFrame=24)))
if toon.doId == self.localAvId:
slipBack.append(SoundInterval(self.sndOof))
hitTrack = Sequence(Parallel(flyTrack, spinHTrack, spinPTrack, soundTrack), slipBack, Func(postFunc), name=toon.uniqueName('hitBySuit'))
self.notify.debug('hitTrack duration = %s' % hitTrack.getDuration())
self.toonHitTracks[avId] = hitTrack
hitTrack.start(globalClockDelta.localElapsedTime(timestamp))
return
def updateSuitGoal(self, timestamp, inResponseToClientStamp, suitNum, goalType, goalId, x, y, z):
if not self.hasLocalToon:
return
self.notify.debug('updateSuitGoal gameTime=%s timeStamp=%s cog=%s goal=%s goalId=%s (%.1f, %.1f,%.1f)' % (self.getCurrentGameTime(),
timestamp,
suitNum,
CTGG.GoalStr[goalType],
goalId,
x,
y,
z))
cog = self.cogInfo[suitNum]
cog['goal'] = goalType
cog['goalId'] = goalId
newPos = Point3(x, y, z)
cog['pos'] = newPos
suit = cog['suit']
suit.updateGoal(timestamp, inResponseToClientStamp, goalType, goalId, newPos)
def spawnUpdateSuitsTask(self):
self.notify.debug('spawnUpdateSuitsTask')
for cogIndex in self.cogInfo:
suit = self.cogInfo[cogIndex]['suit']
suit.gameStart(self.gameStartTime)
taskMgr.remove(self.UPDATE_SUITS_TASK)
taskMgr.add(self.updateSuitsTask, self.UPDATE_SUITS_TASK)
def removeUpdateSuitsTask(self):
taskMgr.remove(self.UPDATE_SUITS_TASK)
def updateSuitsTask(self, task):
if self.gameIsEnding:
return task.done
for cogIndex in self.cogInfo:
suit = self.cogInfo[cogIndex]['suit']
suit.think()
return task.cont
def makeSuitRespondToToonHit(self, timestamp, suitNum):
cog = self.cogInfo[suitNum]['suit']
cog.respondToToonHit(timestamp)
def handleEnterBarrel(self, colEntry):
if self.gameIsEnding:
return
intoName = colEntry.getIntoNodePath().getName()
fromName = colEntry.getFromNodePath().getName()
debugInto = intoName.split('/')
debugFrom = fromName.split('/')
self.notify.debug('handleEnterBarrel gameTime=%s %s into %s' % (self.getCurrentGameTime(), debugFrom[-1], debugInto[-1]))
if 'CogThiefSphere' in intoName:
parts = intoName.split('-')
cogIndex = int(parts[1])
barrelName = colEntry.getFromNodePath().getName()
barrelParts = barrelName.split('-')
barrelIndex = int(barrelParts[1])
cog = self.cogInfo[cogIndex]['suit']
if cog.barrel == CTGG.NoBarrelCarried and barrelIndex not in self.stolenBarrels:
timestamp = globalClockDelta.localToNetworkTime(globalClock.getFrameTime(), bits=32)
if cog.suit:
cogPos = cog.suit.getPos()
collisionPos = colEntry.getContactPos(render)
if (cogPos - collisionPos).length() > 4:
import pdb
pdb.set_trace()
self.sendUpdate('cogHitBarrel', [timestamp,
cogIndex,
barrelIndex,
cogPos[0],
cogPos[1],
cogPos[2]])
def makeCogCarryBarrel(self, timestamp, inResponseToClientStamp, cogIndex, barrelIndex, x, y, z):
if not self.hasLocalToon:
return
if self.gameIsEnding:
return
self.notify.debug('makeCogCarryBarrel gameTime=%s timeStamp=%s cog=%s barrel=%s (%.1f, %.1f,%.1f)' % (self.getCurrentGameTime(),
timestamp,
cogIndex,
barrelIndex,
x,
y,
z))
barrel = self.barrels[barrelIndex]
self.notify.debug('barrelPos= %s' % barrel.getPos())
cog = self.cogInfo[cogIndex]['suit']
cogPos = Point3(x, y, z)
cog.makeCogCarryBarrel(timestamp, inResponseToClientStamp, barrel, barrelIndex, cogPos)
def makeCogDropBarrel(self, timestamp, inResponseToClientStamp, cogIndex, barrelIndex, x, y, z):
if not self.hasLocalToon:
return
self.notify.debug('makeCogDropBarrel gameTime=%s timeStamp=%s cog=%s barrel=%s (%.1f, %.1f,%.1f)' % (self.getCurrentGameTime(),
timestamp,
cogIndex,
barrelIndex,
x,
y,
z))
barrel = self.barrels[barrelIndex]
self.notify.debug('barrelPos= %s' % barrel.getPos())
cog = self.cogInfo[cogIndex]['suit']
cogPos = Point3(x, y, z)
cog.makeCogDropBarrel(timestamp, inResponseToClientStamp, barrel, barrelIndex, cogPos)
def controlKeyPressed(self):
if self.isToonPlayingHitTrack(self.localAvId):
return
if self.gameIsEnding:
return
if self.getCurrentGameTime() - self.lastTimeControlPressed > self.ControlKeyLimitTime:
self.lastTimeControlPressed = self.getCurrentGameTime()
self.notify.debug('controlKeyPressed')
toonSD = self.toonSDs[self.localAvId]
curState = toonSD.fsm.getCurrentState().getName()
toon = self.getAvatar(self.localAvId)
timestamp = globalClockDelta.localToNetworkTime(globalClock.getFrameTime(), bits=32)
pos = toon.getPos()
heading = toon.getH()
self.sendUpdate('throwingPie', [self.localAvId,
timestamp,
heading,
pos[0],
pos[1],
pos[2]])
self.showToonThrowingPie(self.localAvId, timestamp, heading, pos)
def throwingPie(self, avId, timestamp, heading, x, y, z):
if not self.hasLocalToon:
return
if self.gameFSM.getCurrentState().getName() not in ['play']:
            self.notify.warning('ignoring msg: av %s throwing pie' % avId)
return
self.notify.debug('avatar ' + `avId` + ' throwing pie')
if avId != self.localAvId:
pos = Point3(x, y, z)
self.showToonThrowingPie(avId, timestamp, heading, pos)
def showToonThrowingPie(self, avId, timestamp, heading, pos):
toon = self.getAvatar(avId)
if toon:
tossTrack, pieTrack, flyPie = self.getTossPieInterval(toon, pos[0], pos[1], pos[2], heading, 0, 0, 0)
def removePieFromTraverser(flyPie = flyPie):
if base.cTrav:
if flyPie:
base.cTrav.removeCollider(flyPie)
if avId == self.localAvId:
flyPie.setTag('throwerId', str(avId))
collSphere = CollisionSphere(0, 0, 0, 0.5)
collSphere.setTangible(0)
name = 'PieSphere-%d' % avId
collSphereName = self.uniqueName(name)
collNode = CollisionNode(collSphereName)
collNode.setFromCollideMask(ToontownGlobals.PieBitmask)
collNode.addSolid(collSphere)
colNp = flyPie.attachNewNode(collNode)
colNp.show()
base.cTrav.addCollider(colNp, self.pieHandler)
self.accept('pieHit-' + collSphereName, self.handlePieHitting)
def matchRunningAnim(toon = toon):
toon.playingAnim = None
toon.setSpeed(toon.forwardSpeed, toon.rotateSpeed)
return
newTossTrack = Sequence(tossTrack, Func(matchRunningAnim))
pieTrack = Parallel(newTossTrack, pieTrack)
elapsedTime = globalClockDelta.localElapsedTime(timestamp)
if elapsedTime < 16.0 / 24.0:
elapsedTime = 16.0 / 24.0
pieTrack.start(elapsedTime)
self.toonPieTracks[avId] = pieTrack
def getTossPieInterval(self, toon, x, y, z, h, p, r, power, beginFlyIval = Sequence()):
from toontown.toonbase import ToontownBattleGlobals
from toontown.battle import BattleProps
pie = toon.getPieModel()
pie.setScale(0.9)
flyPie = pie.copyTo(NodePath('a'))
pieName = ToontownBattleGlobals.pieNames[toon.pieType]
pieType = BattleProps.globalPropPool.getPropType(pieName)
animPie = Sequence()
if pieType == 'actor':
animPie = ActorInterval(pie, pieName, startFrame=48)
sound = loader.loadSfx('phase_3.5/audio/sfx/AA_pie_throw_only.ogg')
t = power / 100.0
dist = 100 - 70 * t
time = 1 + 0.5 * t
proj = ProjectileInterval(None, startPos=Point3(0, 0, 0), endPos=Point3(0, dist, 0), duration=time)
relVel = proj.startVel
def getVelocity(toon = toon, relVel = relVel):
return render.getRelativeVector(toon, relVel) * 0.6
toss = Track((0, Sequence(Func(toon.setPosHpr, x, y, z, h, p, r), Func(pie.reparentTo, toon.rightHand), Func(pie.setPosHpr, 0, 0, 0, 0, 0, 0), Parallel(ActorInterval(toon, 'throw', startFrame=48, partName='torso'), animPie), Func(toon.loop, 'neutral'))), (16.0 / 24.0, Func(pie.detachNode)))
fly = Track((14.0 / 24.0, SoundInterval(sound, node=toon)), (16.0 / 24.0, Sequence(Func(flyPie.reparentTo, render), Func(flyPie.setPosHpr, toon, 0.52, 0.97, 2.24, 0, -45, 0), beginFlyIval, ProjectileInterval(flyPie, startVel=getVelocity, duration=6), Func(flyPie.detachNode))))
return (toss, fly, flyPie)
def handlePieHitting(self, colEntry):
if self.gameIsEnding:
return
into = colEntry.getIntoNodePath()
intoName = into.getName()
if 'CogThiefPieSphere' in intoName:
timestamp = globalClockDelta.localToNetworkTime(globalClock.getFrameTime(), bits=32)
parts = intoName.split('-')
suitNum = int(parts[1])
pos = self.cogInfo[suitNum]['suit'].suit.getPos()
if pos in CTGG.CogStartingPositions:
self.notify.debug('Cog %d hit at starting pos %s, ignoring' % (suitNum, pos))
else:
self.sendUpdate('pieHitSuit', [self.localAvId,
timestamp,
suitNum,
pos[0],
pos[1],
pos[2]])
self.makeSuitRespondToPieHit(timestamp, suitNum)
def pieHitSuit(self, avId, timestamp, suitNum, x, y, z):
if not self.hasLocalToon:
return
if self.gameFSM.getCurrentState().getName() not in ['play']:
            self.notify.warning('ignoring msg: av %s pie hit suit' % avId)
return
if self.gameIsEnding:
return
        self.notify.debug('pie from avatar ' + `avId` + ' hit a suit')
if avId != self.localAvId:
self.makeSuitRespondToPieHit(timestamp, suitNum)
def makeSuitRespondToPieHit(self, timestamp, suitNum):
cog = self.cogInfo[suitNum]['suit']
cog.respondToPieHit(timestamp)
def sendCogAtReturnPos(self, cogIndex, barrelIndex):
timestamp = globalClockDelta.localToNetworkTime(globalClock.getFrameTime(), bits=32)
self.sendUpdate('cogAtReturnPos', [timestamp, cogIndex, barrelIndex])
def markBarrelStolen(self, timestamp, inResponseToClientStamp, barrelIndex):
if not self.hasLocalToon:
return
if barrelIndex not in self.stolenBarrels:
self.stolenBarrels.append(barrelIndex)
barrel = self.barrels[barrelIndex]
barrel.hide()
if base.config.GetBool('cog-thief-check-barrels', 1):
if not base.config.GetBool('cog-thief-endless', 0):
if len(self.stolenBarrels) == len(self.barrels):
localStamp = globalClockDelta.networkToLocalTime(timestamp, bits=32)
gameTime = self.local2GameTime(localStamp)
self.clockStopTime = gameTime
self.notify.debug('clockStopTime = %s' % gameTime)
score = int(self.scoreMult * CTGG.calcScore(gameTime) + 0.5)
self.rewardPanel['text'] = str(score)
self.showResults()
def __gameTimerExpired(self):
self.notify.debug('game timer expired')
self.showResults()
def __startRewardCountdown(self):
taskMgr.remove(self.REWARD_COUNTDOWN_TASK)
taskMgr.add(self.__updateRewardCountdown, self.REWARD_COUNTDOWN_TASK)
def __killRewardCountdown(self):
taskMgr.remove(self.REWARD_COUNTDOWN_TASK)
def __updateRewardCountdown(self, task):
curTime = self.getCurrentGameTime()
if self.clockStopTime is not None:
if self.clockStopTime < curTime:
self.notify.debug('self.clockStopTime < curTime %s %s' % (self.clockStopTime, curTime))
self.__killRewardCountdown()
curTime = self.clockStopTime
if curTime > CTGG.GameTime:
curTime = CTGG.GameTime
score = int(self.scoreMult * CTGG.calcScore(curTime) + 0.5)
if not hasattr(task, 'curScore'):
task.curScore = score
result = Task.cont
if hasattr(self, 'rewardPanel'):
self.rewardPanel['text'] = str(score)
if task.curScore != score:
if hasattr(self, 'jarIval'):
self.jarIval.finish()
s = self.rewardPanel.getScale()
self.jarIval = Parallel(Sequence(self.rewardPanel.scaleInterval(0.15, s * 3.0 / 4.0, blendType='easeOut'), self.rewardPanel.scaleInterval(0.15, s, blendType='easeIn')), SoundInterval(self.sndRewardTick), name='cogThiefGameRewardJarThrob')
self.jarIval.start()
task.curScore = score
else:
result = Task.done
return result
def startGameWalk(self):
if self.useOrthoWalk:
self.gameWalk.start()
else:
self.gameWalk.enter()
self.gameWalk.fsm.request('walking')
def stopGameWalk(self):
if self.useOrthoWalk:
self.gameWalk.stop()
else:
self.gameWalk.exit()
def getCogThief(self, cogIndex):
return self.cogInfo[cogIndex]['suit']
def isToonPlayingHitTrack(self, avId):
if avId in self.toonHitTracks:
track = self.toonHitTracks[avId]
if track.isPlaying():
return True
return False
def getNumCogs(self):
result = base.config.GetInt('cog-thief-num-cogs', 0)
if not result:
safezone = self.getSafezoneId()
result = CTGG.calculateCogs(self.numPlayers, safezone)
return result
def getCogSpeed(self):
result = 6.0
safezone = self.getSafezoneId()
result = CTGG.calculateCogSpeed(self.numPlayers, safezone)
return result
def showResults(self):
if not self.gameIsEnding:
self.gameIsEnding = True
for barrel in self.barrels:
barrel.wrtReparentTo(render)
for key in self.cogInfo:
thief = self.cogInfo[key]['suit']
thief.suit.setPos(100, 0, 0)
thief.suit.hide()
self.__killRewardCountdown()
self.stopGameWalk()
numBarrelsSaved = len(self.barrels) - len(self.stolenBarrels)
resultStr = ''
if numBarrelsSaved == len(self.barrels):
resultStr = TTLocalizer.CogThiefPerfect
elif numBarrelsSaved > 1:
resultStr = TTLocalizer.CogThiefBarrelsSaved % {'num': numBarrelsSaved}
elif numBarrelsSaved == 1:
resultStr = TTLocalizer.CogThiefBarrelSaved % {'num': numBarrelsSaved}
else:
resultStr = TTLocalizer.CogThiefNoBarrelsSaved
perfectTextSubnode = hidden.attachNewNode(self.__genText(resultStr))
perfectText = hidden.attachNewNode('perfectText')
perfectTextSubnode.reparentTo(perfectText)
frame = self.__textGen.getCardActual()
offsetY = -abs(frame[2] + frame[3]) / 2.0
perfectTextSubnode.setPos(0, 0, offsetY)
perfectText.setColor(1, 0.1, 0.1, 1)
def fadeFunc(t, text = perfectText):
text.setColorScale(1, 1, 1, t)
def destroyText(text = perfectText):
text.removeNode()
def safeGameOver(self = self):
if not self.frameworkFSM.isInternalStateInFlux():
self.gameOver()
textTrack = Sequence(Func(perfectText.reparentTo, aspect2d), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=0.3, startScale=0.0), LerpFunctionInterval(fadeFunc, fromData=0.0, toData=1.0, duration=0.5)), Wait(2.0), Parallel(LerpScaleInterval(perfectText, duration=0.5, scale=1.0), LerpFunctionInterval(fadeFunc, fromData=1.0, toData=0.0, duration=0.5, blendType='easeIn')), Func(destroyText), WaitInterval(0.5), Func(safeGameOver))
if numBarrelsSaved == len(self.barrels):
soundTrack = SoundInterval(self.sndPerfect)
else:
soundTrack = Sequence()
self.resultIval = Parallel(textTrack, soundTrack)
self.resultIval.start()
def __genText(self, text):
self.__textGen.setText(text)
return self.__textGen.generate()
def getIntroTrack(self):
base.camera.setPosHpr(0, -13.66, 13.59, 0, -51.6, 0)
result = Sequence(Wait(2), LerpPosHprInterval(base.camera, 13, Point3(self.cameraTopView[0], self.cameraTopView[1], self.cameraTopView[2]), Point3(self.cameraTopView[3], self.cameraTopView[4], self.cameraTopView[5]), blendType='easeIn'))
return result
|
|
"""
Functional tests using Selenium.
See: ``docs/testing/selenium.rst`` for details.
"""
from django.utils.text import slugify
import time
from django.conf import settings
from django.utils.unittest.case import skipUnless
from bluebottle.bb_projects.models import ProjectPhase
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.projects import ProjectPhaseFactory
from onepercentclub.tests.factory_models.project_factories import OnePercentProjectFactory
from onepercentclub.tests.utils import OnePercentSeleniumTestCase
from selenium.webdriver.common.keys import Keys
@skipUnless(getattr(settings, 'SELENIUM_TESTS', False),
'Selenium tests disabled. Set SELENIUM_TESTS = True in your settings.py to enable.')
class PositiveDonationFlow(OnePercentSeleniumTestCase):
def setUp(self):
self.init_projects()
self.user = BlueBottleUserFactory.create()
campaign_phase = ProjectPhase.objects.get(name='Campaign')
for title in [u'Mobile payments for everyone!', u'Schools for children', u'Women first']:
project = OnePercentProjectFactory.create(title=title, owner=self.user,
amount_asked=1000, status=campaign_phase)
self.login(self.user.email, 'testing')
def test_positive_flow_mockdeal(self, lang_code=None):
"""
Test a positive donation flow for a donation paid with iDeal
"""
self.visit_path('/projects/schools-for-children', lang_code)
# Assert visual donation elements on project page
self.assert_css(".amount-donated")
self.assert_css(".project-fund-amount-slider")
# Bring up the donation modal
self.wait_for_element_css('a[data-action-type=donate]')
button = self.browser.find_by_css('a[data-action-type=donate]')[0]
button.click()
# Verify the elements of the donation modal
self.wait_for_element_css('input.donation-input')
donation_input = self.browser.find_by_css("input.donation-input").first
# Make a donation of 65 euros (default is 25)
donation_input.fill('65')
self.assertEqual(int(donation_input.value), 65)
self.assert_css(".donation-buttons")
self.assert_css("#hideMyName")
# Jump to next step
self.scroll_to_and_click_by_css(".donate-btn")
self.assert_css(".payment-tabs")
self.assert_css(".payment-tab-content")
# If you select only the li, the click will fail because the modal closes
ideal_payments = self.browser.find_by_css("li.ideal label")
ideal_payments[0].click()
self.assert_css(".ember-select")
self.browser.select('mockiDealSelect', 'huey')
self.scroll_to_and_click_by_css(".payment-btn")
time.sleep(2)
self.assertTrue(self.browser.is_text_present('This is a Mock Payment Service provider', wait_time=20))
self.scroll_to_and_click_by_css('a.btn-ok')
self.assertTrue(self.browser.is_text_present('Thanks for your support', wait_time=30))
text = 'I made a donation with mockdeal! Good luck!'
self.assert_css('.wallpost-textarea')
self.scroll_to_and_click_by_css('.wallpost-textarea')
self.browser.find_by_css('.wallpost-textarea').type(text)
self.browser.find_by_css(".wallpost-buttons .btn")[1].click()
wallpost = self.browser.driver.find_element_by_css_selector('section#wallposts article:first-of-type')
wallpost_text = wallpost.find_element_by_css_selector('.wallpost-body').text
self.assertEqual(wallpost_text, text)
author = wallpost.find_element_by_css_selector(".user-name").text
self.assertEqual(author.lower(), self.user.full_name.lower())
@skipUnless(getattr(settings, 'SELENIUM_TESTS', False),
            'Selenium tests disabled. Set SELENIUM_TESTS = True in your settings.py to enable.')
class LoginDonationFlow(OnePercentSeleniumTestCase):
def setUp(self):
self.init_projects()
self.user = BlueBottleUserFactory.create()
campaign_phase = ProjectPhase.objects.get(name='Campaign')
for title in [u'Mobile payments for everyone!', u'Schools for children', u'Women first']:
project = OnePercentProjectFactory.create(title=title, owner=self.user,
amount_asked=1000, status=campaign_phase)
self.visit_path('/projects/schools-for-children')
# Assert visual donation elements on project page
self.assert_css(".amount-donated")
self.assert_css(".project-fund-amount-slider")
# Bring up the donation modal
self.assert_css('a[data-action-type="donate"]')
self.scroll_to_and_click_by_css('a[data-action-type=donate]')
# Verify the elements of the donation modal
self.wait_for_element_css('input.donation-input')
donation_input = self.browser.find_by_css("input.donation-input").first
# Make a donation of 55 euros (default is 25)
donation_input.fill('55')
self.assertEqual(int(donation_input.value), 55)
self.assert_css(".donation-buttons")
self.assert_css("#hideMyName")
# Jump to next step
self.scroll_to_and_click_by_css(".donate-btn")
def tearDown(self):
self.close_modal()
def test_signup_donation_flow(self):
"""
Test signup flow for a donation
"""
user_data = {
'first_name': 'Bob',
'last_name': 'Brown',
'password': 'testing',
'email': '[email protected]'
}
# Wait for the signup modal
self.assert_css("input[type=email]")
# There should be two email fields in the signup form
self.assertEqual(len(self.browser.find_by_css('input[type=email]')), 2)
# Signup
self.browser.fill('first-name', user_data['first_name'])
self.browser.fill('last-name', user_data['last_name'])
self.browser.fill('email', user_data['email'])
self.browser.fill('email-confirmation', user_data['email'])
self.browser.fill('password', user_data['password'])
self.browser.driver.find_element_by_name('signup').click()
# Assert the payment modal loads
self.assert_css('.btn.payment-btn')
def test_login_donation_flow(self):
"""
Test login flow for a donation
"""
# Wait for the signup modal
self.assert_css("input[type=email]")
# There should be two email fields in the signup form
self.assertEqual(len(self.browser.find_by_css('input[type=email]')), 2)
# Load the login modal
self.browser.driver.find_element_by_link_text('Sign in here.').click()
# Wait for the user login modal to appear
self.assert_css('input[name=username]')
        # There should be a username field in the login form
self.assertEqual(len(self.browser.find_by_css('input[name=username]')), 1)
# Login as test user
self.browser.find_by_css('input[name=username]').first.type(self.user.email)
self.browser.find_by_css('input[name=password]').first.type('testing')
self.browser.driver.find_element_by_name('login').click()
# Assert the payment modal loads
self.assert_css('.btn.payment-btn')
self.logout()
def test_guest_donation_flow(self):
"""
Test guest flow for a donation
"""
# Wait for the signup modal
self.assert_css("input[type=email]")
# There should be two email fields in the signup form
self.assertEqual(len(self.browser.find_by_css('input[type=email]')), 2)
# Select guest donation
self.browser.driver.find_element_by_link_text('donate as guest.').click()
# Assert the payment modal loads
self.assert_css('.btn.payment-btn')
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
import shutil
import numpy as np
from monty.json import MontyDecoder
from pymatgen.io.vasp.sets import MITVaspInputSet, MITHSEVaspInputSet, \
MPVaspInputSet, MITGGAVaspInputSet, MITNEBVaspInputSet,\
MPStaticVaspInputSet, MPNonSCFVaspInputSet, MITMDVaspInputSet,\
MPHSEVaspInputSet, MPBSHSEVaspInputSet, MPStaticDielectricDFPTVaspInputSet,\
MPOpticsNonSCFVaspInputSet
from pymatgen.io.vasp.inputs import Poscar, Incar
from pymatgen import Specie, Lattice, Structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
dec = MontyDecoder()
class MITMPVaspInputSetTest(unittest.TestCase):
def setUp(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitparamset = MITVaspInputSet()
self.mitparamset_unsorted = MITVaspInputSet(sort_structure=False)
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
self.mitggaparam = MITGGAVaspInputSet()
self.mpstaticparamset = MPStaticVaspInputSet()
self.mpnscfparamsetu = MPNonSCFVaspInputSet(
{"NBANDS": 50}, mode="Uniform")
self.mpnscfparamsetl = MPNonSCFVaspInputSet(
{"NBANDS": 60}, mode="Line")
self.mphseparamset = MPHSEVaspInputSet()
self.mpbshseparamsetl = MPBSHSEVaspInputSet(mode="Line")
self.mpbshseparamsetu = MPBSHSEVaspInputSet(
mode="Uniform", added_kpoints=[[0.5, 0.5, 0.0]])
self.mpdielparamset = MPStaticDielectricDFPTVaspInputSet()
def test_get_poscar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
s_unsorted = self.mitparamset_unsorted.get_poscar(struct).structure
s_sorted = self.mitparamset.get_poscar(struct).structure
self.assertEqual(s_unsorted[0].specie.symbol, 'Fe')
self.assertEqual(s_sorted[0].specie.symbol, 'Mn')
def test_get_potcar_symbols(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
syms = self.paramset.get_potcar_symbols(struct)
self.assertEqual(syms, ['Fe_pv', 'P', 'O'])
syms = MPVaspInputSet(sort_structure=False).get_potcar_symbols(struct)
self.assertEqual(syms, ['P', 'Fe_pv', 'O'])
def test_false_potcar_hash(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe_pv'
self.assertRaises(ValueError, self.mitparamset.get_potcar, struct, check_hash=True)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe'
def test_lda_potcar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe"], coords)
p = MITVaspInputSet(potcar_functional="LDA").get_potcar(struct)
self.assertEqual(p.functional, 'LDA')
def test_get_nelect(self):
coords = [[0]*3, [0.5]*3, [0.75]*3]
lattice = Lattice.cubic(4)
s = Structure(lattice, ['Si', 'Si', 'Fe'], coords)
self.assertAlmostEqual(MITVaspInputSet().get_nelect(s), 16)
def test_get_incar(self):
incar = self.paramset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [5.3, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar = self.mitparamset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar_gga = self.mitggaparam.get_incar(self.struct)
self.assertNotIn("LDAU", incar_gga)
incar_static = self.mpstaticparamset.get_incar(self.struct)
self.assertEqual(incar_static["NSW"], 0)
incar_nscfl = self.mpnscfparamsetl.get_incar(self.struct)
self.assertEqual(incar_nscfl["NBANDS"], 60)
incar_nscfu = self.mpnscfparamsetu.get_incar(self.struct)
self.assertEqual(incar_nscfu["ISYM"], 0)
incar_hse = self.mphseparamset.get_incar(self.struct)
self.assertEqual(incar_hse['LHFCALC'], True)
self.assertEqual(incar_hse['HFSCREEN'], 0.2)
incar_hse_bsl = self.mpbshseparamsetl.get_incar(self.struct)
self.assertEqual(incar_hse_bsl['LHFCALC'], True)
self.assertEqual(incar_hse_bsl['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsl['NSW'], 0)
incar_hse_bsu = self.mpbshseparamsetu.get_incar(self.struct)
self.assertEqual(incar_hse_bsu['LHFCALC'], True)
self.assertEqual(incar_hse_bsu['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsu['NSW'], 0)
incar_diel = self.mpdielparamset.get_incar(self.struct)
self.assertEqual(incar_diel['IBRION'], 8)
self.assertEqual(incar_diel['LEPSILON'], True)
si = 14
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
#Silicon structure for testing.
latt = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(latt, [si, si], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn("LDAU", incar)
incar = self.mithseparamset.get_incar(self.struct)
self.assertTrue(incar['LHFCALC'])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn('LDAU', incar)
#check fluorides
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
self.assertEqual(incar['MAGMOM'], [5, 0.6])
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0])
#Make sure this works with species.
struct = Structure(lattice, ["Fe2+", "O2-"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
struct = Structure(lattice, ["Fe", "Mn"], coords,
site_properties={'magmom': (5.2, -4.5)})
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mpstaticparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mitparamset_unsorted.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5.2, -4.5])
struct = Structure(lattice, [Specie("Fe", 2, {'spin': 4.1}), "Mn"],
coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5, 4.1])
incar = self.mpnscfparamsetl.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [4, 3])
incar = self.mpnscfparamsetu.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[100, 0.6])
#sulfide vs sulfate test
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.25, 0.5, 0])
struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [1.9, 0])
        #Make sure Materials Project sulfides are ok.
self.assertNotIn('LDAUU', self.paramset.get_incar(struct))
self.assertNotIn('LDAUU', self.mpstaticparamset.get_incar(struct))
struct = Structure(lattice, ["Fe", "S", "O"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
        #Make sure Materials Project sulfates are ok.
self.assertEqual(self.paramset.get_incar(struct)['LDAUU'], [5.3, 0, 0])
self.assertEqual(self.mpnscfparamsetl.get_incar(struct)['LDAUU'],
[5.3, 0, 0])
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[10, -5, 0.6])
def test_optics(self):
self.mpopticsparamset = MPOpticsNonSCFVaspInputSet.from_previous_vasp_run(
'{}/static_silicon'.format(test_dir), output_dir='optics_test_dir',
nedos=1145)
self.assertTrue(os.path.exists('optics_test_dir/CHGCAR'))
incar = Incar.from_file('optics_test_dir/INCAR')
self.assertTrue(incar['LOPTICS'])
self.assertEqual(incar['NEDOS'], 1145)
#Remove the directory in which the inputs have been created
shutil.rmtree('optics_test_dir')
def test_get_kpoints(self):
kpoints = self.paramset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mitparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpstaticparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[6, 6, 4]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpnscfparamsetl.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 140)
self.assertEqual(kpoints.style, 'Reciprocal')
kpoints = self.mpnscfparamsetu.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 168)
kpoints = self.mpbshseparamsetl.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 164)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[26][0], 0.0714285714286)
self.assertAlmostEqual(kpoints.kpts[26][1], 0.0)
self.assertAlmostEqual(kpoints.kpts[26][2], 0.0)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.5)
kpoints = self.mpbshseparamsetu.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 25)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.0)
def test_get_all_vasp_input(self):
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], -5)
self.struct.make_supercell(4)
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], 0)
def test_to_from_dict(self):
self.mitparamset = MITVaspInputSet()
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
d = self.mitparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 4)
d = self.mitggaparam.as_dict()
v = dec.process_decoded(d)
self.assertNotIn("LDAUU", v.incar_settings)
d = self.mithseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.mphseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.paramset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 5.3)
d = self.userparamset.as_dict()
v = dec.process_decoded(d)
#self.assertEqual(type(v), MPVaspInputSet)
self.assertEqual(v.incar_settings["MAGMOM"],
{"Fe": 10, "S": -5, "Mn3+": 100})
class MITMDVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitmdparam = MITMDVaspInputSet(300, 1200, 10000)
def test_get_potcar_symbols(self):
syms = self.mitmdparam.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.mitmdparam.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 2.4e-5)
def test_get_kpoints(self):
kpoints = self.mitmdparam.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [(1, 1, 1)])
self.assertEqual(kpoints.style, 'Gamma')
def test_to_from_dict(self):
d = self.mitmdparam.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MITMDVaspInputSet)
self.assertEqual(v.incar_settings["TEBEG"], 300)
class MITNEBVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.vis = MITNEBVaspInputSet(nimages=10, hubbard_off=True)
def test_get_potcar_symbols(self):
syms = self.vis.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.vis.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 0.00005)
def test_get_kpoints(self):
kpoints = self.vis.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
def test_to_from_dict(self):
d = self.vis.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["IMAGES"], 10)
def test_write_inputs(self):
c1 = [[0.5] * 3, [0.9] * 3]
c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]
s1 = Structure(Lattice.cubic(5), ['Si', 'Si'], c1)
s2 = Structure(Lattice.cubic(5), ['Si', 'Si'], c2)
structs = []
for s in s1.interpolate(s2, 3, pbc=True):
structs.append(Structure.from_sites(s.sites,
to_unit_cell=True))
fc = self.vis._process_structures(structs)[2].frac_coords
self.assertTrue(np.allclose(fc, [[0.5]*3,[0.9, 1.033333, 1.0333333]]))
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import
import collections
import time
from .cache import Cache, _deprecated
class _Link(object):
__slots__ = ('key', 'expire', 'next', 'prev')
def __init__(self, key=None, expire=None):
self.key = key
self.expire = expire
def __reduce__(self):
return _Link, (self.key, self.expire)
def unlink(self):
next = self.next
prev = self.prev
prev.next = next
next.prev = prev
class _Timer(object):
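    # Freezes time for nested ``with`` blocks: the first ``__enter__`` samples
    # the wrapped timer once, and every nested use (and plain call) reuses that
    # value until the outermost ``__exit__``.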
def __init__(self, timer):
self.__timer = timer
self.__nesting = 0
def __call__(self):
if self.__nesting == 0:
return self.__timer()
else:
return self.__time
def __enter__(self):
if self.__nesting == 0:
self.__time = time = self.__timer()
else:
time = self.__time
self.__nesting += 1
return time
def __exit__(self, *exc):
self.__nesting -= 1
def __reduce__(self):
return _Timer, (self.__timer,)
def __getattr__(self, name):
return getattr(self.__timer, name)
class TTLCache(Cache):
"""LRU Cache implementation with per-item time-to-live (TTL) value."""
def __init__(self, maxsize, ttl, timer=time.time, missing=_deprecated,
getsizeof=None):
Cache.__init__(self, maxsize, missing, getsizeof)
self.__root = root = _Link()
root.prev = root.next = root
self.__links = collections.OrderedDict()
self.__timer = _Timer(timer)
self.__ttl = ttl
def __contains__(self, key):
try:
link = self.__links[key] # no reordering
except KeyError:
return False
else:
return not (link.expire < self.__timer())
def __getitem__(self, key, cache_getitem=Cache.__getitem__):
try:
link = self.__getlink(key)
except KeyError:
expired = False
else:
expired = link.expire < self.__timer()
if expired:
return self.__missing__(key)
else:
return cache_getitem(self, key)
def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
with self.__timer as time:
self.expire(time)
cache_setitem(self, key, value)
try:
link = self.__getlink(key)
except KeyError:
self.__links[key] = link = _Link(key)
else:
link.unlink()
link.expire = time + self.__ttl
link.next = root = self.__root
link.prev = prev = root.prev
prev.next = root.prev = link
def __delitem__(self, key, cache_delitem=Cache.__delitem__):
cache_delitem(self, key)
link = self.__links.pop(key)
link.unlink()
if link.expire < self.__timer():
raise KeyError(key)
def __iter__(self):
root = self.__root
curr = root.next
while curr is not root:
# "freeze" time for iterator access
with self.__timer as time:
if not (curr.expire < time):
yield curr.key
curr = curr.next
def __len__(self):
root = self.__root
curr = root.next
time = self.__timer()
count = len(self.__links)
while curr is not root and curr.expire < time:
count -= 1
curr = curr.next
return count
def __setstate__(self, state):
self.__dict__.update(state)
root = self.__root
root.prev = root.next = root
for link in sorted(self.__links.values(), key=lambda obj: obj.expire):
link.next = root
link.prev = prev = root.prev
prev.next = root.prev = link
self.expire(self.__timer())
def __repr__(self, cache_repr=Cache.__repr__):
with self.__timer as time:
self.expire(time)
return cache_repr(self)
@property
def currsize(self):
with self.__timer as time:
self.expire(time)
return super(TTLCache, self).currsize
@property
def timer(self):
"""The timer function used by the cache."""
return self.__timer
@property
def ttl(self):
"""The time-to-live value of the cache's items."""
return self.__ttl
def expire(self, time=None):
"""Remove expired items from the cache."""
if time is None:
time = self.__timer()
root = self.__root
curr = root.next
links = self.__links
cache_delitem = Cache.__delitem__
while curr is not root and curr.expire < time:
cache_delitem(self, curr.key)
del links[curr.key]
next = curr.next
curr.unlink()
curr = next
def clear(self):
with self.__timer as time:
self.expire(time)
Cache.clear(self)
def get(self, *args, **kwargs):
with self.__timer:
return Cache.get(self, *args, **kwargs)
def pop(self, *args, **kwargs):
with self.__timer:
return Cache.pop(self, *args, **kwargs)
def setdefault(self, *args, **kwargs):
with self.__timer:
return Cache.setdefault(self, *args, **kwargs)
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used that
has not already expired.
"""
with self.__timer as time:
self.expire(time)
try:
key = next(iter(self.__links))
except StopIteration:
raise KeyError('%s is empty' % self.__class__.__name__)
else:
return (key, self.pop(key))
if hasattr(collections.OrderedDict, 'move_to_end'):
def __getlink(self, key):
value = self.__links[key]
self.__links.move_to_end(key)
return value
else:
def __getlink(self, key):
value = self.__links.pop(key)
self.__links[key] = value
return value
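

# Minimal usage sketch (illustration only, not part of the library): a fake
# counter-based timer makes expiry deterministic instead of depending on
# wall-clock time.
def _example_ttlcache():
    clock = [0]
    cache = TTLCache(maxsize=2, ttl=10, timer=lambda: clock[0])
    cache['a'] = 1            # expires at t=10
    clock[0] += 5
    cache['b'] = 2            # expires at t=15
    clock[0] += 6             # now t=11: 'a' is expired, 'b' is not
    return ('a' in cache, cache.get('b'))   # -> (False, 2)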
|
|
#!/usr/bin/env python
"""
Subband Autocorrelation Classification (SAcC) Pitch Tracker feature
Based on Matlab code by Byung Suk Lee and Dan Ellis
Python port based on SRI Feature template.
2013-08-25 Dan Ellis [email protected]
"""
import os
import numpy as np
import scipy.signal
import scipy.io
import scipy.cluster.vq
# For SRI's wavreading code
import scipy.io.wavfile as wav
import mlp
import sbpca
################## from sbpca_viterbi.m
def viterbi(posteriors, hmm_vp = 0.9):
"""
% path = sbpca_viterbi(posteriors, hmm_vp)
% Find the best (viterbi) path through a set of pitch class
% posteriors, for the SAcC pitch tracker.
% <posteriors> is <nbins> x <nframes>
    % <hmm_vp> is the scaling applied to the unvoiced-state posterior (default 0.9)
% 2013-08-23 Dan Ellis [email protected] sbpca refactor cleanup
"""
# Equalizing variance in log-posterior domain per BSL implementation
sposts = np.exp(standardize(np.log(posteriors)))
# Set up data for decode
nbins, nframes = np.shape(sposts)
npch = nbins - 1 # number of actual pitches (i.e., all except unvoiced)
# Parameters
uvtrp = 0.9 # prob of going from unvoiced to voiced (9x larger
# than BSL's code, to compensate for normalization of txmat)
vutrp = 0.01 # prob of going from voiced to unvoiced
transfloor = np.exp(-10.0) # smallest transition probability
wdyn = 3.0 # laplacian half-width for transition probs
#hmm_vp = 0.9 # scaling of unvoiced state
# Transition matrix - row = from, column = to
# A matrix of how far apart two bins are
ijdiff = np.abs(np.tile(range(npch), (npch, 1)).transpose() - range(npch))
# pitch-to-pitch transitions are laplacian
# summed in log-domain, per BSL...
pptxmat = np.log(transfloor + np.exp(np.exp(-np.abs(ijdiff)/wdyn)))
# normalize rows of pitch-to-pitch transitions to be true probabilities
pptxmat /= pptxmat.sum(axis=1)[:, np.newaxis]
# transmat wraps unvoiced state around pitch-to-pitch
transmat = np.vstack( (np.r_[(1-uvtrp), uvtrp/npch*np.ones(npch)],
np.hstack((vutrp*np.ones( (npch, 1) ),
(1-vutrp)*pptxmat))))
# penalize unvoiced posterior & renormalize
sposts[0,] = hmm_vp * sposts[0,]
# renormalize columns
sposts /= sposts.sum(axis=0)
priors = np.ones(nbins)/nbins
return viterbi_path(sposts, priors, transmat)
#%%%%%%%%%%%%%%%%%%%%%%%
def standardize(array):
"""
N = standardize(array)
% Make each column of an array have a zero mean and unit sd
% was "normalise" by [email protected] (not to confuse with kpm's normalise)
"""
stddev = array.std(axis=0)
# normalize each column
return (array - array.mean(axis=0))/(stddev+(stddev==0))
################## from viterbi_path.m
def viterbi_path(posteriors, priors, transmat):
"""
% path = viterbi_path(posteriors, priors, transmat)
% Find best path through spectrogram-like posteriors (one
% column per time frame). Transmat is row from, column to.
% Linear probabilities (not log).
% Return sequence of state indices.
% 2013-08-23 Dan Ellis [email protected] sbpca refactor cleanup
"""
(nbins, nframes) = np.shape(posteriors)
# Array to hold traceback
prev = np.zeros( (nbins, nframes) , int)
# <pstate> holds normalized probability-to-date of landing in this
# state along best path
pstate = priors*posteriors[:, 0]
# normalize probs of best path to each state, to avoid underflow
pstate = pstate/np.sum(pstate)
use_log = True
#print "use_log=", use_log
# now calculate forward
if use_log:
# log domain
logtransmat = np.log(transmat.transpose())
pstate = np.log(pstate)
for i in range(1, nframes):
probs = (logtransmat
+ np.tile(np.log(posteriors[:, i]),(nbins, 1)).transpose()
+ np.tile(pstate, (nbins, 1)))
pstate = np.max(probs, axis=1)
prev[:, i] = np.argmax(probs, axis=1)
# Renormalize to keep probabilities in a sensible range
pstate = pstate - np.mean(pstate)
else:
# linear likelihood domain
for i in range(1, nframes):
# Find most likely combination of previous prob-to-path,
# and transition
probs = transmat.transpose() * np.outer(posteriors[:, i], pstate)
pstate = np.max(probs, axis=1)
prev[:, i] = np.argmax(probs, axis=1)
# Renormalize to keep probabilities in a sensible range
pstate = pstate/sum(pstate)
# traceback best precedent matrix to get best path
path = np.zeros(nframes, int)
# best final state
path[nframes-1] = np.argmax(pstate)
# .. and all its predecessors
for pth in range(nframes, 1, -1):
path[pth-2] = prev[path[pth-1], pth-1]
return path
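
# Toy decode sketch (illustration only, values made up): three states, four
# frames, a "sticky" transition matrix; the decoded path should be [0, 0, 1, 1].
def _example_viterbi_path():
    posteriors = np.array([[0.8, 0.7, 0.1, 0.1],
                           [0.1, 0.2, 0.8, 0.7],
                           [0.1, 0.1, 0.1, 0.2]])
    priors = np.ones(3) / 3.0
    transmat = np.array([[0.8, 0.1, 0.1],    # row = from, column = to
                         [0.1, 0.8, 0.1],
                         [0.1, 0.1, 0.8]])
    return viterbi_path(posteriors, priors, transmat)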
#####################################
def dithering(data, noiselevel=1e-3):
"""
% y = dithering(x, noiselevel)
% Add low-level noise to x to avoid digital zeros
% noiselevel is scaling factor below SD of signal at which
% noise is added (default 1e-3).
"""
    # Ensure a consistent random sequence on every call to dithering()
np.random.seed(0)
# Generate the dither sequence
xlen = len(data)
dither = np.random.rand(xlen) + np.random.rand(xlen) - 1
    # scale the dither to noiselevel * SD of the signal (default 1e-3, ~60 dB down)
spow = np.std(data)
#print "dithering off"
#return x
#print "dithering at 1e-3"
#return data + 1e-6 * spow * dither
return data + noiselevel * spow * dither
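
# Quick numeric sketch (illustration only): dithering a signal containing exact
# digital zeros turns them into tiny nonzero values while leaving the overall
# level essentially unchanged.
def _example_dithering():
    sig = np.r_[np.zeros(8), 0.5 * np.ones(8)]
    return dithering(sig, noiselevel=1e-3)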
# For reading SPH (NIST Sphere) files
from scikits.audiolab import Sndfile
def readsph(filename):
""" read in audio data from a sphere file. Return d, sr """
f = Sndfile(filename, 'r')
data = f.read_frames(f.nframes, dtype=np.float32)
sr = f.samplerate
return data, sr
def readwav(filename):
""" read in audio data from a wav file. Return d, sr """
# Read in wav file
sr, wavd = wav.read(filename)
# normalize short ints to floats of -1 / 1
data = np.asfarray(wavd) / 32768.0
return data, sr
def audioread(filename, targetsr=None):
"""
Read a soundfile of either WAV or SPH, based on filename
returns d, sr
"""
fileName, fileExtension = os.path.splitext(filename)
if fileExtension == ".wav":
data, sr = readwav(filename)
elif fileExtension == ".sph":
data, sr = readsph(filename)
else:
raise NameError( ("Cannot determine type of infile " +
filename) )
# Maybe fix sample rate
#if srate == 16000 and self.sbpca.srate == 8000:
if targetsr != None and sr != targetsr:
# Right now, only downsample by integer numbers
decimfact = int(np.round(sr/targetsr))
data = scipy.signal.decimate(np.r_[data[1:], 0],
decimfact, ftype='fir')
# slight trim to ss.decimate to make its phase align
# to matlab's resample
# for case of resampling 16 kHz down to 8 kHz
delay = 7
data = np.r_[data[delay:], np.zeros(delay)]
sr = sr/decimfact
return data, sr
# Main class
class SAcC(object):
""" Compute Subband Autocorrelation Classification (SAcC) pitch track
"""
def __init__(self, config):
""" Initialize default values """
#self.config = config
# initialize the sbpca subsystem
self.sbpca = sbpca.SbPca(config)
# initialize the mlp subsytem
self.net = mlp.MLP(config['wgt_file'], config['norms_file'])
# parameters specific to SAcC part
self.ptchtab = np.r_[0, np.loadtxt(config['pcf_file'])]
self.hmm_vp = config['hmm_vp']
self.n_s = 10.0
self.start_utt = 0
self.write_rownum = False
self.write_time = False
self.write_sbac = False
self.write_sbpca = False
self.write_posteriors = False
self.write_pitch = True
self.write_pvx = True
self.dither_level = 1e-3
if 'n_s' in config:
self.n_s = config['n_s']
if 'start_utt' in config:
self.start_utt = config['start_utt']
if 'write_rownum' in config:
self.write_rownum = config['write_rownum']
if 'write_time' in config:
self.write_time = config['write_time']
if 'write_sbac' in config:
self.write_sbac = config['write_sbac']
if 'write_sbpca' in config:
self.write_sbpca = config['write_sbpca']
if 'write_posteriors' in config:
self.write_posteriors = config['write_posteriors']
if 'write_pitch' in config:
self.write_pitch = config['write_pitch']
if 'write_pvx' in config:
self.write_pvx = config['write_pvx']
# added 2014-04-10
if 'dither_level' in config:
self.dither_level = config['dither_level']
def __call__(self, filename):
""" This is called for each file """
        # Read the audio (wav via scipy, sph via scikits.audiolab),
        # resampling down to the sbpca rate if needed
data, srate = audioread(filename, targetsr=self.sbpca.srate)
assert srate == self.sbpca.srate
# Actually run it
ftrs = self.sacc(data, srate)
# Return the features
return ftrs
def sacc(self, data, srate):
"""
Run the SAcC pitch tracker on the specified waveform/sampling rate
using the configuration specified on construction
Return two vectors, pitch (in Hz) and P(voicing) (posterior)
"""
# Pad out d with zeros so get right number of winsamps frames
# (and add unique dithering noise over whole signal)
xdat = dithering(np.r_[data, np.zeros(self.sbpca.maxlags)],
self.dither_level)
# Pre-allocate whole activations matrix
nframes = self.sbpca.nframes(len(data))
# acts = np.zeros( (len(self.net.obias), nframes) )
acts = np.zeros( (len(self.net.obias), 0) )
# (nChs, nDim, nLag) = np.shape(self.sbpca.mapping)
# if self.output_pcas:
# ftrs = np.zeros( (nChs, nDim, nframes) )
# elif self.output_autocos:
# ftrs = np.zeros( (nChs, nLag, nframes) )
# else:
# ftrs = np.zeros( (2, nframes) )
framesamps = self.sbpca.framesamps
# How many frames to process each time in loop
#blockframes = 100
blockframes = max(1, int(np.ceil(self.n_s * (srate/framesamps))))
blocksamps = blockframes * framesamps
nblocks = int(np.ceil(float(nframes) / float(blockframes)))
# How many frames do we try to prepad?
prepadframes = 10
isfirst = 1
donefr = 0
for block in range(nblocks):
# Figure next block of samples, including pre- and post-padding
actualprepadframes = min(prepadframes, block*blockframes)
blockbasesamp = block*blocksamps
blocklastsamp = min(len(xdat), blockbasesamp + blocksamps
+self.sbpca.padsamps)
xpts = xdat[(blockbasesamp - actualprepadframes*framesamps)
:blocklastsamp]
# Run the sbpca part
acs = self.sbpca.calc_autocos(xpts, srate, isfirst)
(nsb, nlg, nfr) = np.shape(acs) # 24, 200, 501
# Now we know how many frames this block...
nactfr = nfr - actualprepadframes
ftr = np.zeros( (nactfr, 0), float)
frixs = range(donefr, donefr+nactfr)
donefr += nactfr
if self.write_rownum:
#ftr = np.c_[ftr, np.array(frixs, ndmin=2).transpose()]
ftr = np.c_[ftr,
self.start_utt * np.ones( (nactfr, 1), float),
np.array(frixs, ndmin=2).transpose()]
if self.write_time:
ftr = np.c_[ftr, self.sbpca.ac_hop
* np.array(frixs, ndmin=2).transpose()]
#blockix = range(block*blockframes, block*blockframes+nactfr)
if self.write_sbac:
ftr = np.c_[ftr, np.reshape(acs[:, :, actualprepadframes:],
(nsb*nlg, nactfr)).transpose()]
# Restore uniform zero'th lag (used to store energy).
acs[:, 0, :] = 1.0
pcas = sbpca.pca(acs[:, :, actualprepadframes:],
self.sbpca.mapping)
(nsb, npc, nactfr) = np.shape(pcas)
#pcasr = np.reshape(pcas, (nsb*npc, nactfr)).transpose()
# Required order of each frame is pcdim slowest, subband fastest!
pcasr = pcas.transpose().reshape((nactfr, nsb*npc))
if self.write_sbpca:
ftr = np.c_[ftr, pcasr]
# Run the MLP classifier
act = self.net.apply(pcasr).transpose()
#acts[:,blockix] = act
acts = np.c_[acts, act]
if self.write_posteriors:
ftr = np.c_[ftr, act.T]
if isfirst:
isfirst = 0
ftrs = ftr
else:
ftrs = np.r_[ftrs, ftr]
if self.write_pitch:
# Run viterbi decode on all activations stitched together
pth = viterbi(acts, self.hmm_vp)
# Convert pitch bin indices to frequencies in Hz by table lookup
ftrs = np.c_[ftrs, self.ptchtab[pth]]
# first activation is Pr(unvoiced), so Pr(voiced) is its complement
if self.write_pvx:
ftrs = np.c_[ftrs, 1.0 - acts[0,]]
return ftrs
AUX_DIRECTORY = os.path.join(os.path.split(__file__)[0], 'aux')
def default_config():
""" Provide a set of default configuration parameters."""
# Setup config
config = {}
# sbpca params
# diff file for py
config['pca_file'] = os.path.join(
AUX_DIRECTORY, 'mapping-pca_sr8k_bpo6_sb24_k10.mat')
#config['kdim'] = 10 # inferred from mapping file
config['nchs'] = 24
config['n_s'] = 5.0 # secs per process block, controls blockframes
config['SBF_sr'] = 8000.0
config['SBF_fmin'] = 100.0
config['SBF_bpo'] = 6.0
config['SBF_q'] = 8.0 # not actually used for SlanPat ERB filters
config['SBF_order'] = 2 # not actually used for SlanPat ERB filters
config['SBF_ftype'] = 2 # ignored - python is always SlanPat ERB
config['twin'] = 0.025 # autoco window len
thop = 0.010
config['thop'] = thop # autoco hop
# mlp params
#config['wgt_file'] = os.path.join(
# AUX_DIRECTORY, 'rats_sr8k_bpo6_sb24_k10_aCH_h100.wgt')
#config['norms_file'] = os.path.join(
# AUX_DIRECTORY, 'tr_rats_sr8k_bpo6_sb24_k10.norms')
config['wgt_file'] = os.path.join(
AUX_DIRECTORY, 'sub_qtr_rats_keele_sr8k_bpo6_sb24_k10_ep5_h100.wgt')
config['norms_file'] = os.path.join(
AUX_DIRECTORY, 'tr_keele_rbf_pinknoise_sr8000_bpo6_nchs24_k10.norms')
#config['nhid'] = 100 # inferred from wgt file, + input size from norms file
#config['nmlp'] = 68 # output layer size, inferred from wgt file
config['pcf_file'] = os.path.join(
AUX_DIRECTORY, 'pitch_candidates_freqz.txt')
# viterbi decode params
config['hmm_vp'] = 0.9 # interpretation changed c/w Matlab
# output options
config['write_rownum'] = 0 # prepend row number
config['write_time'] = 1 # prepend time in seconds to output
config['write_sbac'] = 0 # output raw autocorrelations (big - 24 x 200)
config['write_sbpca'] = 0 # output subband pcas (24 x 10)
config['write_posteriors'] = 0 # output raw pitch posteriors (68)
config['write_pitch'] = 1 # output the actual pitch value in Hz (1)
config['write_pvx'] = 1 # output just 1-posterior(unvoiced) (1)
# Tricks with segmenting utterances not implemented in Python
config['start_utt'] = 0 # what utterance number to start at
#config['incr_utt'] = 0 # increment the utterance each seg (?)
#config['segs_per_utt'] = 1 # break each utterance into this many segs
config['verbose'] = 0
#config['disp'] = 0 # no display code in Python
# Output file format is the concern of the calling layer
#config['sph_out'] = 0
#config['mat_out'] = 0
#config['txt_out'] = 1
config['dither_level'] = 1e-3
return config
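
# Hedged programmatic-usage sketch (illustration only; 'speech.wav' is a
# placeholder path): with the defaults above, the returned feature matrix has
# one row per frame and columns [time, pitch_hz, p_voiced].
def _example_sacc(wavfile='speech.wav'):
    extractor = SAcC(default_config())
    return extractor(wavfile)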
############## Provide a command-line wrapper
def main(argv):
""" Main routine to calculate SAcC from wav file """
if len(argv) != 3:
raise NameError( ("Usage: ", argv[0],
" inputsound.wav outputpitchtrack.txt") )
inwavfile = argv[1]
outptfile = argv[2]
# Setup config
config = default_config()
# Configure
sacc_extractor = SAcC(config)
# Apply
features = sacc_extractor(inwavfile)
# Write the data out
np.savetxt(outptfile, features, fmt='%.3f', delimiter=' ', newline='\n')
# Run the main function if called from the command line
if __name__ == "__main__":
import sys
main(sys.argv)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachinesOperations:
"""VirtualMachinesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.avs.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachinesList"]:
"""List of virtual machines in a private cloud cluster.
List of virtual machines in a private cloud cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param private_cloud_name: Name of the private cloud.
:type private_cloud_name: str
:param cluster_name: Name of the cluster in the private cloud.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachinesList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.avs.models.VirtualMachinesList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachinesList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachinesList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}/virtualMachines'} # type: ignore
async def get(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
virtual_machine_id: str,
**kwargs: Any
) -> "_models.VirtualMachine":
"""Get a virtual machine by id in a private cloud cluster.
Get a virtual machine by id in a private cloud cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param private_cloud_name: Name of the private cloud.
:type private_cloud_name: str
:param cluster_name: Name of the cluster in the private cloud.
:type cluster_name: str
:param virtual_machine_id: Virtual Machine identifier.
:type virtual_machine_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachine, or the result of cls(response)
:rtype: ~azure.mgmt.avs.models.VirtualMachine
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'virtualMachineId': self._serialize.url("virtual_machine_id", virtual_machine_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}/virtualMachines/{virtualMachineId}'} # type: ignore
async def _restrict_movement_initial(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
virtual_machine_id: str,
restrict_movement: "_models.VirtualMachineRestrictMovement",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._restrict_movement_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'virtualMachineId': self._serialize.url("virtual_machine_id", virtual_machine_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(restrict_movement, 'VirtualMachineRestrictMovement')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_restrict_movement_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}/virtualMachines/{virtualMachineId}/restrictMovement'} # type: ignore
async def begin_restrict_movement(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
virtual_machine_id: str,
restrict_movement: "_models.VirtualMachineRestrictMovement",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Enable or disable DRS-driven VM movement restriction.
Enable or disable DRS-driven VM movement restriction.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param private_cloud_name: Name of the private cloud.
:type private_cloud_name: str
:param cluster_name: Name of the cluster in the private cloud.
:type cluster_name: str
:param virtual_machine_id: Virtual Machine identifier.
:type virtual_machine_id: str
:param restrict_movement: Whether VM DRS-driven movement is restricted (Enabled) or not
(Disabled).
:type restrict_movement: ~azure.mgmt.avs.models.VirtualMachineRestrictMovement
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restrict_movement_initial(
resource_group_name=resource_group_name,
private_cloud_name=private_cloud_name,
cluster_name=cluster_name,
virtual_machine_id=virtual_machine_id,
restrict_movement=restrict_movement,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'virtualMachineId': self._serialize.url("virtual_machine_id", virtual_machine_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restrict_movement.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}/virtualMachines/{virtualMachineId}/restrictMovement'} # type: ignore
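
# Hedged usage sketch (not part of the generated client): assumes `client` is an
# already-constructed async service client exposing this operation group as
# ``client.virtual_machines``; resource names below are placeholders, and the
# keyword used to build VirtualMachineRestrictMovement is an assumption.
async def _example_virtual_machines(client):
    # Page through the VMs of a cluster.
    async for vm in client.virtual_machines.list(
            "my-resource-group", "my-private-cloud", "Cluster-1"):
        print(vm.name)
    # Start the long-running restrict-movement operation and wait for it.
    poller = await client.virtual_machines.begin_restrict_movement(
        resource_group_name="my-resource-group",
        private_cloud_name="my-private-cloud",
        cluster_name="Cluster-1",
        virtual_machine_id="vm-42",
        restrict_movement=_models.VirtualMachineRestrictMovement(
            restrict_movement="Enabled"),
    )
    await poller.result()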
|
|
from __future__ import print_function
from collections import defaultdict
import datetime
import json
import re
from six import text_type
from stacktraces import process_model, thread_analyzer
from stacktraces.python import stacktrace
def get_process_from_traceback(traceback_lines, name=None):
p = process_model.Process(0)
ptb = stacktrace.PythonTraceback(
proc=p, lines=traceback_lines, name=name,
)
ptb.parse()
# thread_analyzer.cleanup(p, my_cleanups)
# thread_analyzer.annotate(p, my_annotations)
p.group() # only one thread, but this allows str(p) to work
return p
def describe_lines(traceback_lines):
return text_type(get_process_from_traceback(traceback_lines))
LOGLVL_RE = r'(CRITICAL|ERROR|WARNING|INFO|DEBUG)'
TRACE_MSG_RE_1 = re.compile(r'^\[([^]]+)\] ' + LOGLVL_RE + r' \[[^]]+\] (.*)\n?$')
TRACE_MSG_RE_2 = re.compile(r'^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,?\d?\d?\d?).*' + LOGLVL_RE + r' +(.*)\n?$')
TRACE_MSG_RE_3 = re.compile(r'^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) .*\[' + LOGLVL_RE + r'\] +(.*)\n?')
TRACE_MSG_RE_4 = re.compile(r'^.*\[([A-Z][a-z][a-z] [A-Z][a-z][a-z] \d\d \d\d:\d\d:\d\d \d\d\d\d)\] +(.*)\n?')
TRACE_MSG_RE_DEFAULT = re.compile(r'^\[[^]]+\] *(.*)\n?$')
TRACE_MSG_RES = [
(TRACE_MSG_RE_1, 1, 3,),
(TRACE_MSG_RE_2, 1, 3,),
(TRACE_MSG_RE_3, 1, 3,),
(TRACE_MSG_RE_4, 1, 2),
(TRACE_MSG_RE_DEFAULT, None, 1)
]
TIMESTAMP_RE_1 = re.compile(r'^(\d\d\d\d)-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d)(,\d\d\d)?$')
TIMESTAMP_RE_2 = re.compile(r'^(\d\d)/([A-Z][a-z][a-z])/(\d\d\d\d) (\d\d):(\d\d):(\d\d)$')
TIMESTAMP_RE_3 = re.compile(r'^[A-Z][a-z][a-z] ([A-Z][a-z][a-z]) (\d\d) (\d\d):(\d\d):(\d\d) (\d\d\d\d)$')
MONTH_NAME_TO_NUMBER = {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}
def _get_timestamp_1(m):
dt = datetime.datetime(
int(m.group(1)),
int(m.group(2)),
int(m.group(3)),
int(m.group(4)),
int(m.group(5)),
int(m.group(6)),
)
if m.group(7):
dt = dt.replace(microsecond=1000 * int(m.group(7)[1:]))
return dt
def _get_timestamp_2(m):
month = MONTH_NAME_TO_NUMBER[m.group(2)]
return datetime.datetime(
int(m.group(3)),
month,
int(m.group(1)),
int(m.group(4)),
int(m.group(5)),
int(m.group(6)),
)
def _get_timestamp_3(m):
month = MONTH_NAME_TO_NUMBER[m.group(1)]
return datetime.datetime(
int(m.group(6)),
month,
int(m.group(2)),
int(m.group(3)),
int(m.group(4)),
int(m.group(5)),
)
TIMESTAMP_REGEXES = [
(TIMESTAMP_RE_1, _get_timestamp_1),
(TIMESTAMP_RE_2, _get_timestamp_2),
(TIMESTAMP_RE_3, _get_timestamp_3),
]
def parse_trace_msg(msg, pytz_timezone=None):
for regex, timestamp_index, msg_index in TRACE_MSG_RES:
m = regex.match(msg)
if m:
d = None
if timestamp_index is not None:
timestamp = m.group(timestamp_index)
for time_regex, extract in TIMESTAMP_REGEXES:
n = time_regex.match(timestamp)
if n:
d = extract(n)
if pytz_timezone:
d = pytz_timezone.localize(d)
break
else:
timestamp = None
if msg_index is not None:
msg = m.group(msg_index)
else:
msg = None
return msg, timestamp, d
return None, None, None
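
# Quick sketch of the parser on a hypothetical Django-style log line (made up
# for illustration, not taken from a real log):
def _example_parse_trace_msg():
    msg, timestamp, dt = parse_trace_msg(
        '[2020-01-02 03:04:05] ERROR [django.request] Internal Server Error')
    # -> ('Internal Server Error', '2020-01-02 03:04:05',
    #     datetime.datetime(2020, 1, 2, 3, 4, 5))
    return msg, timestamp, dt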
def handle_traceback(traceback_lines, msg, tracelvl, cleanups, annotations, pytz_timezone):
# We just have a traceback from an individual thread, so skip:
# . ProcessGroup representation
# . Process.group(), which finds threads in a process with same backtrace
if msg:
_, timestamp, timestamp_dt = parse_trace_msg(msg.line, pytz_timezone)
timestamp_args = {
'timestamp': timestamp,
}
if timestamp_dt:
timestamp_args['isotimestamp'] = timestamp_dt.isoformat()
else:
timestamp_args = {}
# Ignore error message in the related log message for now; it seems to be
# always duplicated within the traceback output
p = process_model.Process(0)
ptb = stacktrace.PythonTraceback(
proc=p, lines=traceback_lines, **timestamp_args
)
ptb.parse()
thread_analyzer.cleanup(p, cleanups)
thread_analyzer.annotate(p, annotations)
p.group() # only one thread, but this allows str(p) to work
if tracelvl > 1:
print('-------------')
print(traceback_lines)
return p, traceback_lines
class Line(object):
def __init__(self, line):
self.line = line
self.is_start_of_traceback = line.startswith('Traceback ')
self.is_log_msg = False
if not self.is_start_of_traceback:
msg, timestamp, dt = parse_trace_msg(line)
if timestamp or msg:
self.is_log_msg = True
def __str__(self):
return '%s%s%s' % (
'TB ' if self.is_start_of_traceback else '',
'LG ' if self.is_log_msg else '',
self.line,
)
class ParseState(object):
def __init__(self):
self.in_traceback = False
self.traceback_lines = []
self.traceback_log_msg = None
def __str__(self):
fields = []
if self.in_traceback:
fields.append('IN-TB')
fields.append('%s..' % self.traceback_lines[0])
if self.traceback_log_msg:
fields.append(self.traceback_log_msg.line)
return ' '.join(fields)
def read_log(tracelvl, logfile, cleanups=(), annotations=(), pytz_timezone=None):
prev_log_msg = None
s = ParseState()
while True:
line = logfile.readline()
if line == '':
break
line = Line(line)
if line.is_start_of_traceback:
if s.in_traceback:
yield handle_traceback(
s.traceback_lines, s.traceback_log_msg, tracelvl, cleanups,
annotations, pytz_timezone
)
s = ParseState()
s.in_traceback = True
s.traceback_log_msg = prev_log_msg
elif line.is_log_msg and s.traceback_lines:
yield handle_traceback(
s.traceback_lines, s.traceback_log_msg, tracelvl, cleanups,
annotations, pytz_timezone
)
s = ParseState()
if s.in_traceback and not (line.line.startswith('[') or line.line in ('', '\n')):
s.traceback_lines.append(line.line)
if line.is_log_msg:
prev_log_msg = line
if s.in_traceback:
yield handle_traceback(
s.traceback_lines, s.traceback_log_msg, tracelvl, cleanups,
annotations, pytz_timezone
)
# s = ParseState()
def _output_process(
output_format,
include_duplicates, include_raw, messages, stacktraces, need_delim,
process, traceback_lines, outfile
):
thread = process.threads[0]
st = ', '.join([f.fn for f in thread.frames])
if not include_duplicates:
if thread.failure_text:
messages[thread.failure_text] += 1
if thread.error_msg:
messages[thread.error_msg] += 1
stacktraces[st] += 1
if stacktraces[st] > 1:
return need_delim
if output_format == 'text':
if thread.error_msg:
print(thread.error_msg, file=outfile)
if thread.failure_text:
print(thread.failure_text, file=outfile)
print(st, file=outfile)
if include_raw:
print(''.join(traceback_lines), file=outfile)
print(file=outfile)
else:
if need_delim:
print(',', file=outfile)
if include_raw:
to_serialize = {
'wrapped': process.description(wrapped=True),
'raw': ''.join(traceback_lines)
}
else:
to_serialize = process.description(wrapped=True)
print(json.dumps(to_serialize), file=outfile)
need_delim = True
return need_delim
def process_log_file(
log_file, outfile,
output_format='text', include_duplicates=False, include_raw=False,
pytz_timezone=None,
):
need_delim = False
if output_format == 'json':
print('[', file=outfile)
message_counts = defaultdict(int)
stacktrace_counts = defaultdict(int)
for p, traceback_lines in read_log(tracelvl=1, logfile=log_file, pytz_timezone=pytz_timezone):
need_delim = _output_process(
output_format, include_duplicates, include_raw, message_counts,
stacktrace_counts, need_delim, p, traceback_lines, outfile
)
if output_format == 'json':
print(']', file=outfile)
return message_counts, stacktrace_counts
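# Illustrative usage sketch (not part of the original module): drives
# process_log_file() over a local log file and prints a simple summary of the
# duplicate stack traces it saw. The log path is a placeholder, and the
# module-level imports above (json, defaultdict, stacktrace, thread_analyzer,
# process_model) are assumed to be in scope.
def _example_summarize_log(log_path='/tmp/service-error.log'):
    import sys
    with open(log_path) as log_file:
        message_counts, stacktrace_counts = process_log_file(
            log_file, sys.stdout,
            output_format='text',
            include_duplicates=False,
            include_raw=False,
        )
    # stacktrace_counts maps a comma-joined frame summary to how often it occurred
    for st, count in sorted(stacktrace_counts.items(), key=lambda kv: -kv[1]):
        print('%5d  %s' % (count, st), file=sys.stderr)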
|
|
from __future__ import absolute_import, print_function, division
import operator
from collections import OrderedDict
from petl.compat import next, string_types, text_type
from petl.errors import ArgumentError
from petl.util.base import Table, expr, rowgroupby, Record
from petl.transform.sorts import sort
def fieldmap(table, mappings=None, failonerror=False, errorvalue=None):
"""
Transform a table, mapping fields arbitrarily between input and output.
E.g.::
>>> import petl as etl
>>> from collections import OrderedDict
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, 'female', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33, 45.2],
... [5, '-', 25, 1.65, 51.9]]
>>> mappings = OrderedDict()
>>> # rename a field
... mappings['subject_id'] = 'id'
>>> # translate a field
... mappings['gender'] = 'sex', {'male': 'M', 'female': 'F'}
>>> # apply a calculation to a field
... mappings['age_months'] = 'age', lambda v: v * 12
>>> # apply a calculation to a combination of fields
... mappings['bmi'] = lambda rec: rec['weight'] / rec['height']**2
>>> # transform and inspect the output
... table2 = etl.fieldmap(table1, mappings)
>>> table2
+------------+--------+------------+--------------------+
| subject_id | gender | age_months | bmi |
+============+========+============+====================+
| 1 | 'M' | 192 | 29.48870392390012 |
+------------+--------+------------+--------------------+
| 2 | 'F' | 228 | 30.8531967030519 |
+------------+--------+------------+--------------------+
| 3 | 'F' | 204 | 23.481883600555488 |
+------------+--------+------------+--------------------+
| 4 | 'M' | 252 | 25.55260331279326 |
+------------+--------+------------+--------------------+
| 5 | '-' | 300 | 19.0633608815427 |
+------------+--------+------------+--------------------+
Note also that the mapping value can be an expression string, which will be
converted to a lambda function via :func:`petl.util.base.expr`.
"""
return FieldMapView(table, mappings=mappings, failonerror=failonerror,
errorvalue=errorvalue)
Table.fieldmap = fieldmap
class FieldMapView(Table):
def __init__(self, source, mappings=None, failonerror=False,
errorvalue=None):
self.source = source
if mappings is None:
self.mappings = OrderedDict()
else:
self.mappings = mappings
self.failonerror = failonerror
self.errorvalue = errorvalue
def __setitem__(self, key, value):
self.mappings[key] = value
def __iter__(self):
return iterfieldmap(self.source, self.mappings, self.failonerror,
self.errorvalue)
def iterfieldmap(source, mappings, failonerror, errorvalue):
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
outhdr = mappings.keys()
yield tuple(outhdr)
mapfuns = dict()
for outfld, m in mappings.items():
if m in hdr:
mapfuns[outfld] = operator.itemgetter(m)
elif isinstance(m, int) and m < len(hdr):
mapfuns[outfld] = operator.itemgetter(m)
elif isinstance(m, string_types):
mapfuns[outfld] = expr(m)
elif callable(m):
mapfuns[outfld] = m
elif isinstance(m, (tuple, list)) and len(m) == 2:
srcfld = m[0]
fm = m[1]
if callable(fm):
mapfuns[outfld] = composefun(fm, srcfld)
elif isinstance(fm, dict):
mapfuns[outfld] = composedict(fm, srcfld)
else:
raise ArgumentError('expected callable or dict')
else:
raise ArgumentError('invalid mapping %r: %r' % (outfld, m))
# wrap rows as records
it = (Record(row, flds) for row in it)
for row in it:
outrow = list()
for outfld in outhdr:
try:
val = mapfuns[outfld](row)
except Exception as e:
if failonerror:
raise e
else:
val = errorvalue
outrow.append(val)
yield tuple(outrow)
def composefun(f, srcfld):
def g(rec):
return f(rec[srcfld])
return g
def composedict(d, srcfld):
def g(rec):
k = rec[srcfld]
if k in d:
return d[k]
else:
return k
return g
def rowmap(table, rowmapper, header, failonerror=False):
"""
Transform rows via an arbitrary function. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, 'female', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33, 45.2],
... [5, '-', 25, 1.65, 51.9]]
>>> def rowmapper(row):
... transmf = {'male': 'M', 'female': 'F'}
... return [row[0],
... transmf[row['sex']] if row['sex'] in transmf else None,
... row.age * 12,
... row.height / row.weight ** 2]
...
>>> table2 = etl.rowmap(table1, rowmapper,
... header=['subject_id', 'gender', 'age_months',
... 'bmi'])
>>> table2
+------------+--------+------------+-----------------------+
| subject_id | gender | age_months | bmi |
+============+========+============+=======================+
| 1 | 'M' | 192 | 0.0003772112382934443 |
+------------+--------+------------+-----------------------+
| 2 | 'F' | 228 | 0.0004366015456998006 |
+------------+--------+------------+-----------------------+
| 3 | 'F' | 204 | 0.0003215689675106949 |
+------------+--------+------------+-----------------------+
| 4 | 'M' | 252 | 0.0006509906805544679 |
+------------+--------+------------+-----------------------+
| 5 | None | 300 | 0.0006125608384287258 |
+------------+--------+------------+-----------------------+
The `rowmapper` function should accept a single row and return a single
row (list or tuple).
"""
return RowMapView(table, rowmapper, header, failonerror=failonerror)
Table.rowmap = rowmap
class RowMapView(Table):
def __init__(self, source, rowmapper, header, failonerror=False):
self.source = source
self.rowmapper = rowmapper
self.header = header
self.failonerror = failonerror
def __iter__(self):
return iterrowmap(self.source, self.rowmapper, self.header,
self.failonerror)
def iterrowmap(source, rowmapper, header, failonerror):
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
yield tuple(header)
it = (Record(row, flds) for row in it)
for row in it:
try:
outrow = rowmapper(row)
yield tuple(outrow)
except Exception as e:
if failonerror:
raise e
def rowmapmany(table, rowgenerator, header, failonerror=False):
"""
Map each input row to any number of output rows via an arbitrary
function. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, '-', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33]]
>>> def rowgenerator(row):
... transmf = {'male': 'M', 'female': 'F'}
... yield [row[0], 'gender',
... transmf[row['sex']] if row['sex'] in transmf else None]
... yield [row[0], 'age_months', row.age * 12]
... yield [row[0], 'bmi', row.height / row.weight ** 2]
...
>>> table2 = etl.rowmapmany(table1, rowgenerator,
... header=['subject_id', 'variable', 'value'])
>>> table2.lookall()
+------------+--------------+-----------------------+
| subject_id | variable | value |
+============+==============+=======================+
| 1 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 1 | 'age_months' | 192 |
+------------+--------------+-----------------------+
| 1 | 'bmi' | 0.0003772112382934443 |
+------------+--------------+-----------------------+
| 2 | 'gender' | 'F' |
+------------+--------------+-----------------------+
| 2 | 'age_months' | 228 |
+------------+--------------+-----------------------+
| 2 | 'bmi' | 0.0004366015456998006 |
+------------+--------------+-----------------------+
| 3 | 'gender' | None |
+------------+--------------+-----------------------+
| 3 | 'age_months' | 204 |
+------------+--------------+-----------------------+
| 3 | 'bmi' | 0.0003215689675106949 |
+------------+--------------+-----------------------+
| 4 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 4 | 'age_months' | 252 |
+------------+--------------+-----------------------+
The `rowgenerator` function should accept a single row and yield zero or
more rows (lists or tuples).
See also the :func:`petl.transform.reshape.melt` function.
"""
return RowMapManyView(table, rowgenerator, header, failonerror=failonerror)
Table.rowmapmany = rowmapmany
class RowMapManyView(Table):
def __init__(self, source, rowgenerator, header, failonerror=False):
self.source = source
self.rowgenerator = rowgenerator
self.header = header
self.failonerror = failonerror
def __iter__(self):
return iterrowmapmany(self.source, self.rowgenerator, self.header,
self.failonerror)
def iterrowmapmany(source, rowgenerator, header, failonerror):
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
yield tuple(header)
it = (Record(row, flds) for row in it)
for row in it:
try:
for outrow in rowgenerator(row):
yield tuple(outrow)
except Exception as e:
if failonerror:
raise e
else:
pass
def rowgroupmap(table, key, mapper, header=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
"""
Group rows under the given key then apply `mapper` to yield zero or more
output rows for each input group of rows.
"""
return RowGroupMapView(table, key, mapper, header=header,
presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.rowgroupmap = rowgroupmap
class RowGroupMapView(Table):
def __init__(self, source, key, mapper, header=None,
presorted=False, buffersize=None, tempdir=None, cache=True):
if presorted:
self.source = source
else:
self.source = sort(source, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.header = header
self.mapper = mapper
def __iter__(self):
return iterrowgroupmap(self.source, self.key, self.mapper, self.header)
def iterrowgroupmap(source, key, mapper, header):
yield tuple(header)
for key, rows in rowgroupby(source, key):
for row in mapper(key, rows):
yield row
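# Illustrative sketch (not from the petl source): a hypothetical reducer used
# with rowgroupmap(), emitting one summary row per group. The sample table and
# field names are assumptions for the example only.
def _example_rowgroupmap():
    import petl as etl
    table1 = [['sex', 'age'],
              ['male', 16],
              ['female', 19],
              ['female', 17]]

    def mean_age(key, rows):
        ages = [row[1] for row in rows]
        # zero or more output rows may be yielded per group
        yield [key, sum(ages) / float(len(ages))]

    # rows are grouped (and sorted) by 'sex' before mean_age is applied
    return etl.rowgroupmap(table1, 'sex', mean_age, header=['sex', 'mean_age'])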
|
|
import os
import pickle
import signal
import sys
import time
from gppylib.mainUtils import *
from gppylib.utils import checkNotNone, appendNewEntriesToHbaFile
from gppylib.db import dbconn
from gppylib import gparray, gplog
from gppylib.gplog import *
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands import base
from gppylib.gparray import GpArray
from gppylib import gphostcache
from gppylib.testold.testUtils import *
from gppylib.operations import startSegments, Operation
from gppylib.gp_era import read_era
from gppylib.operations.utils import ParallelOperation, RemoteOperation
from gppylib.operations.unix import CleanSharedMem
from gppylib.operations.filespace import PG_SYSTEM_FILESPACE, GP_TRANSACTION_FILES_FILESPACE, GP_TEMPORARY_FILES_FILESPACE, GetMoveOperationList, GetFilespaceEntriesDict, GetFilespaceEntries, GetCurrentFilespaceEntries, RollBackFilespaceChanges, UpdateFlatFiles, FileType, MoveFilespaceError
from gppylib.commands.gp import is_pid_postmaster, get_pid_from_remotehost
from gppylib.commands.unix import check_pid_on_remotehost
# The two imports below are required by buildMirrors() further down
# (configInterface, faultProberInterface); the gppylib.system module paths are
# assumed here based on where those interfaces normally live.
from gppylib.system import configurationInterface as configInterface
from gppylib.system import faultProberInterface
logger = get_default_logger()
gDatabaseDirectories = [
    # this list, combined with gDatabaseSubDirectories, also appears inside initdb.c
"global",
"pg_log",
"pg_xlog",
"pg_clog",
"pg_changetracking",
"pg_subtrans",
"pg_twophase",
"pg_multixact",
"pg_distributedxidmap",
"pg_distributedlog",
"pg_utilitymodedtmredo",
"base",
"pg_tblspc",
"pg_stat_tmp"
]
gDatabaseSubDirectories = [
"pg_xlog/archive_status",
"pg_multixact/members",
"pg_multixact/offsets",
"base/1"
]
#
# Database files that may exist in the root directory and need deleting
#
gDatabaseFiles = [
"PG_VERSION",
"pg_hba.conf",
"pg_ident.conf",
"postgresql.conf",
"postmaster.log",
"postmaster.opts",
"postmaster.pid",
"gp_dbid"
]
def MPP_12038_fault_injection():
"""This function will check for the environment variable
GP_MPP_12038 and if it is set will sleep for 2 * gp_fts_probe_interval.
This is used in this module to check interaction with the FTS prober and
should only be used for testing. Note this delay is long enough for a
small test installation but would likely not be long enough for a large
cluster."""
if os.getenv("GP_MPP_12038_INJECT_DELAY", None):
faultProber = faultProberInterface.getFaultProber()
probe_interval_secs = faultProber.getFaultProberInterval()
logger.info("Sleeping for %d seconds for MPP-12038 test..." % (probe_interval_secs * 2))
time.sleep(probe_interval_secs * 2)
#
# note: it's a little quirky that caller must set up failed/failover so that failover is in gparray but
# failed is not (if both set)...change that, or at least protect against problems
#
class GpMirrorToBuild:
def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization):
checkNotNone("liveSegment", liveSegment)
checkNotNone("forceFullSynchronization", forceFullSynchronization)
if failedSegment is None and failoverSegment is None:
raise Exception( "No mirror passed to GpMirrorToBuild")
if not liveSegment.isSegmentQE():
raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a correct segment " \
"(it is a master or standby master)" % liveSegment.getSegmentContentId())
if not liveSegment.isSegmentPrimary(True):
raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
if not liveSegment.isSegmentUp():
raise ExceptionNoStackTraceNeeded("Primary segment is not up for content %s" % liveSegment.getSegmentContentId())
if failedSegment is not None:
if failedSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
raise ExceptionNoStackTraceNeeded("The primary is not of the same content as the failed mirror. Primary content %d, " \
"mirror content %d" % (liveSegment.getSegmentContentId(), failedSegment.getSegmentContentId()))
if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. " \
"A segment may not be recovered from itself" % liveSegment.getSegmentDbId())
if failoverSegment is not None:
if failoverSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
raise ExceptionNoStackTraceNeeded("The primary is not of the same content as the mirror. Primary content %d, " \
"mirror content %d" % (liveSegment.getSegmentContentId(), failoverSegment.getSegmentContentId()))
if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. " \
"A segment may not be built from itself" % liveSegment.getSegmentDbId())
if failedSegment is not None and failoverSegment is not None:
# for now, we require the code to have produced this -- even when moving the segment to another
# location, we preserve the directory
assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()
self.__failedSegment = failedSegment
self.__liveSegment = liveSegment
self.__failoverSegment = failoverSegment
"""
__forceFullSynchronization is true if full resynchronization should be FORCED -- that is, the
existing segment will be cleared and all objects will be transferred by the file resynchronization
process on the server
"""
self.__forceFullSynchronization = forceFullSynchronization
def getFailedSegment(self):
"""
returns the segment that failed. This can be None, for example when adding mirrors
"""
return self.__failedSegment
def getLiveSegment(self):
"""
returns the primary segment from which the recovery will take place. Will always be non-None
"""
return self.__liveSegment
def getFailoverSegment(self):
"""
returns the target segment to which we will copy the data, or None
if we will recover in place. Note that __failoverSegment should refer to the same dbid
as __failedSegment, but should have updated path + file information.
"""
return self.__failoverSegment
def isFullSynchronization(self):
"""
Returns whether or not this segment to recover needs to recover using full resynchronization
"""
if self.__forceFullSynchronization:
return True
# if we are failing over to a new segment location then we must fully resync
if self.__failoverSegment is not None:
return True
return False
class GpMirrorListToBuild:
def __init__(self, toBuild, pool, quiet, parallelDegree, additionalWarnings=None):
self.__mirrorsToBuild = toBuild
self.__pool = pool
self.__quiet = quiet
self.__parallelDegree = parallelDegree
self.__additionalWarnings = additionalWarnings or []
def getMirrorsToBuild(self):
"""
Returns a newly allocated list
"""
return [m for m in self.__mirrorsToBuild]
def getAdditionalWarnings(self):
"""
Returns any additional warnings generated during building of list
"""
return self.__additionalWarnings
def __moveFilespaces(self, gparray, target_segment):
"""
Moves filespaces for temporary and transaction files to a particular location.
"""
master_seg = gparray.master
default_filespace_dir = master_seg.getSegmentDataDirectory()
cur_filespace_entries = GetFilespaceEntriesDict(GetFilespaceEntries(gparray,
PG_SYSTEM_FILESPACE).run()).run()
pg_system_filespace_entries = GetFilespaceEntriesDict(GetFilespaceEntries(gparray,
PG_SYSTEM_FILESPACE).run()).run()
cur_filespace_name = gparray.getFileSpaceName(int(cur_filespace_entries[1][0]))
segments = [target_segment] + [seg for seg in gparray.getDbList() if seg.getSegmentContentId() == target_segment.getSegmentContentId() and seg.getSegmentDbId() != target_segment.getSegmentDbId()]
logger.info('Starting file move procedure for %s' % target_segment)
if os.path.exists(os.path.join(default_filespace_dir, GP_TRANSACTION_FILES_FILESPACE)):
#On the expansion segments, the current filespace used by existing nodes will be the
#new filespace to which we want to move the transaction and temp files.
#The filespace directories which have to be moved will be the default pg_system directories.
new_filespace_entries = GetFilespaceEntriesDict(GetCurrentFilespaceEntries(gparray,
FileType.TRANSACTION_FILES).run()).run()
logger.info('getting filespace information')
new_filespace_name = gparray.getFileSpaceName(int(new_filespace_entries[1][0]))
logger.info('getting move operations list for filespace %s' % new_filespace_name)
operation_list = GetMoveOperationList(segments,
FileType.TRANSACTION_FILES,
new_filespace_name,
new_filespace_entries,
cur_filespace_entries,
pg_system_filespace_entries).run()
logger.info('Starting transaction files move')
ParallelOperation(operation_list).run()
logger.debug('Checking transaction files move')
try:
for operation in operation_list:
operation.get_ret()
pass
except Exception, e:
logger.info('Failed to move transaction filespace. Rolling back changes ...')
RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
FileType.TRANSACTION_FILES,
cur_filespace_name,
cur_filespace_entries,
new_filespace_entries,
pg_system_filespace_entries).run()
raise
if os.path.exists(os.path.join(default_filespace_dir, GP_TEMPORARY_FILES_FILESPACE)):
new_filespace_entries = GetFilespaceEntriesDict(GetCurrentFilespaceEntries(gparray,
FileType.TEMPORARY_FILES).run()).run()
new_filespace_name = gparray.getFileSpaceName(int(new_filespace_entries[1][0]))
operation_list = GetMoveOperationList(segments,
FileType.TEMPORARY_FILES,
new_filespace_name,
new_filespace_entries,
cur_filespace_entries,
pg_system_filespace_entries).run()
logger.info('Starting temporary files move')
ParallelOperation(operation_list).run()
logger.debug('Checking temporary files move')
try:
for operation in operation_list:
operation.get_ret()
pass
except Exception, e:
logger.info('Failed to move temporary filespace. Rolling back changes ...')
                RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
                                         FileType.TEMPORARY_FILES,
cur_filespace_name,
cur_filespace_entries,
new_filespace_entries,
pg_system_filespace_entries).run()
raise
def buildMirrors(self, actionName, gpEnv, gpArray):
"""
Build the mirrors.
gpArray must have already been altered to have updated directories -- that is, the failoverSegments
from the mirrorsToBuild must be present in gpArray.
"""
testOutput("building %s segment(s)" % len(self.__mirrorsToBuild))
if len(self.__mirrorsToBuild) == 0:
logger.info("No segments to " + actionName)
return
self.checkForPortAndDirectoryConflicts(gpArray)
logger.info("%s segment(s) to %s" % (len(self.__mirrorsToBuild), actionName))
self.__verifyGpArrayContents(gpArray)
# make sure the target directories are up-to-date
# by cleaning them, if needed, and then copying a basic directory there
# the postgresql.conf in that basic directory will need updating (to change the port)
toStopDirectives = []
toEnsureMarkedDown = []
cleanupDirectives = []
copyDirectives = []
for toRecover in self.__mirrorsToBuild:
if toRecover.getFailedSegment() is not None:
# will stop the failed segment. Note that we do this even if we are recovering to a different location!
toStopDirectives.append(GpStopSegmentDirectoryDirective(toRecover.getFailedSegment()))
if toRecover.getFailedSegment().getSegmentStatus() == gparray.STATUS_UP:
toEnsureMarkedDown.append(toRecover.getFailedSegment())
if toRecover.isFullSynchronization():
isTargetReusedLocation = False
if toRecover.getFailedSegment() is not None and \
toRecover.getFailoverSegment() is None:
#
# We are recovering a failed segment in-place
#
cleanupDirectives.append(GpCleanupSegmentDirectoryDirective(toRecover.getFailedSegment()))
isTargetReusedLocation = True
if toRecover.getFailoverSegment() is not None:
targetSegment = toRecover.getFailoverSegment()
                else:
                    targetSegment = toRecover.getFailedSegment()
d = GpCopySegmentDirectoryDirective(toRecover.getLiveSegment(), targetSegment, isTargetReusedLocation)
copyDirectives.append(d)
self.__ensureStopped(gpEnv, toStopDirectives)
self.__ensureSharedMemCleaned(gpEnv, toStopDirectives)
self.__ensureMarkedDown(gpEnv, toEnsureMarkedDown)
self.__cleanUpSegmentDirectories(cleanupDirectives)
self.__copySegmentDirectories(gpEnv, gpArray, copyDirectives)
#Move the filespace for transaction and temporary files
for toRecover in self.__mirrorsToBuild:
target_segment = None
if toRecover.getFailoverSegment() is not None:
target_segment = toRecover.getFailoverSegment()
elif toRecover.isFullSynchronization():
target_segment = toRecover.getFailedSegment()
if target_segment is not None:
self.__moveFilespaces(gpArray, target_segment)
#If we are adding mirrors, we need to update the flat files on the primaries as well
if actionName == "add":
try:
UpdateFlatFiles(gpArray, primaries=True).run()
except MoveFilespaceError, e:
logger.error(str(e))
raise
else:
try:
print 'updating flat files'
UpdateFlatFiles(gpArray, primaries=False).run()
except MoveFilespaceError, e:
logger.error(str(e))
raise
# update and save metadata in memory
for toRecover in self.__mirrorsToBuild:
if toRecover.getFailoverSegment() is None:
# we are recovering the lost segment in place
seg = toRecover.getFailedSegment()
else:
seg = toRecover.getFailedSegment()
# no need to update the failed segment's information -- it is
# being overwritten in the configuration with the failover segment
for gpArraySegment in gpArray.getDbList():
if gpArraySegment is seg:
raise Exception("failed segment should not be in the new configuration if failing over to new segment")
seg = toRecover.getFailoverSegment()
seg.setSegmentStatus(gparray.STATUS_DOWN) # down initially, we haven't started it yet
seg.setSegmentMode(gparray.MODE_RESYNCHRONIZATION)
# figure out what needs to be started or transitioned
mirrorsToStart = []
primariesToConvert = []
convertPrimaryUsingFullResync = []
fullResyncMirrorDbIds = {}
for toRecover in self.__mirrorsToBuild:
seg = toRecover.getFailoverSegment()
if seg is None:
seg = toRecover.getFailedSegment() # we are recovering in place
mirrorsToStart.append(seg)
primarySeg = toRecover.getLiveSegment()
            # The change in configuration of the mirror to down requires
            # that the primary also be changed to change tracking, if required.
if primarySeg.getSegmentMode() != gparray.MODE_CHANGELOGGING:
primarySeg.setSegmentMode(gparray.MODE_CHANGELOGGING)
primariesToConvert.append(primarySeg)
convertPrimaryUsingFullResync.append(toRecover.isFullSynchronization())
if toRecover.isFullSynchronization() and seg.getSegmentDbId() > 0:
fullResyncMirrorDbIds[seg.getSegmentDbId()] = True
# should use mainUtils.getProgramName but I can't make it work!
programName = os.path.split(sys.argv[0])[-1]
# Disable Ctrl-C, going to save metadata in database and transition segments
signal.signal(signal.SIGINT,signal.SIG_IGN)
try:
logger.info("Updating configuration with new mirrors")
configInterface.getConfigurationProvider().updateSystemConfig(
gpArray,
"%s: segment config for resync" % programName,
dbIdToForceMirrorRemoveAdd = fullResyncMirrorDbIds,
useUtilityMode = False,
allowPrimary = False
)
MPP_12038_fault_injection()
logger.info("Updating mirrors")
self.__updateGpIdFile(gpEnv, gpArray, mirrorsToStart)
logger.info("Starting mirrors")
self.__startAll(gpEnv, gpArray, mirrorsToStart)
logger.info("Updating configuration to mark mirrors up")
for seg in mirrorsToStart:
seg.setSegmentStatus(gparray.STATUS_UP)
for seg in primariesToConvert:
seg.setSegmentMode(gparray.MODE_RESYNCHRONIZATION)
configInterface.getConfigurationProvider().updateSystemConfig(
gpArray,
"%s: segment resync marking mirrors up and primaries resync" % programName,
dbIdToForceMirrorRemoveAdd = {},
useUtilityMode = True,
allowPrimary = False
)
MPP_12038_fault_injection()
#
# note: converting the primaries may take a really long time to complete because of initializing
# resynchronization
#
logger.info("Updating primaries")
self.__convertAllPrimaries(gpEnv, gpArray, primariesToConvert, convertPrimaryUsingFullResync)
logger.info("Done updating primaries")
finally:
# Reenable Ctrl-C
signal.signal(signal.SIGINT,signal.default_int_handler)
def __verifyGpArrayContents(self, gpArray):
"""
Run some simple assertions against gpArray contents
"""
for seg in gpArray.getDbList():
if seg.getSegmentDataDirectory() != seg.getSegmentFilespaces()[gparray.SYSTEM_FILESPACE]:
raise Exception("Mismatch between segment data directory and filespace entry for segment %s" %
seg.getSegmentDbId())
def checkForPortAndDirectoryConflicts(self, gpArray):
"""
Check gpArray for internal consistency -- no duplicate ports or directories on the same host, for example
A detected problem causes an Exception to be raised
"""
for hostName, segmentArr in GpArray.getSegmentsByHostName(gpArray.getDbList()).iteritems():
usedPorts = {}
usedDataDirectories = {}
for segment in segmentArr:
# check for port conflict
replicationPort = segment.getSegmentReplicationPort()
port = segment.getSegmentPort()
dbid = segment.getSegmentDbId()
if port in usedPorts:
raise Exception("On host %s, a port for segment with dbid %s conflicts with a port for segment dbid %s" \
% (hostName, dbid, usedPorts.get(port)))
if segment.isSegmentQE():
if replicationPort is None:
raise Exception("On host %s, the replication port is not set for segment with dbid %s" \
% (hostName, dbid))
if replicationPort in usedPorts:
raise Exception("On host %s, a port for segment with dbid %s conflicts with a port for segment dbid %s" \
% (hostName, dbid, usedPorts.get(replicationPort)))
if port == replicationPort:
raise Exception("On host %s, segment with dbid %s has equal port and replication port" \
% (hostName, dbid))
usedPorts[port] = dbid
usedPorts[replicationPort] = dbid
# check for directory conflict; could improve this by reporting nicer the conflicts
paths = [path for oid, path in segment.getSegmentFilespaces().items() if oid != gparray.SYSTEM_FILESPACE]
paths.append(segment.getSegmentDataDirectory())
for path in paths:
if path in usedDataDirectories:
raise Exception("On host %s, directory (base or filespace) for segment with dbid %s conflicts with a " \
"directory (base or filespace) for segment dbid %s; directory: %s" % \
(hostName, dbid, usedDataDirectories.get(path), path))
usedDataDirectories[path] = dbid
def __runWaitAndCheckWorkerPoolForErrorsAndClear(self, cmds, actionVerb, suppressErrorCheck=False):
for cmd in cmds:
self.__pool.addCommand(cmd)
self.__pool.wait_and_printdots(len(cmds), self.__quiet)
if not suppressErrorCheck:
self.__pool.check_results()
self.__pool.empty_completed_items()
def __copyFiles(self, srcDir, destDir, fileNames):
for name in fileNames:
cmd = gp.LocalCopy("copy file for segment", srcDir + "/" + name, destDir + "/" + name)
cmd.run(validateAfter=True)
def __createEmptyDirectories( self, dir, newDirectoryNames ):
for name in newDirectoryNames:
subDir = os.path.join(dir, name)
unix.MakeDirectory("create blank directory for segment", subDir).run(validateAfter=True)
unix.Chmod.local('set permissions on blank dir', subDir, '0700')
def __buildTarFileForTransfer(self, gpEnv, masterSegment, sampleSegment, newSegments):
"""
Returns the file for the tarfile that should be transferred and used
for building the blank segment
"""
masterDir = gpEnv.getMasterDataDir()
# note that this tempdir will be left around on the system (this is what other scripts do currently)
tempDir = gp.createTempDirectoryName(gpEnv.getMasterDataDir(), "gpbuildingsegment")
unix.MakeDirectory("create temp directory for segment", tempDir ).run(validateAfter=True)
schemaDir = tempDir + "/schema"
unix.MakeDirectory("create temp schema directory for segment", schemaDir ).run(validateAfter=True)
unix.Chmod.local('set permissions on schema dir', schemaDir, '0700') # set perms so postgres can start
#
# Copy remote files from the sample segment to the master
#
for toCopyFromRemote in ["postgresql.conf", "pg_hba.conf"]:
cmd = gp.RemoteCopy('copying %s from a segment' % toCopyFromRemote,
sampleSegment.getSegmentDataDirectory() + '/' + toCopyFromRemote,
masterSegment.getSegmentHostName(), schemaDir, ctxt=base.REMOTE,
remoteHost=sampleSegment.getSegmentAddress())
cmd.run(validateAfter=True)
appendNewEntriesToHbaFile( schemaDir + "/pg_hba.conf", newSegments)
#
# Use the master's version of other files, and build
#
self.__createEmptyDirectories( schemaDir, gDatabaseDirectories )
self.__createEmptyDirectories( schemaDir, gDatabaseSubDirectories )
self.__copyFiles(masterDir, schemaDir, ["PG_VERSION", "pg_ident.conf"])
#
# Build final tar
#
tarFileName = "gp_emptySegmentSchema.tar"
tarFile = tempDir + "/" + tarFileName
cmd = gp.CreateTar('gpbuildingmirrorsegment tar segment template', schemaDir, tarFile)
cmd.run(validateAfter=True)
return (tempDir, tarFile, tarFileName)
def __copySegmentDirectories(self, gpEnv, gpArray, directives):
"""
directives should be composed of GpCopySegmentDirectoryDirective values
"""
if len(directives) == 0:
return
srcSegments = [d.getSrcSegment() for d in directives]
destSegments = [d.getDestSegment() for d in directives]
isTargetReusedLocation = [d.isTargetReusedLocation() for d in directives]
destSegmentByHost = GpArray.getSegmentsByHostName(destSegments)
newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(destSegments, isTargetReusedLocation)
logger.info('Building template directory')
(tempDir, blankTarFile, tarFileName) = self.__buildTarFileForTransfer(gpEnv, gpArray.master, srcSegments[0], destSegments)
def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
segmentInfo = newSegmentInfo[hostName]
checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
return gp.ConfigureNewSegment(cmdLabel,
segmentInfo,
tarFile=tarFileName,
newSegments=True,
verbose=gplog.logging_is_verbose(),
batchSize=self.__parallelDegree,
ctxt=gp.REMOTE,
remoteHost=hostName,
validationOnly=validationOnly)
#
# validate directories for target segments
#
logger.info('Validating remote directories')
cmds = []
for hostName in destSegmentByHost.keys():
cmds.append(createConfigureNewSegmentCommand(hostName, 'validate blank segments', True))
for cmd in cmds:
self.__pool.addCommand(cmd)
self.__pool.wait_and_printdots(len(cmds), self.__quiet)
validationErrors = []
for item in self.__pool.getCompletedItems():
results = item.get_results()
if not results.wasSuccessful():
if results.rc == 1:
# stdoutFromFailure = results.stdout.replace("\n", " ").strip()
lines = results.stderr.split("\n")
for line in lines:
if len(line.strip()) > 0:
validationErrors.append("Validation failure on host %s %s" % (item.remoteHost, line))
else:
validationErrors.append(str(item))
self.__pool.empty_completed_items()
if validationErrors:
raise ExceptionNoStackTraceNeeded("\n" + ("\n".join(validationErrors)))
#
# copy tar from master to target hosts
#
logger.info('Copying template directory file')
cmds = []
for hostName in destSegmentByHost.keys():
cmds.append( gp.RemoteCopy("copy segment tar", blankTarFile, hostName, tarFileName ))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "building and transferring basic segment directory")
#
# unpack and configure new segments
#
logger.info('Configuring new segments')
cmds = []
for hostName in destSegmentByHost.keys():
cmds.append(createConfigureNewSegmentCommand(hostName, 'configure blank segments', False))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "unpacking basic segment directory")
#
# Clean up copied tar from each remote host
#
logger.info('Cleaning files')
cmds = []
for hostName, segments in destSegmentByHost.iteritems():
cmds.append(unix.RemoveFiles('remove tar file', tarFileName, ctxt=gp.REMOTE, remoteHost=hostName))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning up tar file on segment hosts")
#
# clean up the local temp directory
#
unix.RemoveFiles.local('remove temp directory', tempDir)
def _get_running_postgres_segments(self, segments):
running_segments = []
for seg in segments:
datadir = self.dereference_remote_symlink(seg.getSegmentDataDirectory(), seg.getSegmentHostName())
pid = get_pid_from_remotehost(seg.getSegmentHostName(), datadir)
if pid is not None:
if check_pid_on_remotehost(pid, seg.getSegmentHostName()):
if is_pid_postmaster(datadir, pid, seg.getSegmentHostName()):
running_segments.append(seg)
                    else:
                        logger.info("Skipping stop of segment %s on host %s since it is not a postgres process" % (seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
                else:
                    logger.debug("Skipping stop of segment %s on host %s since process with pid %s is not running" % (seg.getSegmentDataDirectory(), seg.getSegmentHostName(), pid))
            else:
                logger.debug("Skipping stop of segment %s on host %s since pid could not be found" % (seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
return running_segments
def dereference_remote_symlink(self, datadir, host):
cmdStr = """python -c 'import os; print os.path.realpath("%s")'""" % datadir
cmd = base.Command('dereference a symlink on a remote host', cmdStr=cmdStr, ctxt=base.REMOTE, remoteHost=host)
cmd.run()
results = cmd.get_results()
if results.rc != 0:
logger.warning('Unable to determine if %s is symlink. Assuming it is not symlink' % (datadir))
return datadir
return results.stdout.strip()
def __ensureSharedMemCleaned(self, gpEnv, directives):
"""
@param directives a list of the GpStopSegmentDirectoryDirective values indicating which segments to cleanup
"""
if len(directives) == 0:
return
logger.info('Ensuring that shared memory is cleaned up for stopped segments')
segments = [d.getSegment() for d in directives]
segmentsByHost = GpArray.getSegmentsByHostName(segments)
operation_list = [RemoteOperation(CleanSharedMem(segments), host=hostName) for hostName, segments in segmentsByHost.items()]
ParallelOperation(operation_list).run()
for operation in operation_list:
try:
operation.get_ret()
except Exception as e:
logger.warning('Unable to clean up shared memory for stopped segments on host (%s)' % operation.host)
def __ensureStopped(self, gpEnv, directives):
"""
@param directives a list of the GpStopSegmentDirectoryDirective values indicating which segments to stop
"""
if len(directives) == 0:
return
logger.info("Ensuring %d failed segment(s) are stopped" % (len(directives)))
segments = [d.getSegment() for d in directives]
segments = self._get_running_postgres_segments(segments)
segmentByHost = GpArray.getSegmentsByHostName(segments)
cmds = []
for hostName, segments in segmentByHost.iteritems():
cmd=gp.GpSegStopCmd("remote segment stop on host '%s'" % hostName,
gpEnv.getGpHome(), gpEnv.getGpVersion(),
mode='fast', dbs=segments, verbose=logging_is_verbose(),
ctxt=base.REMOTE, remoteHost=hostName)
cmds.append( cmd)
# we suppress checking for the error. This is because gpsegstop will actually error
# in many cases where the stop is actually done (that is, for example, the segment is
# running but slow to shutdown so gpsegstop errors after whacking it with a kill)
#
        # Perhaps we should make it check whether the segment is running and only
        # attempt the stop if it is running? In that case, we could propagate the error.
#
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "stopping segments", suppressErrorCheck=True)
def __ensureMarkedDown(self, gpEnv, toEnsureMarkedDown):
"""Waits for FTS prober to mark segments as down"""
wait_time = 60 * 30 # Wait up to 30 minutes to handle very large, busy
# clusters that may have faults. In most cases the
# actual time to wait will be small and this operation
# is only needed when moving mirrors that are up and
# needed to be stopped, an uncommon operation.
dburl = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1')
time_elapsed = 0
seg_up_count = 0
initial_seg_up_count = len(toEnsureMarkedDown)
last_seg_up_count = initial_seg_up_count
if initial_seg_up_count == 0:
# Nothing to wait on
return
logger.info("Waiting for segments to be marked down.")
logger.info("This may take up to %d seconds on large clusters." % wait_time)
        # Wait for all needed segments to be marked down by the prober. We'll wait
        # at most wait_time seconds in total.
while wait_time > time_elapsed:
seg_up_count = 0
current_gparray = GpArray.initFromCatalog(dburl, True)
seg_db_map = current_gparray.getSegDbMap()
# go through and get the status of each segment we need to be marked down
for segdb in toEnsureMarkedDown:
if segdb.getSegmentDbId() in seg_db_map and seg_db_map[segdb.getSegmentDbId()].isSegmentUp() == True:
seg_up_count += 1
if seg_up_count == 0:
break
else:
if last_seg_up_count != seg_up_count:
print "\n",
logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count - seg_up_count, initial_seg_up_count))
last_seg_up_count = seg_up_count
for _i in range(1,5):
time.sleep(1)
sys.stdout.write(".")
sys.stdout.flush()
time_elapsed += 5
if seg_up_count == 0:
print "\n",
logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count, initial_seg_up_count))
else:
raise Exception("%d segments were not marked down by FTS" % seg_up_count)
def __cleanUpSegmentDirectories(self, directives):
if len(directives) == 0:
return
logger.info("Cleaning files from %d segment(s)" % (len(directives)))
segments = [d.getSegment() for d in directives]
segmentByHost = GpArray.getSegmentsByHostName(segments)
cmds = []
for hostName, segments in segmentByHost.iteritems():
cmds.append( gp.GpCleanSegmentDirectories("clean segment directories on %s" % hostName, \
segments, gp.REMOTE, hostName))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning existing directories")
def __createStartSegmentsOp(self, gpEnv):
return startSegments.StartSegmentsOperation(self.__pool, self.__quiet,
gpEnv.getLocaleData(), gpEnv.getGpVersion(),
gpEnv.getGpHome(), gpEnv.getMasterDataDir()
)
def __updateGpIdFile(self, gpEnv, gpArray, segments):
segmentByHost = GpArray.getSegmentsByHostName(segments)
newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(segments)
cmds = []
for hostName in segmentByHost.keys():
segmentInfo = newSegmentInfo[hostName]
checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
cmd = gp.ConfigureNewSegment("update gpid file",
segmentInfo,
newSegments=False,
verbose=gplog.logging_is_verbose(),
batchSize=self.__parallelDegree,
ctxt=gp.REMOTE,
remoteHost=hostName,
validationOnly=False,
writeGpIdFileOnly=True)
cmds.append(cmd)
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "writing updated gpid files")
def __startAll(self, gpEnv, gpArray, segments):
# the newly started segments should belong to the current era
era = read_era(gpEnv.getMasterDataDir(), logger=gplog.get_logger_if_verbose())
segmentStartResult = self.__createStartSegmentsOp(gpEnv).startSegments(gpArray, segments, startSegments.START_AS_PRIMARY_OR_MIRROR, era)
for failure in segmentStartResult.getFailedSegmentObjs():
failedSeg = failure.getSegment()
failureReason = failure.getReason()
logger.warn("Failed to start segment. The fault prober will shortly mark it as down. Segment: %s: REASON: %s" % (failedSeg, failureReason))
pass
def __convertAllPrimaries(self, gpEnv, gpArray, segments, convertUsingFullResync):
segmentStartResult = self.__createStartSegmentsOp(gpEnv).transitionSegments(gpArray, segments, convertUsingFullResync, startSegments.MIRROR_MODE_PRIMARY)
for failure in segmentStartResult.getFailedSegmentObjs():
failedSeg = failure.getSegment()
failureReason = failure.getReason()
logger.warn("Failed to inform primary segment of updated mirroring state. Segment: %s: REASON: %s" % (failedSeg, failureReason))
class GpCleanupSegmentDirectoryDirective:
def __init__(self, segment):
checkNotNone("segment", segment)
self.__segment = segment
def getSegment(self):
return self.__segment
class GpStopSegmentDirectoryDirective:
def __init__(self, segment):
checkNotNone("segment", segment)
self.__segment = segment
def getSegment(self):
return self.__segment
class GpCopySegmentDirectoryDirective:
def __init__(self, source, dest, isTargetReusedLocation ):
"""
@param isTargetReusedLocation if True then the dest location is a cleaned-up location
"""
checkNotNone("source", source)
checkNotNone("dest", dest)
self.__source = source
self.__dest = dest
self.__isTargetReusedLocation = isTargetReusedLocation
def getSrcSegment(self):
return self.__source
def getDestSegment(self):
return self.__dest
def isTargetReusedLocation(self):
return self.__isTargetReusedLocation
|
|
import discord_logging
import traceback
from datetime import datetime
log = discord_logging.get_logger()
import counters
import utils
import static
from classes.subscription import Subscription
from classes.comment import DbComment
from praw_wrapper import ReturnType, id_from_fullname, PushshiftType
def process_comment(comment, reddit, database, count_string=""):
if comment['author'] == static.ACCOUNT_NAME:
log.info(f"{count_string}: Comment is from updatemebot")
return
if comment['author'] in static.BLACKLISTED_ACCOUNTS:
log.info(f"{count_string}: Comment is from a blacklisted account")
return
counters.replies.labels(source='comment').inc()
log.info(f"{count_string}: Processing comment {comment['id']} from u/{comment['author']}")
body = comment['body'].lower().strip()
use_tag = True
if static.TRIGGER_SUBSCRIBE_LOWER in body:
log.debug("Subscription comment")
recurring = True
elif static.TRIGGER_UPDATE_LOWER in body:
log.debug("Update comment")
recurring = False
elif static.TRIGGER_SUBSCRIBE_ALL_LOWER in body:
log.debug("Subscribe all comment")
recurring = True
use_tag = False
else:
log.debug("Command not in comment")
return
comment_result = None
thread_id = id_from_fullname(comment['link_id'])
subscriber = database.get_or_add_user(comment['author'])
subreddit = database.get_or_add_subreddit(comment['subreddit'])
db_submission = database.get_submission_by_id(thread_id)
tag = None
if db_submission is not None:
author = db_submission.author
if use_tag:
tag = db_submission.tag
else:
comment_result = ReturnType.SUBMISSION_NOT_PROCESSED
reddit_submission = reddit.get_submission(thread_id)
try:
author_name = reddit_submission.author.name
except Exception:
log.warning(f"Unable to fetch parent submission for comment: {thread_id}")
return
author = database.get_or_add_user(author_name)
result_message, subscription = Subscription.create_update_subscription(
database, subscriber, author, subreddit, recurring, tag
)
commented = False
if db_submission is not None and db_submission.comment is not None:
comment_result = ReturnType.THREAD_REPLIED
elif subreddit.is_banned or subreddit.no_comment:
comment_result = ReturnType.FORBIDDEN
elif not subreddit.is_enabled:
comment_result = ReturnType.SUBREDDIT_NOT_ENABLED
if comment_result is None:
reddit_comment = reddit.get_comment(comment['id'])
count_subscriptions = database.get_count_subscriptions_for_author_subreddit(author, subreddit, tag)
db_comment = DbComment(
comment_id=None,
submission=db_submission,
subscriber=subscriber,
author=author,
subreddit=subreddit,
recurring=recurring,
current_count=count_subscriptions,
tag=tag
)
bldr = utils.get_footer(db_comment.render_comment(
count_subscriptions=count_subscriptions,
pushshift_minutes=reddit.get_effective_pushshift_lag()
))
result_id, comment_result = reddit.reply_comment(reddit_comment, ''.join(bldr))
if comment_result in (
ReturnType.INVALID_USER,
ReturnType.USER_DOESNT_EXIST,
ReturnType.THREAD_LOCKED,
ReturnType.DELETED_COMMENT,
ReturnType.RATELIMIT):
log.info(f"Unable to reply as comment: {comment_result.name}")
elif comment_result == ReturnType.FORBIDDEN:
log.warning(f"Banned in subreddit, saving: {subreddit.name}")
subreddit.is_banned = True
else:
if comment_result == ReturnType.NOTHING_RETURNED:
result_id = "QUARANTINED"
log.warning(f"Opting in to quarantined subreddit: {subreddit.name}")
reddit.quarantine_opt_in(subreddit.name)
if result_id is None:
log.warning(f"Got comment ID of None when replying to {comment['id']}")
comment_result = ReturnType.FORBIDDEN
else:
log.info(
f"Subscription created: {subscription.id}, replied as comment: {result_id}")
if comment_result != ReturnType.QUARANTINED:
db_comment.comment_id = result_id
database.add_comment(db_comment)
commented = True
if not commented:
log.info(
f"Subscription created: {subscription.id}, replying as message: {comment_result.name}")
bldr = utils.str_bldr()
pushshift_lag = reddit.get_effective_pushshift_lag()
if pushshift_lag > 15:
bldr.append("There is a ")
if pushshift_lag > 60:
bldr.append(str(int(round(pushshift_lag / 60, 1))))
bldr.append(" hour")
else:
bldr.append(str(pushshift_lag))
bldr.append(" minute")
bldr.append(" delay fetching comments.")
bldr.append("\n\n")
bldr.append(result_message)
bldr = utils.get_footer(bldr)
result = reddit.send_message(subscriber.name, "UpdateMeBot Confirmation", ''.join(bldr))
if result != ReturnType.SUCCESS:
log.warning(f"Unable to send message: {result.name}")
def process_comments(reddit, database):
comments = reddit.get_keyword_comments(static.TRIGGER_COMBINED, database.get_or_init_datetime("comment_timestamp"))
counters.pushshift_delay.labels(client="prod").set(reddit.pushshift_prod_client.lag_minutes())
counters.pushshift_delay.labels(client="beta").set(reddit.pushshift_beta_client.lag_minutes())
counters.pushshift_delay.labels(client="auto").set(reddit.get_effective_pushshift_lag())
if reddit.recent_pushshift_client == PushshiftType.PROD:
counters.pushshift_client.labels(client="prod").set(1)
counters.pushshift_client.labels(client="beta").set(0)
elif reddit.recent_pushshift_client == PushshiftType.BETA:
counters.pushshift_client.labels(client="prod").set(0)
counters.pushshift_client.labels(client="beta").set(1)
else:
counters.pushshift_client.labels(client="prod").set(0)
counters.pushshift_client.labels(client="beta").set(0)
counters.pushshift_failed.labels(client="prod").set(1 if reddit.pushshift_prod_client.failed() else 0)
counters.pushshift_failed.labels(client="beta").set(1 if reddit.pushshift_beta_client.failed() else 0)
counters.pushshift_seconds.labels("prod").observe(reddit.pushshift_prod_client.request_seconds)
counters.pushshift_seconds.labels("beta").observe(reddit.pushshift_beta_client.request_seconds)
if len(comments):
log.debug(f"Processing {len(comments)} comments")
i = 0
for comment in comments[::-1]:
i += 1
mark_read = True
try:
process_comment(comment, reddit, database, f"{i}/{len(comments)}")
except Exception as err:
mark_read = not utils.process_error(
f"Error processing comment: {comment['id']} : {comment['author']}",
err, traceback.format_exc()
)
if mark_read:
reddit.mark_keyword_comment_processed(comment['id'])
database.save_datetime("comment_timestamp", datetime.utcfromtimestamp(comment['created_utc']))
else:
return i
return len(comments)
def update_comments(reddit, database):
count_incorrect = database.get_pending_incorrect_comments()
i = 0
if count_incorrect > 0:
incorrect_items = database.get_incorrect_comments(utils.requests_available(count_incorrect))
for db_comment, new_count in incorrect_items:
i += 1
log.info(
f"{i}/{len(incorrect_items)}/{count_incorrect}: Updating comment : "
f"{db_comment.comment_id} : {db_comment.current_count}/{new_count}")
bldr = utils.get_footer(db_comment.render_comment(count_subscriptions=new_count))
reddit.edit_comment(''.join(bldr), comment_id=db_comment.comment_id)
db_comment.current_count = new_count
else:
log.debug("No incorrect comments")
return i
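# Illustrative loop sketch (not part of the original bot): shows how the two
# entry points above might be driven from a scheduler. The `reddit` and
# `database` arguments are assumed to be the bot's own wrapper objects, as
# elsewhere in this module.
def _example_run_once(reddit, database):
    processed = process_comments(reddit, database)
    updated = update_comments(reddit, database)
    log.debug(f"Processed {processed} comments, updated {updated} existing comments")
    return processed, updated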
|
|
from django.contrib import messages
from django.db.models import Max
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import DetailView, DeleteView, CreateView, UpdateView
from ..forms import (
ProductForm,
ProductReleaseCreateForm,
ProductReleaseEditForm,
ProductReleaseDependencyCreateForm,
ReleaseBuildForm,
CheckCreateForm,
CheckUpdateForm
)
from ..models import Product, ProductRelease, Build, Check
from mixins import RequireAuthenticatedUser
class ProductCreateView(RequireAuthenticatedUser, CreateView):
model = Product
template_name = 'relman/includes/modals/create.html'
form_class = ProductForm
class ProductDetailView(RequireAuthenticatedUser, DetailView):
model = Product
context_object_name = 'product'
def get_context_data(self, **kwargs):
context_data = super(ProductDetailView, self).get_context_data(**kwargs)
release = None
build = None
if 'v' in self.request.GET:
try:
major, minor, patch = self.request.GET['v'].split('.')
release = ProductRelease.objects.get(
product=self.object,
major_version=major,
minor_version=minor,
patch_version=patch
)
context_data['release'] = release
            except (ValueError, ProductRelease.DoesNotExist):
pass
if release is not None and 'b' in self.request.GET:
try:
build = release.builds.get(
build_number=self.request.GET['b'],
)
context_data['build'] = build
except Build.DoesNotExist:
pass
return context_data
class ProductUpdateView(RequireAuthenticatedUser, UpdateView):
model = Product
template_name = 'relman/includes/modals/update.html'
form_class = ProductForm
success_url = '/'
class ReleaseCreateView(RequireAuthenticatedUser, CreateView):
model = ProductRelease
template_name = 'relman/includes/modals/create.html'
form_class = ProductReleaseCreateForm
def dispatch(self, request, *args, **kwargs):
self.product = get_object_or_404(Product, pk=kwargs['product_pk'])
return super(ReleaseCreateView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
form.instance.product = self.product
response = super(ReleaseCreateView, self).form_valid(form)
previous_versions = self.object.previous_versions()
if previous_versions:
self.object.dependencies.add(*previous_versions[0].dependencies.all())
return response
class ReleaseUpdateView(RequireAuthenticatedUser, UpdateView):
model = ProductRelease
template_name = 'relman/includes/modals/update.html'
form_class = ProductReleaseEditForm
class ReleaseDetailView(RequireAuthenticatedUser, DetailView):
model = ProductRelease
context_object_name = 'release'
template_name = 'relman/includes/product__release.html'
class ReleaseDeleteView(RequireAuthenticatedUser, DeleteView):
model = ProductRelease
template_name = 'relman/includes/modals/delete.html'
def get_success_url(self):
messages.warning(self.request, _("{object} has been deleted").format(object=self.object))
return self.object.product.get_absolute_url()
class ReleaseCreateDependencyView(RequireAuthenticatedUser, CreateView):
model = ProductRelease.dependencies.through
template_name = 'relman/includes/modals/create.html'
form_class = ProductReleaseDependencyCreateForm
def dispatch(self, request, *args, **kwargs):
self.release = get_object_or_404(ProductRelease, pk=kwargs['release_pk'])
return super(ReleaseCreateDependencyView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self, **kwargs):
form_kwargs = super(ReleaseCreateDependencyView, self).get_form_kwargs(**kwargs)
form_kwargs['release'] = self.release
return form_kwargs
def form_valid(self, form):
form.instance.productrelease = self.release
return super(ReleaseCreateDependencyView, self).form_valid(form)
def get_success_url(self):
return self.object.productrelease.get_absolute_url()
class ReleaseDeleteDependencyView(RequireAuthenticatedUser, DeleteView):
model = ProductRelease.dependencies.through
template_name = 'relman/includes/modals/delete.html'
def get_object(self):
return get_object_or_404(
self.model,
productrelease_id=self.kwargs['release_pk'],
packageversion_id=self.kwargs['version_pk']
)
def get_context_data(self, **kwargs):
data = super(ReleaseDeleteDependencyView, self).get_context_data(**kwargs)
data['delete_message'] = _(
"Remove {version} as a dependency of {release}?"
).format(
version=self.object.packageversion,
release=self.object.productrelease
)
return data
def get_success_url(self):
return self.object.productrelease.get_absolute_url()
class ReleaseBuildDetailView(RequireAuthenticatedUser, DetailView):
model = Build
context_object_name = 'build'
template_name = 'relman/includes/product__release__build.html'
class ReleaseBuildCreateView(RequireAuthenticatedUser, CreateView):
model = Build
template_name = 'relman/includes/modals/create.html'
form_class = ReleaseBuildForm
def dispatch(self, request, *args, **kwargs):
self.release = get_object_or_404(ProductRelease, pk=kwargs['release_pk'])
return super(ReleaseBuildCreateView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
form.instance.release = self.release
current_build_number = self.release.builds.aggregate(
Max('build_number')
)['build_number__max']
if current_build_number is None:
form.instance.build_number = 1
else:
form.instance.build_number = 1 + current_build_number
return super(ReleaseBuildCreateView, self).form_valid(form)
class ReleaseBuildUpdateView(RequireAuthenticatedUser, UpdateView):
model = Build
template_name = 'relman/includes/modals/update.html'
form_class = ReleaseBuildForm
def get_success_url(self):
messages.success(self.request, _("{object} has been updated").format(object=self.object))
return super(ReleaseBuildUpdateView, self).get_success_url()
class CheckCreateView(RequireAuthenticatedUser, CreateView):
model = Check
template_name = 'relman/includes/modals/create.html'
form_class = CheckCreateForm
def dispatch(self, request, *args, **kwargs):
self.build = get_object_or_404(Build, pk=kwargs['build_pk'])
return super(CheckCreateView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self, **kwargs):
form_kwargs = super(CheckCreateView, self).get_form_kwargs(**kwargs)
form_kwargs['build'] = self.build
return form_kwargs
def form_valid(self, form):
form.instance.build = self.build
return super(CheckCreateView, self).form_valid(form)
class CheckUpdateView(RequireAuthenticatedUser, UpdateView):
model = Check
template_name = 'relman/includes/modals/update.html'
form_class = CheckUpdateForm
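# Illustrative URL wiring sketch (not part of this module): these class-based
# views would typically be hooked up in the app's urls.py roughly as below;
# the regexes and URL names are assumptions for the example only.
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^product/add/$', ProductCreateView.as_view(), name='product_create'),
#       url(r'^product/(?P<pk>\d+)/$', ProductDetailView.as_view(), name='product_detail'),
#       url(r'^release/(?P<product_pk>\d+)/add/$', ReleaseCreateView.as_view(), name='release_create'),
#       url(r'^build/(?P<build_pk>\d+)/check/add/$', CheckCreateView.as_view(), name='check_create'),
#   ]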
|
|
#!/usr/bin/python
import logging
import traceback
import json
import yaml
import sys
import base64
import uuid
import collections
sys.path.append('/opt/contrail/fabric_ansible_playbooks/filter_plugins')
sys.path.append('/opt/contrail/fabric_ansible_playbooks/common')
from contrail_command import CreateCCNode, CreateCCNodeProfile
import jsonschema
from job_manager.job_utils import JobVncApi
class NodeProfileLog(object):
_instance = None
@staticmethod
def instance():
if not NodeProfileLog._instance:
NodeProfileLog._instance = NodeProfileLog()
return NodeProfileLog._instance
# end instance
@staticmethod
def _init_logging():
"""
:return: type=<logging.Logger>
"""
logger = logging.getLogger('ServerFilter')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y/%m/%d %H:%M:%S'
)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger
# end _init_logging
def __init__(self):
self._msg = None
self._logs = []
self._logger = NodeProfileLog._init_logging()
# end __init__
def logger(self):
return self._logger
# end logger
def msg_append(self, msg):
if msg:
if not self._msg:
self._msg = msg + ' ... '
else:
self._msg += msg + ' ... '
    # end msg_append
def msg_end(self):
if self._msg:
self._msg += 'done'
self._logs.append(self._msg)
self._logger.warn(self._msg)
self._msg = None
# end msg_end
def dump(self):
retval = ""
for msg in self._logs:
retval += msg + '\n'
return retval
# end dump
# end NodeProfileLog
class FilterModule(object):
@staticmethod
def _init_logging():
logger = logging.getLogger('NodeProfileFilter')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.WARN)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger
def __init__(self):
self._logger = FilterModule._init_logging()
# end __init__
@staticmethod
def _validate_job_ctx(job_ctx):
vnc_api = JobVncApi.vnc_init(job_ctx)
job_template_fqname = job_ctx.get('job_template_fqname')
if not job_template_fqname:
raise ValueError('Invalid job_ctx: missing job_template_fqname')
job_input = job_ctx.get('input')
if not job_input:
raise ValueError('Invalid job_ctx: missing job_input')
# retrieve job input schema from job template to validate the job input
node_profile_template = vnc_api.job_template_read(
fq_name=job_template_fqname
)
input_schema = node_profile_template.get_job_template_input_schema()
jsonschema.validate(job_input, input_schema)
return job_input
def filters(self):
return {
'add_node_profiles_from_file': self.add_node_profiles_from_file
}
def get_cc_node_profile_payload(self, node_profile_dict):
cc_node_profile = {"resources":
[{
"kind": "node_profile",
"data": {
"parent_type": "global-system-config",
"fq_name": ["default-global-system-config",
node_profile_dict['name']]
}
}]
}
cc_node_profile["resources"][0]["data"].update(node_profile_dict)
return cc_node_profile
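    # Shape of the payload built above, for an input dict whose name is e.g.
    # "juniper-qfx-np" (the name is illustrative only):
    # {"resources": [{"kind": "node_profile",
    #                 "data": {"parent_type": "global-system-config",
    #                          "fq_name": ["default-global-system-config",
    #                                      "juniper-qfx-np"],
    #                          "name": "juniper-qfx-np",
    #                          ...remaining keys from node_profile_dict...}}]}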
def create_cc_node_profile(self, node_profile_dict):
if not node_profile_dict.get('uuid', None):
node_profile_dict['uuid'] = str(uuid.uuid4())
cc_node_profile_payload = self.get_cc_node_profile_payload(
node_profile_dict)
return cc_node_profile_payload, node_profile_dict['name']
def convert(self, data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(self.convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(self.convert, data))
else:
return data
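    # convert() recursively turns the unicode keys and values produced by
    # json.loads into plain byte strings so the resulting payload matches what
    # the (Python 2) Contrail Command client code expects.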
def import_node_profiles(self, data, node_profile_object):
added_node_profiles = []
if isinstance(data, dict) and "node_profile" in data:
node_profile_list = data['node_profile']
for node_profile_dict in node_profile_list:
node_profile_payload, node_profile_name = \
self.create_cc_node_profile(node_profile_dict)
added_node_profiles.append(node_profile_name)
node_profile_object.create_cc_node_profile(node_profile_payload)
return added_node_profiles
# ***************** add_node_profiles_from_file filter *********************************
def add_node_profiles_from_file(self, job_ctx):
"""
:param job_ctx: Dictionary
example:
{
"auth_token": "EB9ABC546F98",
"job_input": {
"encoded_node_profiles": "....",
"contrail_command_host": "....",
"encoded_file": "...."
}
}
:return: Dictionary
if success, returns
[
<list: imported node profiles>
]
if failure, returns
{
'status': 'failure',
'error_msg': <string: error message>,
'np_log': <string: np_log>
}
"""
try:
job_input = FilterModule._validate_job_ctx(job_ctx)
self._logger.info("Job INPUT:\n" + str(job_input))
encoded_file = job_input.get("encoded_file")
file_format = job_input.get("file_format")
decoded = base64.decodestring(encoded_file)
cc_host = job_input.get('contrail_command_host')
auth_token = job_ctx.get('auth_token')
cc_node_profile_obj = CreateCCNodeProfile(cc_host, auth_token)
self._logger.info("Starting Node Profile Import")
if file_format.lower() == "yaml":
data = yaml.load(decoded)
elif file_format.lower() == "json":
data = self.convert(json.loads(decoded))
else:
raise ValueError('File format not recognized. Only yaml or '
'json supported')
added_profiles = self.import_node_profiles(
data, cc_node_profile_obj)
except Exception as e:
errmsg = "Unexpected error: %s\n%s" % (
str(e), traceback.format_exc()
)
self._logger.error(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
'np_log': NodeProfileLog.instance().dump()
}
return {
'status': 'success',
'added_profiles': added_profiles,
'np_log': NodeProfileLog.instance().dump()
}
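# Minimal usage sketch (assumption: invoked by the fabric-ansible job manager,
# which supplies a job_ctx containing the auth token and a base64-encoded
# YAML/JSON profile file; the snippet below is illustrative only):
#
#     fm = FilterModule()
#     result = fm.add_node_profiles_from_file(job_ctx)
#     if result['status'] == 'success':
#         print(result['added_profiles'])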
|
|
import numpy as np
import os
import dill
import tempfile
import tensorflow as tf
import zipfile
from absl import flags
import baselines.common.tf_util as U
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from pysc2.lib import actions as sc2_actions
from pysc2.env import environment
from pysc2.lib import features
from pysc2.lib import actions
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
FLAGS = flags.FLAGS
class ActWrapper(object):
def __init__(self, act):
self._act = act
#self._act_params = act_params
@staticmethod
def load(path, act_params, num_cpu=16):
with open(path, "rb") as f:
model_data = dill.load(f)
act = deepq.build_act(**act_params)
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def save(self, path):
"""Save model to a pickle located at `path`"""
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path,
os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
dill.dump((model_data), f)
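# ActWrapper.save stores a dill pickle whose payload is a zip archive of the
# TensorFlow checkpoint files written by U.save_state; ActWrapper.load reverses
# this by unpacking the archive into a temporary directory and restoring the
# session state from it.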
def load(path, act_params, num_cpu=16):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
        path to the act function pickle
    act_params: dict
        parameters used to rebuild the act function via deepq.build_act
        (make_obs_ph, q_func, num_actions)
    num_cpu: int
        number of cpus to use for executing the policy
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load(path, num_cpu=num_cpu, act_params=act_params)
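# Example (the path and act_params values are illustrative): restoring a policy
# saved by learn() requires the same observation/model settings it was trained
# with, e.g.
#
#     act = load("mineral_shards.pkl",
#                act_params={'make_obs_ph': make_obs_ph,
#                            'q_func': model,
#                            'num_actions': 4})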
def learn(env,
q_func,
num_actions=4,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=1,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
num_cpu=16,
param_noise=False,
param_noise_threshold=0.05,
callback=None):
"""Train a deepq model.
Parameters
-------
env: pysc2.env.SC2Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from its initial
        value to 1.0. If set to None, defaults to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
num_cpu: int
number of cpus to use for training
callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns True, training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
def make_obs_ph(name):
return U.BatchInput((32, 32), name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=num_actions,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
scope="deepq")
#
# act_y, train_y, update_target_y, debug_y = deepq.build_train(
# make_obs_ph=make_obs_ph,
# q_func=q_func,
# num_actions=num_actions,
# optimizer=tf.train.AdamOptimizer(learning_rate=lr),
# gamma=gamma,
# grad_norm_clipping=10,
# scope="deepq_y"
# )
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': num_actions,
}
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(
buffer_size, alpha=prioritized_replay_alpha)
# replay_buffer_y = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(
prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
# beta_schedule_y = LinearSchedule(prioritized_replay_beta_iters,
# initial_p=prioritized_replay_beta0,
# final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
# replay_buffer_y = ReplayBuffer(buffer_size)
beta_schedule = None
# beta_schedule_y = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(
schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
# update_target_y()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
# Select all marines first
obs = env.step(
actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])])
player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
screen = (player_relative == _PLAYER_NEUTRAL).astype(int) #+ path_memory
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
player = [int(player_x.mean()), int(player_y.mean())]
if (player[0] > 16):
screen = shift(LEFT, player[0] - 16, screen)
elif (player[0] < 16):
screen = shift(RIGHT, 16 - player[0], screen)
if (player[1] > 16):
screen = shift(UP, player[1] - 16, screen)
elif (player[1] < 16):
screen = shift(DOWN, 16 - player[1], screen)
reset = True
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join("model/", "mineral_shards")
print(model_file)
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
if param_noise_threshold >= 0.:
update_param_noise_threshold = param_noise_threshold
else:
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(
1. - exploration.value(t) +
exploration.value(t) / float(num_actions))
kwargs['reset'] = reset
kwargs[
'update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(
np.array(screen)[None], update_eps=update_eps, **kwargs)[0]
# action_y = act_y(np.array(screen)[None], update_eps=update_eps, **kwargs)[0]
reset = False
coord = [player[0], player[1]]
rew = 0
if (action == 0): #UP
if (player[1] >= 8):
coord = [player[0], player[1] - 8]
#path_memory_[player[1] - 16 : player[1], player[0]] = -1
elif (player[1] > 0):
coord = [player[0], 0]
#path_memory_[0 : player[1], player[0]] = -1
#else:
# rew -= 1
elif (action == 1): #DOWN
if (player[1] <= 23):
coord = [player[0], player[1] + 8]
#path_memory_[player[1] : player[1] + 16, player[0]] = -1
elif (player[1] > 23):
coord = [player[0], 31]
#path_memory_[player[1] : 63, player[0]] = -1
#else:
# rew -= 1
elif (action == 2): #LEFT
if (player[0] >= 8):
coord = [player[0] - 8, player[1]]
#path_memory_[player[1], player[0] - 16 : player[0]] = -1
elif (player[0] < 8):
coord = [0, player[1]]
#path_memory_[player[1], 0 : player[0]] = -1
#else:
# rew -= 1
elif (action == 3): #RIGHT
if (player[0] <= 23):
coord = [player[0] + 8, player[1]]
#path_memory_[player[1], player[0] : player[0] + 16] = -1
elif (player[0] > 23):
coord = [31, player[1]]
#path_memory_[player[1], player[0] : 63] = -1
if _MOVE_SCREEN not in obs[0].observation["available_actions"]:
obs = env.step(actions=[
sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
])
new_action = [
sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord])
]
# else:
# new_action = [sc2_actions.FunctionCall(_NO_OP, [])]
obs = env.step(actions=new_action)
player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
new_screen = (player_relative == _PLAYER_NEUTRAL).astype(
int) #+ path_memory
player_y, player_x = (
player_relative == _PLAYER_FRIENDLY).nonzero()
player = [int(player_x.mean()), int(player_y.mean())]
if (player[0] > 16):
new_screen = shift(LEFT, player[0] - 16, new_screen)
elif (player[0] < 16):
new_screen = shift(RIGHT, 16 - player[0], new_screen)
if (player[1] > 16):
new_screen = shift(UP, player[1] - 16, new_screen)
elif (player[1] < 16):
new_screen = shift(DOWN, 16 - player[1], new_screen)
rew = obs[0].reward
done = obs[0].step_type == environment.StepType.LAST
# Store transition in the replay buffer.
replay_buffer.add(screen, action, rew, new_screen, float(done))
# replay_buffer_y.add(screen, action_y, rew, new_screen, float(done))
screen = new_screen
episode_rewards[-1] += rew
reward = episode_rewards[-1]
if done:
obs = env.reset()
player_relative = obs[0].observation["screen"][
_PLAYER_RELATIVE]
screen = (player_relative == _PLAYER_NEUTRAL).astype(
int) #+ path_memory
player_y, player_x = (
player_relative == _PLAYER_FRIENDLY).nonzero()
player = [int(player_x.mean()), int(player_y.mean())]
# Select all marines first
env.step(actions=[
sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
])
episode_rewards.append(0.0)
#episode_minerals.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(
batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights,
batch_idxes) = experience
# experience_y = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
# (obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y, batch_idxes_y) = experience_y
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
batch_size)
weights, batch_idxes = np.ones_like(rewards), None
# obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y = replay_buffer_y.sample(batch_size)
# weights_y, batch_idxes_y = np.ones_like(rewards_y), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
weights)
# td_errors_y = train_x(obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
# new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes,
new_priorities)
# replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
# update_target_y()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(
episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("reward", reward)
logger.record_tabular("mean 100 episode reward",
mean_100ep_reward)
logger.record_tabular("% time spent exploring",
int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts
and num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log(
"Saving model due to mean reward increase: {} -> {}".
format(saved_mean_reward, mean_100ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(
saved_mean_reward))
U.load_state(model_file)
return ActWrapper(act)
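# Sketch of a typical training call (assumes a CollectMineralShards SC2Env and
# a deepq CNN model; the names below are illustrative and not defined here):
#
#     with sc2_env.SC2Env(map_name="CollectMineralShards", step_mul=8) as env:
#         model = deepq.models.cnn_to_mlp(
#             convs=[(16, 8, 4), (32, 4, 2)], hiddens=[256], dueling=True)
#         act = learn(env, q_func=model, num_actions=4, lr=5e-4,
#                     max_timesteps=100000, prioritized_replay=True)
#         act.save("mineral_shards.pkl")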
def intToCoordinate(num, size=64):
if size != 64:
num = num * size * size // 4096
y = num // size
x = num - size * y
return [x, y]
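# e.g. intToCoordinate(130) -> [2, 2] on the default 64x64 grid; for smaller
# screens the flat index is rescaled before being split into (x, y).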
UP, DOWN, LEFT, RIGHT = 'up', 'down', 'left', 'right'
def shift(direction, number, matrix):
    '''Shift the given 2D matrix by the given number of rows or columns in the
    specified (UP, DOWN, LEFT, RIGHT) direction, zero-fill the vacated cells
    and return the result.
    '''
    if direction == UP:
        matrix = np.roll(matrix, -number, axis=0)
        matrix[-number:, :] = 0  # zero-fill the rows vacated at the bottom
        return matrix
    elif direction == DOWN:
        matrix = np.roll(matrix, number, axis=0)
        matrix[:number, :] = 0  # zero-fill the rows vacated at the top
        return matrix
    elif direction == LEFT:
        matrix = np.roll(matrix, -number, axis=1)
        matrix[:, -number:] = 0  # zero-fill the columns vacated on the right
        return matrix
    elif direction == RIGHT:
        matrix = np.roll(matrix, number, axis=1)
        matrix[:, :number] = 0  # zero-fill the columns vacated on the left
        return matrix
    else:
        return matrix
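# Example: shifting a 3x3 matrix one row up moves every row towards index 0 and
# zero-fills the vacated bottom row:
#
#     shift(UP, 1, np.array([[1, 0, 0],
#                            [0, 1, 0],
#                            [0, 0, 1]]))
#     -> array([[0, 1, 0],
#               [0, 0, 1],
#               [0, 0, 0]])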
|
|
"""Classic mountain car task."""
from rlpy.Tools import plt, bound, fromAtoB
from rlpy.Tools import lines
from .Domain import Domain
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = ["Josh Joseph", "Alborz Geramifard"]
class MountainCar(Domain):
"""
    The goal is to drive an underpowered car up a steep hill.\n
**STATE:** Position and velocity of the car [x, xdot] \n
**ACTIONS:** [Acc backwards, Coast, Acc forward] \n
**TRANSITIONS:** Move along the hill with some noise on the movement. \n
**REWARD:** -1 per step and 0 at or beyond goal (``x-goal > 0``). \n
There is optional noise on vehicle acceleration.
**REFERENCE:**
Based on `RL-Community Java Implementation <http://library.rl-community.org/wiki/Mountain_Car_(Java)>`_
"""
actions_num = 3
state_space_dims = 2
continuous_dims = [0, 1]
XMIN = -1.2 # : Lower bound on domain position
XMAX = 0.6 #: Upper bound on domain position
XDOTMIN = -0.07 # : Lower bound on car velocity
XDOTMAX = 0.07 #: Upper bound on car velocity
INIT_STATE = np.array([-0.5, 0.0]) # : Initial car state
STEP_REWARD = -1 # : Penalty for each step taken before reaching the goal
    GOAL_REWARD = 0  #: Reward for reaching the goal.
#: X-Position of the goal location (Should be at/near hill peak)
GOAL = .5
actions = [-1, 0, 1]
#: Magnitude of noise (times accelerationFactor) in stochastic velocity changes
noise = 0
accelerationFactor = 0.001 # : Magnitude of acceleration action
gravityFactor = -0.0025
#: Hill peaks are generated as sinusoid; this is freq. of that sinusoid.
hillPeakFrequency = 3.0
#discount_factor = .9
episodeCap = 10000 # : Maximum number of steps before terminating episode
# Used for visual stuff:
domain_fig = None
valueFunction_fig = None
policy_fig = None
actionArrow = None
X_discretization = 20
XDot_discretization = 20
CAR_HEIGHT = .2
CAR_WIDTH = .1
ARROW_LENGTH = .2
def __init__(self, noise=0):
"""
:param noise: Magnitude of noise (times accelerationFactor) in stochastic velocity changes
"""
self.statespace_limits = np.array(
[[self.XMIN, self.XMAX], [self.XDOTMIN, self.XDOTMAX]])
        self.noise = noise
# Visual stuff:
self.xTicks = np.linspace(0, self.X_discretization - 1, 5)
self.xTicksLabels = np.linspace(self.XMIN, self.XMAX, 5)
self.yTicks = np.linspace(0, self.XDot_discretization - 1, 5)
self.yTicksLabels = np.linspace(self.XDOTMIN, self.XDOTMAX, 5)
self.MIN_RETURN = self.STEP_REWARD * \
(1 - self.discount_factor ** self.episodeCap) / \
(1 - self.discount_factor) if self.discount_factor != 1 else self.STEP_REWARD * \
self.episodeCap
self.MAX_RETURN = 0
self.DimNames = ['X', 'Xdot']
super(MountainCar, self).__init__()
def step(self, a):
"""
Take acceleration action *a*, adding noise as specified in ``__init__()``.
"""
position, velocity = self.state
noise = self.accelerationFactor * self.noise * \
2 * (self.random_state.rand() - .5)
velocity += (noise +
self.actions[a] * self.accelerationFactor +
np.cos(self.hillPeakFrequency * position) * self.gravityFactor)
velocity = bound(velocity, self.XDOTMIN, self.XDOTMAX)
position += velocity
position = bound(position, self.XMIN, self.XMAX)
if position <= self.XMIN and velocity < 0:
velocity = 0 # Bump into wall
terminal = self.isTerminal()
r = self.GOAL_REWARD if terminal else self.STEP_REWARD
ns = np.array([position, velocity])
self.state = ns.copy()
return r, ns, terminal, self.possibleActions()
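    # The update above implements the classic mountain-car dynamics:
    #   xdot' = bound(xdot + a*accelerationFactor
    #                 + cos(hillPeakFrequency*x)*gravityFactor + noise,
    #                 XDOTMIN, XDOTMAX)
    #   x'    = bound(x + xdot', XMIN, XMAX)
    # with the velocity zeroed when the car is pushed against the left wall.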
def s0(self):
self.state = self.INIT_STATE.copy()
return self.state.copy(), self.isTerminal(), self.possibleActions()
def isTerminal(self):
"""
        :return: ``True`` if the car has passed the goal position (``x > GOAL``).
"""
return self.state[0] > self.GOAL
def showDomain(self, a):
s = self.state
        # Plot the car and an arrow indicating the direction of acceleration.
        # Parts of this code were adapted from Jose Antonio Martin H.
# <[email protected]> online source code
pos, vel = s
if self.domain_fig is None: # Need to initialize the figure
self.domain_fig = plt.figure("Mountain Car Domain")
# plot mountain
mountain_x = np.linspace(self.XMIN, self.XMAX, 1000)
mountain_y = np.sin(3 * mountain_x)
plt.gca(
).fill_between(mountain_x,
min(mountain_y) - self.CAR_HEIGHT * 2,
mountain_y,
color='g')
plt.xlim([self.XMIN - .2, self.XMAX])
plt.ylim(
[min(mountain_y) - self.CAR_HEIGHT * 2,
max(mountain_y) + self.CAR_HEIGHT * 2])
# plot car
self.car = lines.Line2D([], [], linewidth=20, color='b', alpha=.8)
plt.gca().add_line(self.car)
# Goal
plt.plot(self.GOAL, np.sin(3 * self.GOAL), 'yd', markersize=10.0)
plt.axis('off')
plt.gca().set_aspect('1')
self.domain_fig = plt.figure("Mountain Car Domain")
#pos = 0
#a = 0
car_middle_x = pos
car_middle_y = np.sin(3 * pos)
slope = np.arctan(3 * np.cos(3 * pos))
car_back_x = car_middle_x - self.CAR_WIDTH * np.cos(slope) / 2.
car_front_x = car_middle_x + self.CAR_WIDTH * np.cos(slope) / 2.
car_back_y = car_middle_y - self.CAR_WIDTH * np.sin(slope) / 2.
car_front_y = car_middle_y + self.CAR_WIDTH * np.sin(slope) / 2.
self.car.set_data([car_back_x, car_front_x], [car_back_y, car_front_y])
# wheels
# plott(x(1)-0.05,sin(3*(x(1)-0.05))+0.06,'ok','markersize',12,'MarkerFaceColor',[.5 .5 .5]);
# plot(x(1)+0.05,sin(3*(x(1)+0.05))+0.06,'ok','markersize',12,'MarkerFaceColor',[.5 .5 .5]);
# Arrows
if self.actionArrow is not None:
self.actionArrow.remove()
self.actionArrow = None
if self.actions[a] > 0:
self.actionArrow = fromAtoB(
car_front_x, car_front_y,
car_front_x + self.ARROW_LENGTH *
np.cos(slope), car_front_y +
self.ARROW_LENGTH * np.sin(slope),
#car_front_x + self.CAR_WIDTH*cos(slope)/2., car_front_y + self.CAR_WIDTH*sin(slope)/2.+self.CAR_HEIGHT,
'k', "arc3,rad=0",
0, 0, 'simple'
)
if self.actions[a] < 0:
self.actionArrow = fromAtoB(
car_back_x, car_back_y,
car_back_x - self.ARROW_LENGTH *
np.cos(slope), car_back_y -
self.ARROW_LENGTH * np.sin(slope),
#car_front_x + self.CAR_WIDTH*cos(slope)/2., car_front_y + self.CAR_WIDTH*sin(slope)/2.+self.CAR_HEIGHT,
'r', "arc3,rad=0",
0, 0, 'simple'
)
plt.draw()
def showLearning(self, representation):
pi = np.zeros(
(self.X_discretization,
self.XDot_discretization),
'uint8')
V = np.zeros((self.X_discretization, self.XDot_discretization))
if self.valueFunction_fig is None:
self.valueFunction_fig = plt.figure("Value Function")
self.valueFunction_im = plt.imshow(
V,
cmap='ValueFunction',
interpolation='nearest',
origin='lower',
vmin=self.MIN_RETURN,
vmax=self.MAX_RETURN)
plt.xticks(self.xTicks, self.xTicksLabels, fontsize=12)
plt.yticks(self.yTicks, self.yTicksLabels, fontsize=12)
plt.xlabel(r"$x$")
plt.ylabel(r"$\dot x$")
self.policy_fig = plt.figure("Policy")
self.policy_im = plt.imshow(
pi,
cmap='MountainCarActions',
interpolation='nearest',
origin='lower',
vmin=0,
vmax=self.actions_num)
plt.xticks(self.xTicks, self.xTicksLabels, fontsize=12)
plt.yticks(self.yTicks, self.yTicksLabels, fontsize=12)
plt.xlabel(r"$x$")
plt.ylabel(r"$\dot x$")
plt.show()
for row, xDot in enumerate(np.linspace(self.XDOTMIN, self.XDOTMAX, self.XDot_discretization)):
for col, x in enumerate(np.linspace(self.XMIN, self.XMAX, self.X_discretization)):
s = np.array([x, xDot])
Qs = representation.Qs(s, False)
As = self.possibleActions()
pi[row, col] = representation.bestAction(s, False, As)
V[row, col] = max(Qs)
self.valueFunction_im.set_data(V)
self.policy_im.set_data(pi)
self.valueFunction_fig = plt.figure("Value Function")
plt.draw()
self.policy_fig = plt.figure("Policy")
plt.draw()
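# Minimal usage sketch (assumes rlpy's Domain base class supplies the usual
# random_state and discount_factor; the values below are illustrative):
#
#     domain = MountainCar(noise=0.1)
#     s, terminal, actions = domain.s0()
#     r, ns, terminal, actions = domain.step(2)  # accelerate forward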
|
|
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.auth_type import AuthType
from consts.event_type import EventType
from consts.media_type import MediaType
from helpers.media_helper import MediaParser
from helpers.suggestions.suggestion_creator import SuggestionCreator
from models.account import Account
from models.event import Event
from models.match import Match
from models.media import Media
from models.suggestion import Suggestion
from models.team import Team
class TestTeamMediaSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="[email protected]",
registered=True)
self.account.put()
def tearDown(self):
self.testbed.deactivate()
def testCreateSuggestion(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"frc1124",
"2016")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'imgur', 'ruRAxDm')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("http://imgur.com/ruRAxDm")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def testCreateSuggestionWithUrlParams(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=VP992UKFbko",
"frc1124",
"2016")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'youtube', 'VP992UKFbko')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=VP992UKFbko")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def testCleanUrl(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
" http://imgur.com/ruRAxDm?foo=bar#meow ",
"frc1124",
"2016")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'imgur', 'ruRAxDm')
suggestion = Suggestion.get_by_id(suggestion_id)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'media')
def testDuplicateSuggestion(self):
suggestion_id = Suggestion.render_media_key_name('2016', 'team', 'frc1124', 'imgur', 'ruRAxDm')
Suggestion(
id=suggestion_id,
author=self.account.key,
review_state=Suggestion.REVIEW_PENDING,
target_key="2012cmp",
target_model="event").put()
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"frc1124",
"2016")
self.assertEqual(status, 'suggestion_exists')
def testMediaExists(self):
media_id = Media.render_key_name(MediaType.IMGUR, 'ruRAxDm')
Media.get_or_insert(
media_id,
media_type_enum=MediaType.IMGUR,
foreign_key='ruRAxDm',
references=[ndb.Key(Team, 'frc1124')]).put()
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"frc1124",
"2016")
self.assertEqual(status, 'media_exists')
def testBadUrl(self):
status, _ = SuggestionCreator.createTeamMediaSuggestion(
self.account.key,
"http://foo.com/blah",
"frc1124",
"2016")
self.assertEqual(status, 'bad_url')
class TestEventMediaSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="[email protected]",
registered=True)
self.account.put()
def tearDown(self):
self.testbed.deactivate()
def testCreateSuggestion(self):
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=H-54KMwMKY0",
"2016nyny")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestion_id = Suggestion.render_media_key_name('2016', 'event', '2016nyny', 'youtube', 'H-54KMwMKY0')
suggestion = Suggestion.get_by_id(suggestion_id)
expected_dict = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=H-54KMwMKY0")
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_model, 'event_media')
self.assertDictContainsSubset(expected_dict, suggestion.contents)
def testCreateNonVideoSuggestion(self):
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"http://imgur.com/ruRAxDm",
"2016nyny")
self.assertEqual(status, 'bad_url')
def testDuplicateSuggestion(self):
suggestion_id = Suggestion.render_media_key_name('2016', 'event', '2016nyny', 'youtube', 'H-54KMwMKY0')
Suggestion(
id=suggestion_id,
author=self.account.key,
review_state=Suggestion.REVIEW_PENDING,
target_key="2016nyny",
target_model="event_media").put()
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=H-54KMwMKY0",
"2016nyny")
self.assertEqual(status, 'suggestion_exists')
def testMediaExists(self):
media_id = Media.render_key_name(MediaType.YOUTUBE_VIDEO, 'H-54KMwMKY0')
Media.get_or_insert(
media_id,
media_type_enum=MediaType.YOUTUBE_VIDEO,
foreign_key='H-54KMwMKY0',
references=[ndb.Key(Event, '2016nyny')]).put()
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"https://www.youtube.com/watch?v=H-54KMwMKY0",
"2016nyny")
self.assertEqual(status, 'media_exists')
def testCreateBadUrl(self):
status, _ = SuggestionCreator.createEventMediaSuggestion(
self.account.key,
"http://foobar.com/ruRAxDm",
"2016nyny")
self.assertEqual(status, 'bad_url')
class TestOffseasonEventSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="[email protected]",
registered=True)
self.account.put()
def tearDown(self):
self.testbed.deactivate()
def testCreateSuggestion(self):
status, _ = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street",
"New York", "NY", "USA")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['name'], "Test Event")
self.assertEqual(suggestion.contents['start_date'], '2016-5-1')
self.assertEqual(suggestion.contents['end_date'], '2016-5-2')
self.assertEqual(suggestion.contents['website'], 'http://foo.bar.com')
self.assertEqual(suggestion.contents['address'], '123 Fake Street')
self.assertEqual(suggestion.contents['city'], 'New York')
self.assertEqual(suggestion.contents['state'], 'NY')
self.assertEqual(suggestion.contents['country'], 'USA')
self.assertEqual(suggestion.contents['venue_name'], 'The Venue')
def testMissingParameters(self):
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('name' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('start_date' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('end_date' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('website' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('venue_address' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"2016-5-2",
"http://foo.bar.com",
"",
"123 Fake Street", "", "", "")
self.assertEqual(status, 'validation_failure')
self.assertTrue('venue_name' in failures)
self.assertTrue('venue_city' in failures)
self.assertTrue('venue_state' in failures)
self.assertTrue('venue_country' in failures)
def testOutOfOrderDates(self):
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-4",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('end_date' in failures)
def testMalformedDates(self):
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"meow",
"2016-5-2",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('start_date' in failures)
status, failures = SuggestionCreator.createOffseasonEventSuggestion(
self.account.key,
"Test Event",
"2016-5-1",
"moo",
"http://foo.bar.com",
"The Venue",
"123 Fake Street", "New York", "NY", "USA")
self.assertEqual(status, 'validation_failure')
self.assertTrue('end_date' in failures)
class TestApiWriteSuggestionCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="[email protected]",
registered=True)
self.account.put()
def tearDown(self):
self.testbed.deactivate()
def testCreateSuggestion(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[1, 2, 3])
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['event_key'], "2016test")
self.assertEqual(suggestion.contents['affiliation'], "Event Organizer")
self.assertListEqual(suggestion.contents['auth_types'], [1, 2, 3])
def testOfficialEvent(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.REGIONAL)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[AuthType.MATCH_VIDEO, AuthType.EVENT_MATCHES, AuthType.EVENT_ALLIANCES])
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created with only MATCH_VIDEO permission
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['event_key'], "2016test")
self.assertEqual(suggestion.contents['affiliation'], "Event Organizer")
self.assertListEqual(suggestion.contents['auth_types'], [AuthType.MATCH_VIDEO])
def testNoEvent(self):
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[1, 2, 3])
self.assertEqual(status, 'bad_event')
def testNoRole(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"",
[1, 2, 3])
self.assertEqual(status, 'no_affiliation')
def testUndefinedAuthType(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createApiWriteSuggestion(
self.account.key,
"2016test",
"Event Organizer",
[1, 2, -1, -2]) # -1 and -2 should be filtered out
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.contents['event_key'], "2016test")
self.assertEqual(suggestion.contents['affiliation'], "Event Organizer")
self.assertListEqual(suggestion.contents['auth_types'], [1, 2])
class TestSuggestEventWebcastCreator(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="[email protected]",
registered=True)
self.account.put()
def tearDown(self):
self.testbed.deactivate()
def testBadEvent(self):
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'bad_event')
def testCreateSuggestion(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'success')
# Ensure the Suggestion gets created
expected_key = "webcast_2016test_twitch_frcgamesense_None"
suggestion = Suggestion.get_by_id(expected_key)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.target_key, "2016test")
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertIsNotNone(suggestion.contents)
self.assertEqual(suggestion.contents.get('webcast_url'), "http://twitch.tv/frcgamesense")
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
def testCleanupUrlWithoutScheme(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'success')
expected_key = "webcast_2016test_twitch_frcgamesense_None"
suggestion = Suggestion.get_by_id(expected_key)
self.assertIsNotNone(suggestion)
self.assertIsNotNone(suggestion.contents)
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
self.assertEqual(suggestion.contents.get('webcast_url'), "http://twitch.tv/frcgamesense")
def testUnknownUrlScheme(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://myweb.site/somewebcast",
"",
"2016test")
self.assertEqual(status, 'success')
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertIsNotNone(suggestion.contents)
self.assertIsNone(suggestion.contents.get('webcast_dict'))
self.assertEqual(suggestion.contents.get('webcast_url'), "http://myweb.site/somewebcast")
def testWebcastAlreadyExists(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016,
event_type_enum=EventType.OFFSEASON,
webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]")
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'webcast_exists')
def testDuplicateSuggestion(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'success')
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"",
"2016test")
self.assertEqual(status, 'suggestion_exists')
def testDuplicateUnknownSuggestionType(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://myweb.site/somewebcast",
"",
"2016test")
self.assertEqual(status, 'success')
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://myweb.site/somewebcast",
"",
"2016test")
self.assertEqual(status, 'suggestion_exists')
def testWebcastBadDate(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016,
event_type_enum=EventType.OFFSEASON,
webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]")
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"BAD DATE",
"2016test")
self.assertEqual(status, 'invalid_date')
def testWebcastGoodDate(self):
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
status = SuggestionCreator.createEventWebcastSuggestion(
self.account.key,
"http://twitch.tv/frcgamesense",
"2017-02-28",
"2016test")
self.assertEqual(status, 'success')
suggestions = Suggestion.query().fetch()
self.assertIsNotNone(suggestions)
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.target_key, "2016test")
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertIsNotNone(suggestion.contents)
self.assertEqual(suggestion.contents.get('webcast_url'), "http://twitch.tv/frcgamesense")
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
self.assertEqual(suggestion.contents.get('webcast_date'), "2017-02-28")
class TestSuggestMatchVideoYouTube(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.account = Account.get_or_insert(
"123",
email="[email protected]",
registered=True)
self.account.put()
event = Event(id="2016test", name="Test Event", event_short="Test Event", year=2016, event_type_enum=EventType.OFFSEASON)
event.put()
self.match = Match(id="2016test_f1m1", event=ndb.Key(Event, "2016test"), year=2016, comp_level="f", set_number=1, match_number=1, alliances_json='')
self.match.put()
def tearDown(self):
self.testbed.deactivate()
def testBadMatch(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016necmp_f1m2")
self.assertEqual(status, 'bad_match')
def testCreateSuggestion(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'success')
suggestion_id = "media_2016_match_2016test_f1m1_youtube_37F5tbrFqJQ"
suggestion = Suggestion.get_by_id(suggestion_id)
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.author, self.account.key)
self.assertEqual(suggestion.target_key, '2016test_f1m1')
self.assertEqual(suggestion.target_model, 'match')
self.assertIsNotNone(suggestion.contents)
self.assertIsNotNone(suggestion.contents.get('youtube_videos'))
self.assertEqual(len(suggestion.contents.get('youtube_videos')), 1)
self.assertEqual(suggestion.contents.get('youtube_videos')[0], "37F5tbrFqJQ")
def testExistingVideo(self):
self.match.youtube_videos = ["37F5tbrFqJQ"]
self.match.put()
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'video_exists')
def testExistingSuggestion(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'success')
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "37F5tbrFqJQ", "2016test_f1m1")
self.assertEqual(status, 'suggestion_exists')
def testBadYouTubeKey(self):
status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key, "", "2016test_f1m1")
self.assertEqual(status, 'bad_url')
|
|
import os
import time
import typing
from datetime import datetime
import cauldron
from cauldron import environ
from cauldron.environ import Response
from cauldron.runner import html_file
from cauldron.runner import markdown_file
from cauldron.runner import python_file
from cauldron.runner import redirection
from cauldron.session.projects import Project
from cauldron.session.projects import ProjectStep
from cauldron.session.projects import StopCondition
ERROR_STATUS = 'error'
OK_STATUS = 'ok'
SKIP_STATUS = 'skip'
def get_step(
project: Project,
step: typing.Union[ProjectStep, str]
) -> typing.Union[ProjectStep, None]:
"""
:param project:
:param step:
:return:
"""
if isinstance(step, ProjectStep):
return step
matches = [ps for ps in project.steps if ps.definition.name == step]
return matches[0] if len(matches) > 0 else None
def has_extension(file_path: str, *args: str) -> bool:
"""
Checks to see if the given file path ends with any of the specified file
extensions. If a file extension does not begin with a '.' it will be added
automatically
:param file_path:
The path on which the extensions will be tested for a match
:param args:
One or more extensions to test for a match with the file_path argument
:return:
Whether or not the file_path argument ended with one or more of the
specified extensions
"""
def add_dot(extension):
return (
extension
if extension.startswith('.') else
'.{}'.format(extension)
)
return any([
file_path.endswith(add_dot(extension))
for extension in args
])
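# e.g. has_extension('step_01.py', 'py') and has_extension('notes.md', '.md')
# both return True; the leading dot is added automatically when missing.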
def _execute_step(project: Project, step: ProjectStep) -> dict:
if has_extension(step.source_path, 'md'):
return markdown_file.run(project, step)
if has_extension(step.source_path, 'html'):
return html_file.run(project, step)
# Mark the downstream steps as dirty because this one has run
for s in project.steps[(step.index + 1):]:
s.mark_dirty(True)
if has_extension(step.source_path, 'py'):
return python_file.run(project, step)
return {'success': False}
def run_step(
response: Response,
project: Project,
step: typing.Union[ProjectStep, str],
force: bool = False
) -> bool:
"""
:param response:
:param project:
:param step:
:param force:
:return:
"""
step = get_step(project, step)
if step is None:
return False
status = check_status(response, project, step, force)
if status == ERROR_STATUS:
return False
step.error = None
if status == SKIP_STATUS:
return True
os.chdir(os.path.dirname(step.source_path))
project.current_step = step
step.report.clear()
step.dom = None
step.is_visible = True
step.is_running = True
step.progress_message = None
step.progress = 0
step.sub_progress_message = None
step.sub_progress = 0
step.start_time = datetime.utcnow()
step.end_time = None
# Set the top-level display and cache values to the current project values
# before running the step for availability within the step scripts
cauldron.shared = cauldron.project.shared
redirection.enable(step)
try:
result = _execute_step(project, step)
except Exception as error:
result = dict(
success=False,
message='{}'.format(error),
html_message='<pre>{}</pre>'.format(error)
)
step.end_time = datetime.utcnow()
os.chdir(environ.configs.fetch('directory', os.path.expanduser('~')))
step.mark_dirty(not result['success'])
step.error = result.get('html_message')
step.progress = 0
step.progress_message = None
step.dumps(running_override=False)
# Make sure this is called prior to printing response information to the
# console or that will come along for the ride
redirection.disable(step)
step.project.stop_condition = result.get(
'stop_condition',
StopCondition(False, False)
)
if result['success']:
environ.log('[{}]: Updated in {}'.format(
step.definition.name,
step.get_elapsed_timestamp()
))
else:
response.fail(
message='Step execution error',
code='EXECUTION_ERROR',
project=project.kernel_serialize(),
step_name=step.definition.name
).console_raw(result.get('message') or '')
# Update the step timestamps so that the final dom changes
# will be included in interactive display updates.
step.report.update_last_modified()
step.last_modified = time.time()
# Wait until all of the step changes have been applied before
# marking the step as no longer running to help prevent race
# conditions with the dom output and running states when polled
# by the UI or other external source in the main thread.
step.is_running = False
return result['success']
def _check_exists(path: str, retry_count: int = 3) -> bool:
"""
Checks multiple times to see if a file exists, with a bit of
a delay between calls to make sure that any race conditions
will be avoided before making the call.
"""
for index in range(retry_count):
time.sleep(0.05 * min(4, index))
if os.path.exists(path):
return True
return False
def check_status(
response: Response,
project: Project,
step: ProjectStep,
force: bool = False
) -> str:
"""..."""
path = step.source_path
if step.is_muted:
environ.log('[{}]: Muted (skipped)'.format(step.definition.name))
return SKIP_STATUS
if not _check_exists(path):
response.fail(
code='MISSING_SOURCE_FILE',
message='Source file not found "{}"'.format(path),
id=step.definition.name,
path=path
).console(
'[{id}]: Not found "{path}"'.format(
id=step.definition.name,
path=path
)
)
return ERROR_STATUS
if not force and not step.is_dirty():
environ.log('[{}]: Nothing to update'.format(step.definition.name))
return SKIP_STATUS
return OK_STATUS
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.functional import cached_property
from paypal.standard.helpers import duplicate_txn_id, check_secret
from paypal.standard.conf import RECEIVER_EMAIL, POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT
ST_PP_ACTIVE = 'Active'
ST_PP_CANCELLED = 'Cancelled'
ST_PP_CANCELED_REVERSAL = 'Canceled_Reversal'
ST_PP_CLEARED = 'Cleared'
ST_PP_COMPLETED = 'Completed'
ST_PP_CREATED = 'Created'
ST_PP_DENIED = 'Denied'
ST_PP_EXPIRED = 'Expired'
ST_PP_FAILED = 'Failed'
ST_PP_PAID = 'Paid'
ST_PP_PENDING = 'Pending'
ST_PP_PROCESSED = 'Processed'
ST_PP_REFUNDED = 'Refunded'
ST_PP_REFUSED = 'Refused'
ST_PP_REVERSED = 'Reversed'
ST_PP_REWARDED = 'Rewarded'
ST_PP_UNCLAIMED = 'Unclaimed'
ST_PP_UNCLEARED = 'Uncleared'
ST_PP_VOIDED = 'Voided'
try:
from idmapper.models import SharedMemoryModel as Model
except ImportError:
Model = models.Model
class PayPalStandardBase(Model):
"""Meta class for common variables shared by IPN and PDT: http://tinyurl.com/cuq6sj"""
# @@@ Might want to add all these one distant day.
# FLAG_CODE_CHOICES = (
# PAYMENT_STATUS_CHOICES = "Canceled_ Reversal Completed Denied Expired Failed Pending Processed Refunded Reversed Voided".split()
PAYMENT_STATUS_CHOICES = (ST_PP_ACTIVE, ST_PP_CANCELLED, ST_PP_CANCELED_REVERSAL,
ST_PP_CLEARED,
ST_PP_COMPLETED, ST_PP_CREATED, ST_PP_DENIED,
ST_PP_EXPIRED, ST_PP_FAILED, ST_PP_PAID,
ST_PP_PENDING, ST_PP_PROCESSED, ST_PP_REFUNDED,
ST_PP_REFUSED, ST_PP_REVERSED, ST_PP_REWARDED,
ST_PP_UNCLAIMED, ST_PP_UNCLEARED, ST_PP_VOIDED,)
# AUTH_STATUS_CHOICES = "Completed Pending Voided".split()
# ADDRESS_STATUS_CHOICES = "confirmed unconfirmed".split()
# PAYER_STATUS_CHOICES = "verified / unverified".split()
    # PAYMENT_TYPE_CHOICES = "echeck / instant".split()
# PENDING_REASON = "address authorization echeck intl multi-currency unilateral upgrade verify other".split()
# REASON_CODE = "chargeback guarantee buyer_complaint refund other".split()
# TRANSACTION_ENTITY_CHOICES = "auth reauth order payment".split()
# Transaction and Notification-Related Variables
business = models.CharField(max_length=127, blank=True, help_text="Email where the money was sent.")
charset = models.CharField(max_length=32, blank=True)
custom = models.CharField(max_length=255, blank=True)
notify_version = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
parent_txn_id = models.CharField("Parent Transaction ID", max_length=19, blank=True)
receiver_email = models.EmailField(max_length=127, blank=True)
receiver_id = models.CharField(max_length=127, blank=True) # 258DLEHY2BDK6
residence_country = models.CharField(max_length=2, blank=True)
test_ipn = models.BooleanField(default=False, blank=True)
txn_id = models.CharField("Transaction ID", max_length=19, blank=True, help_text="PayPal transaction ID.",
db_index=True)
txn_type = models.CharField("Transaction Type", max_length=128, blank=True, help_text="PayPal transaction type.")
verify_sign = models.CharField(max_length=255, blank=True)
# Buyer Information Variables
address_country = models.CharField(max_length=64, blank=True)
address_city = models.CharField(max_length=40, blank=True)
address_country_code = models.CharField(max_length=64, blank=True, help_text="ISO 3166")
address_name = models.CharField(max_length=128, blank=True)
address_state = models.CharField(max_length=40, blank=True)
address_status = models.CharField(max_length=11, blank=True)
address_street = models.CharField(max_length=200, blank=True)
address_zip = models.CharField(max_length=20, blank=True)
contact_phone = models.CharField(max_length=20, blank=True)
first_name = models.CharField(max_length=64, blank=True)
last_name = models.CharField(max_length=64, blank=True)
payer_business_name = models.CharField(max_length=127, blank=True)
payer_email = models.CharField(max_length=127, blank=True)
payer_id = models.CharField(max_length=13, blank=True)
# Payment Information Variables
auth_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
auth_exp = models.CharField(max_length=28, blank=True)
auth_id = models.CharField(max_length=19, blank=True)
auth_status = models.CharField(max_length=9, blank=True)
exchange_rate = models.DecimalField(max_digits=64, decimal_places=16, default=0, blank=True, null=True)
invoice = models.CharField(max_length=127, blank=True)
item_name = models.CharField(max_length=127, blank=True)
item_number = models.CharField(max_length=127, blank=True)
mc_currency = models.CharField(max_length=32, default="USD", blank=True)
mc_fee = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_gross = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_handling = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_shipping = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
memo = models.CharField(max_length=255, blank=True)
num_cart_items = models.IntegerField(blank=True, default=0, null=True)
option_name1 = models.CharField(max_length=64, blank=True)
option_name2 = models.CharField(max_length=64, blank=True)
payer_status = models.CharField(max_length=10, blank=True)
payment_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
payment_gross = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
payment_status = models.CharField(max_length=17, blank=True)
payment_type = models.CharField(max_length=7, blank=True)
pending_reason = models.CharField(max_length=14, blank=True)
protection_eligibility = models.CharField(max_length=32, blank=True)
quantity = models.IntegerField(blank=True, default=1, null=True)
reason_code = models.CharField(max_length=15, blank=True)
remaining_settle = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
settle_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
settle_currency = models.CharField(max_length=32, blank=True)
shipping = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
shipping_method = models.CharField(max_length=255, blank=True)
tax = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
transaction_entity = models.CharField(max_length=7, blank=True)
# Auction Variables
auction_buyer_id = models.CharField(max_length=64, blank=True)
auction_closing_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
auction_multi_item = models.IntegerField(blank=True, default=0, null=True)
for_auction = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# Recurring Payments Variables
amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
amount_per_cycle = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
initial_payment_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
next_payment_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
outstanding_balance = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
payment_cycle = models.CharField(max_length=32, blank=True) #Monthly
period_type = models.CharField(max_length=32, blank=True)
product_name = models.CharField(max_length=128, blank=True)
product_type = models.CharField(max_length=128, blank=True)
profile_status = models.CharField(max_length=32, blank=True)
recurring_payment_id = models.CharField(max_length=128, blank=True) # I-FA4XVST722B9
rp_invoice_id = models.CharField(max_length=127, blank=True) # 1335-7816-2936-1451
time_created = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
# Subscription Variables
amount1 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
amount2 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
amount3 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_amount1 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_amount2 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
mc_amount3 = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
password = models.CharField(max_length=24, blank=True)
period1 = models.CharField(max_length=32, blank=True)
period2 = models.CharField(max_length=32, blank=True)
period3 = models.CharField(max_length=32, blank=True)
reattempt = models.CharField(max_length=1, blank=True)
recur_times = models.IntegerField(blank=True, default=0, null=True)
recurring = models.CharField(max_length=1, blank=True)
retry_at = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
subscr_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
subscr_effective = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
subscr_id = models.CharField(max_length=19, blank=True)
username = models.CharField(max_length=64, blank=True)
# Dispute Resolution Variables
case_creation_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
case_id = models.CharField(max_length=14, blank=True)
case_type = models.CharField(max_length=24, blank=True)
# Variables not categorized
receipt_id = models.CharField(max_length=64, blank=True) # 1335-7816-2936-1451
currency_code = models.CharField(max_length=32, default="USD", blank=True)
handling_amount = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
transaction_subject = models.CharField(max_length=255, blank=True)
# @@@ Mass Pay Variables (Not Implemented, needs a separate model, for each transaction x)
# fraud_managment_pending_filters_x = models.CharField(max_length=255, blank=True)
# option_selection1_x = models.CharField(max_length=200, blank=True)
# option_selection2_x = models.CharField(max_length=200, blank=True)
# masspay_txn_id_x = models.CharField(max_length=19, blank=True)
# mc_currency_x = models.CharField(max_length=32, default="USD", blank=True)
# mc_fee_x = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# mc_gross_x = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# mc_handlingx = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)
# payment_date = models.DateTimeField(blank=True, null=True, help_text="HH:MM:SS DD Mmm YY, YYYY PST")
# payment_status = models.CharField(max_length=9, blank=True)
# reason_code = models.CharField(max_length=15, blank=True)
# receiver_email_x = models.EmailField(max_length=127, blank=True)
# status_x = models.CharField(max_length=9, blank=True)
# unique_id_x = models.CharField(max_length=13, blank=True)
# Non-PayPal Variables - full IPN/PDT query and time fields.
ipaddress = models.IPAddressField(blank=True)
flag = models.BooleanField(default=False, blank=True)
flag_code = models.CharField(max_length=16, blank=True)
flag_info = models.TextField(blank=True)
query = models.TextField(blank=True) # What Paypal sent to us initially
response = models.TextField(blank=True) # What we got back from our request
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# Where did it come from?
# from_view = models.CharField(max_length=6, null=True, blank=True)
class Meta:
abstract = True
def __unicode__(self):
if self.is_transaction():
return self.format % ("Transaction", self.txn_id)
else:
return self.format % ("Recurring", self.recurring_payment_id)
@cached_property
def posted_data_dict(self):
"""
All the data that PayPal posted to us, as a correctly parsed dictionary of values.
"""
if not self.query:
return None
from django.http import QueryDict
roughdecode = dict(item.split('=', 1) for item in self.query.split('&'))
encoding = roughdecode.get('charset', None)
if encoding is None:
return None
query = self.query.encode('ascii')
data = QueryDict(query, encoding=encoding)
return data.dict()
def is_transaction(self):
return len(self.txn_id) > 0
def is_refund(self):
return self.payment_status == ST_PP_REFUNDED
def is_reversed(self):
return self.payment_status == ST_PP_REVERSED
def is_recurring(self):
return len(self.recurring_payment_id) > 0
def is_subscription_cancellation(self):
return self.txn_type == "subscr_cancel"
def is_subscription_end_of_term(self):
return self.txn_type == "subscr_eot"
def is_subscription_modified(self):
return self.txn_type == "subscr_modify"
def is_subscription_signup(self):
return self.txn_type == "subscr_signup"
def is_recurring_create(self):
return self.txn_type == "recurring_payment_profile_created"
def is_recurring_payment(self):
return self.txn_type == "recurring_payment"
def is_recurring_cancel(self):
return self.txn_type == "recurring_payment_profile_cancel"
def is_recurring_skipped(self):
return self.txn_type == "recurring_payment_skipped"
def is_recurring_failed(self):
return self.txn_type == "recurring_payment_failed"
def set_flag(self, info, code=None):
"""Sets a flag on the transaction and also sets a reason."""
self.flag = True
self.flag_info += info
if code is not None:
self.flag_code = code
def verify(self, item_check_callable=None):
"""
Verifies an IPN and a PDT.
Checks for obvious signs of weirdness in the payment and flags appropriately.
Provide a callable that takes an instance of this class as a parameter and returns
a tuple (False, None) if the item is valid. Should return (True, "reason") if the
item isn't valid. Strange but backward compatible :) This function should check
that `mc_gross`, `mc_currency` `item_name` and `item_number` are all correct.
"""
self.response = self._postback().decode('ascii')
self._verify_postback()
if not self.flag:
if self.is_transaction():
if self.payment_status not in self.PAYMENT_STATUS_CHOICES:
self.set_flag("Invalid payment_status. (%s)" % self.payment_status)
if duplicate_txn_id(self):
self.set_flag("Duplicate txn_id. (%s)" % self.txn_id)
if self.receiver_email != RECEIVER_EMAIL:
self.set_flag("Invalid receiver_email. (%s)" % self.receiver_email)
if callable(item_check_callable):
flag, reason = item_check_callable(self)
if flag:
self.set_flag(reason)
else:
# @@@ Run a different series of checks on recurring payments.
pass
self.save()
def verify_secret(self, form_instance, secret):
"""Verifies an IPN payment over SSL using EWP."""
if not check_secret(form_instance, secret):
self.set_flag("Invalid secret. (%s)") % secret
self.save()
def get_endpoint(self):
"""Set Sandbox endpoint if the test variable is present."""
if self.test_ipn:
return SANDBOX_POSTBACK_ENDPOINT
else:
return POSTBACK_ENDPOINT
def send_signals(self):
"""Shout for the world to hear whether a txn was successful."""
raise NotImplementedError
def initialize(self, request):
"""Store the data we'll need to make the postback from the request object."""
if request.method == 'GET':
# PDT only - this data is currently unused
self.query = request.META.get('QUERY_STRING', '')
elif request.method == 'POST':
# The following works if paypal sends an ASCII bytestring, which it does.
self.query = request.body.decode('ascii')
self.ipaddress = request.META.get('REMOTE_ADDR', '')
def _postback(self):
"""Perform postback to PayPal and store the response in self.response."""
raise NotImplementedError
def _verify_postback(self):
"""Check self.response is valid andcall self.set_flag if there is an error."""
raise NotImplementedError
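# --- Illustrative sketch (not part of the original module) --------------------
# The verify() docstring above defines the item_check_callable contract: take
# the IPN/PDT instance and return (False, None) when the item is valid, or
# (True, "reason") when it is not. The SKU, amount and currency below are
# made-up example values, not anything PayPal or this module defines.
def example_item_check(paypal_obj):
    """Example item_check_callable that flags anything other than a 10.00 USD SKU-001."""
    from decimal import Decimal
    if paypal_obj.item_number != "SKU-001":
        return True, "Unexpected item_number: %s" % paypal_obj.item_number
    if paypal_obj.mc_currency != "USD" or paypal_obj.mc_gross != Decimal("10.00"):
        return True, "Unexpected amount: %s %s" % (paypal_obj.mc_gross, paypal_obj.mc_currency)
    return False, None
# A concrete IPN/PDT instance would then be checked with instance.verify(example_item_check).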
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains container classes to represent different protocol buffer types.
This file defines container classes which represent categories of protocol
buffer field types which need extra maintenance. Currently these categories
are:
- Repeated scalar fields - These are all repeated fields which aren't
composite (e.g. they are of simple types like int32, string, etc).
- Repeated composite fields - Repeated fields which are composite. This
includes groups and nested messages.
"""
class BaseContainer(object):
"""Base container class."""
__slots__ = ['_message_listener', '_values']
def __init__(self, message_listener):
"""
Args:
message_listener: A MessageListener implementation.
The RepeatedScalarFieldContainer will call this object's
Modified() method when it is modified.
"""
self._message_listener = message_listener
self._values = []
def __getitem__(self, key):
"""Retrieves item by the specified key."""
return self._values[key]
def __len__(self):
"""Returns the number of elements in the container."""
return len(self._values)
def __ne__(self, other):
"""Checks if another instance isn't equal to this one."""
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def __repr__(self):
return repr(self._values)
def sort(self, *args, **kwargs):
if 'sort_function' in kwargs:
kwargs['cmp'] = kwargs.pop('sort_function')
self._values.sort(*args, **kwargs)
class RepeatedScalarFieldContainer(BaseContainer):
"""Simple, type-checked, list-like container for holding repeated scalars."""
__slots__ = ['_type_checker']
def __init__(self, message_listener, type_checker):
"""
Args:
message_listener: A MessageListener implementation.
The RepeatedScalarFieldContainer will call this object's
Modified() method when it is modified.
type_checker: A type_checkers.ValueChecker instance to run on elements
inserted into this container.
"""
super(RepeatedScalarFieldContainer, self).__init__(message_listener)
self._type_checker = type_checker
def append(self, value):
"""Appends an item to the list. Similar to list.append()."""
self._values.append(self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified()
def insert(self, key, value):
"""Inserts the item at the specified position. Similar to list.insert()."""
self._values.insert(key, self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified()
def extend(self, elem_seq):
"""Extends by appending the given sequence. Similar to list.extend()."""
if not elem_seq:
return
new_values = []
for elem in elem_seq:
new_values.append(self._type_checker.CheckValue(elem))
self._values.extend(new_values)
self._message_listener.Modified()
def MergeFrom(self, other):
"""Appends the contents of another repeated field of the same type to this
one. We do not check the types of the individual fields.
"""
self._values.extend(other._values)
self._message_listener.Modified()
def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.Modified()
def __setitem__(self, key, value):
"""Sets the item on the specified position."""
self._values[key] = self._type_checker.CheckValue(value)
self._message_listener.Modified()
def __getslice__(self, start, stop):
"""Retrieves the subset of items from between the specified indices."""
return self._values[start:stop]
def __setslice__(self, start, stop, values):
"""Sets the subset of items from between the specified indices."""
new_values = []
for value in values:
new_values.append(self._type_checker.CheckValue(value))
self._values[start:stop] = new_values
self._message_listener.Modified()
def __delitem__(self, key):
"""Deletes the item at the specified position."""
del self._values[key]
self._message_listener.Modified()
def __delslice__(self, start, stop):
"""Deletes the subset of items from between the specified indices."""
del self._values[start:stop]
self._message_listener.Modified()
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
if isinstance(other, self.__class__):
return other._values == self._values
return other == self._values
class RepeatedCompositeFieldContainer(BaseContainer):
"""Simple, list-like container for holding repeated composite fields."""
__slots__ = ['_message_descriptor']
def __init__(self, message_listener, message_descriptor):
"""
    Note that we pass in a descriptor instead of the generated class directly,
since at the time we construct a _RepeatedCompositeFieldContainer we
haven't yet necessarily initialized the type that will be contained in the
container.
Args:
message_listener: A MessageListener implementation.
The RepeatedCompositeFieldContainer will call this object's
Modified() method when it is modified.
message_descriptor: A Descriptor instance describing the protocol type
that should be present in this container. We'll use the
_concrete_class field of this descriptor when the client calls add().
"""
super(RepeatedCompositeFieldContainer, self).__init__(message_listener)
self._message_descriptor = message_descriptor
def add(self, **kwargs):
"""Adds a new element at the end of the list and returns it. Keyword
arguments may be used to initialize the element.
"""
new_element = self._message_descriptor._concrete_class(**kwargs)
new_element._SetListener(self._message_listener)
self._values.append(new_element)
if not self._message_listener.dirty:
self._message_listener.Modified()
return new_element
def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
message_class = self._message_descriptor._concrete_class
listener = self._message_listener
values = self._values
for message in elem_seq:
new_element = message_class()
new_element._SetListener(listener)
new_element.MergeFrom(message)
values.append(new_element)
listener.Modified()
def MergeFrom(self, other):
"""Appends the contents of another repeated field of the same type to this
one, copying each individual message.
"""
self.extend(other._values)
def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.Modified()
def __getslice__(self, start, stop):
"""Retrieves the subset of items from between the specified indices."""
return self._values[start:stop]
def __delitem__(self, key):
"""Deletes the item at the specified position."""
del self._values[key]
self._message_listener.Modified()
def __delslice__(self, start, stop):
"""Deletes the subset of items from between the specified indices."""
del self._values[start:stop]
self._message_listener.Modified()
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
if not isinstance(other, self.__class__):
raise TypeError('Can only compare repeated composite fields against '
'other repeated composite fields.')
return self._values == other._values
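# --- Illustrative sketch (not part of the original module) --------------------
# Minimal stand-ins for the MessageListener and type-checker interfaces the
# containers above expect (an object with a dirty attribute and a Modified()
# method, and an object with a CheckValue() method). Real generated messages
# wire these up internally; the classes below exist only for this example.
def _repeated_scalar_example():
  class _FakeListener(object):
    dirty = False
    def Modified(self):
      self.dirty = True
  class _IntChecker(object):
    def CheckValue(self, proposed_value):
      if not isinstance(proposed_value, int):
        raise TypeError('%r is not an int' % (proposed_value,))
      return proposed_value
  listener = _FakeListener()
  values = RepeatedScalarFieldContainer(listener, _IntChecker())
  values.extend([1, 2, 3])  # each element is type-checked; the listener is notified
  values.append(4)          # listener.dirty is already True, so no second notification
  assert list(values[0:4]) == [1, 2, 3, 4] and listener.dirty
  return values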
|
|
# Author: Christopher M. Shymansky <[email protected]>,
# License: ALv2
# Date created: 2016-11-25
# Basic tools
import itertools
# Numerical tools (np.logspace is used in get_default_pipeline_step_parameters)
import numpy as np
# Scalers
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Binarizer
# Feature selection tools
from sklearn.feature_selection import SelectKBest, f_classif
# Unsupervised learning tools
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.manifold import TSNE
# Classifiers
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import AdaBoostClassifier
# Regression tools
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
class PipelineBuilder(object):
"""
Builds a collection of scikit-learn pipelines based on a combinatorial
schematic.
"""
def build_pipeline_bundle(self, pipeline_bundle_schematic):
"""
Returns a list of scikit-learn pipelines given a pipeline bundle
schematic.
TODO: Create a comprehensive description of the pipeline schematic
The general form of the pipeline bundle schematic is:
pipeline_bundle_schematic = [
step_1,
...
step_n
]
Steps take the form:
step_n = {
'step_n_type': {
'none': {}, # optional, used to not include the step as a permutation
step_n_option_1: {},
}
}
pipeline_bundle_schematic = [
{'step_1_type': {
'none': {}
'step_1': {
'step_1_parameter_1': [step_1_parameter_1_value_1 ... step_1_parameter_1_value_p]
...
'step_1_parameter_2': [step_1_parameter_2_value_1 ... step_1_parameter_2_value_m]
}
}},
...
]
"""
# Get supported scikit-learn objects
sklearn_packages = self.get_supported_sklearn_objects()
# Obtain all corresponding scikit-learn package options with all
# parameter combinations for each step
pipeline_options = []
for step in pipeline_bundle_schematic:
step_name = list(step.keys())[0]
step_options = step[step_name]
step_iterations = []
for step_option, step_parameters in step_options.items():
if step_option != 'none':
# Get the parameter names for the current step option
parameter_names = [parameter_name for parameter_name \
in step_parameters.keys()]
# Obtain scikit-learn object for the step option
if 'sklo' in parameter_names:
# Use user-provided object if they mark one of the
# step-option parameter names as 'sklo' (scikit-learn
# object)
sklearn_object = step_parameters['sklo']
else:
# Use default object if supported
if step_option != 'none':
sklearn_object = sklearn_packages[step_name][step_option]
# Form all parameter combinations for the current step option
parameter_combos = [step_parameters[step_parameter_name] \
for step_parameter_name in parameter_names \
if step_parameter_name != 'sklo']
# Remove 'sklo'
parameter_names = [parameter_name for parameter_name \
in parameter_names \
if parameter_name != 'sklo']
# Form all parameter combinations for current step option
# and form and append step tuple
for parameter_combo in list(itertools.product(*parameter_combos)):
parameter_kwargs = {pair[0]: pair[1] \
for pair in zip(parameter_names,
parameter_combo)}
step_addendum = (step_name,
sklearn_object(**parameter_kwargs))
step_iterations.append(step_addendum)
else:
# Append nothing if the step is to be ignored
step_iterations.append(None)
pipeline_options.append(step_iterations)
# Form all step/parameter permutations and convert to scikit-learn
# pipelines
pipelines = []
for pipeline_skeleton in itertools.product(*pipeline_options):
pipelines.append(Pipeline([step for step in pipeline_skeleton \
if step]))
return pipelines
def get_supported_sklearn_objects(self):
"""
Returns supported scikit-learn estimators, selectors, and transformers
"""
sklearn_packages = {
'feature_selection': {
'select_k_best': SelectKBest
},
'scaler': {
'standard': StandardScaler,
'normal': Normalizer,
'min_max': MinMaxScaler,
'binary': Binarizer
},
'transform': {
'pca': PCA
# 't-sne': pipeline_TSNE(n_components=2, init='pca')
},
'pre_estimator': {
'polynomial_features': PolynomialFeatures
},
'estimator': {
'knn': KNeighborsClassifier,
'logistic_regression': LogisticRegression,
'svm': SVC,
'linear_regression': LinearRegression,
'multilayer_perceptron': MLPClassifier,
'random_forest': RandomForestClassifier,
'adaboost': AdaBoostClassifier
}
}
return sklearn_packages
    def get_default_pipeline_step_parameters(self, feature_count):
# Set pre-processing pipeline step parameters
pre_processing_grid_parameters = {
'select_k_best': {
'k': range(1,feature_count+1)
}
}
# Set classifier pipeline step parameters
classifier_grid_parameters = {
'knn': {
'n_neighbors': range(1,31),
'weights': ['uniform','distance']
},
'logistic_regression': {
'C': np.logspace(-10,10,5)
},
'svm': {},
'multilayer_perceptron': {
'hidden_layer_sizes': [[x] for x in range(min(3,feature_count),
max(3,feature_count)+1)]
},
'random_forest': {
'n_estimators': range(90,100)
},
'adaboost': {}
}
# Set regression pipeline step parameters
regression_grid_parameters = {
'polynomial_regression': {
'degree': range(1,5)
}
}
# Return defaults
return pre_processing_grid_parameters,classifier_grid_parameters,regression_grid_parameters
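# --- Illustrative sketch (not part of the original module) --------------------
# A small pipeline bundle schematic in the form described by
# build_pipeline_bundle(): one scaler option plus a knn estimator swept over two
# n_neighbors values, which expands to two scikit-learn pipelines. The step and
# parameter choices are arbitrary example values.
def _example_pipeline_bundle():
    schematic = [
        {'scaler': {
            'standard': {}
        }},
        {'estimator': {
            'knn': {'n_neighbors': [3, 5]}
        }},
    ]
    pipelines = PipelineBuilder().build_pipeline_bundle(schematic)
    # Two pipelines: StandardScaler followed by KNeighborsClassifier(n_neighbors=3 or 5)
    assert len(pipelines) == 2
    return pipelines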
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import date
import tg
from alluratest.tools import assert_equal, assert_in
from allura.lib import helpers as h
from allura.tests import decorators as td
from allura import model as M
from alluratest.controller import TestRestApiBase
from forgeblog import model as BM
class TestBlogApi(TestRestApiBase):
def setUp(self):
super().setUp()
self.setup_with_tools()
@td.with_tool('test', 'Blog', 'blog')
def setup_with_tools(self):
h.set_context('test', 'blog', neighborhood='Projects')
def test_create_post(self):
data = {
'title': 'test',
'text': 'test text',
'state': 'published',
'labels': 'label1, label2'
}
r = self.api_post('/rest/p/test/blog/', **data)
assert_equal(
r.location, 'http://localhost/rest/p/test/blog/%s/%s/test/' %
(date.today().strftime("%Y"), date.today().strftime("%m")))
assert_equal(r.status_int, 201)
url = '/rest' + BM.BlogPost.query.find().first().url()
r = self.api_get('/rest/p/test/blog/')
assert_equal(r.json['posts'][0]['title'], 'test')
assert_in(url, r.json['posts'][0]['url'])
r = self.api_get(url)
assert_equal(r.json['title'], data['title'])
assert_equal(r.json['text'], data['text'])
assert_equal(r.json['author'], 'test-admin')
assert_equal(r.json['state'], data['state'])
assert_equal(r.json['labels'], data['labels'].split(','))
def test_update_post(self):
data = {
'title': 'test',
'text': 'test text',
'state': 'published',
'labels': 'label1, label2'
}
r = self.api_post('/rest/p/test/blog/', **data)
assert_equal(r.status_int, 201)
url = '/rest' + BM.BlogPost.query.find().first().url()
data = {
'text': 'test text2',
'state': 'draft',
'labels': 'label3'
}
self.api_post(url, **data)
r = self.api_get(url)
assert_equal(r.json['title'], 'test')
assert_equal(r.json['text'], data['text'])
assert_equal(r.json['state'], data['state'])
assert_equal(r.json['labels'], data['labels'].split(','))
def test_delete_post(self):
data = {
'title': 'test',
'state': 'published',
'labels': 'label1, label2'
}
r = self.api_post('/rest/p/test/blog/', **data)
assert_equal(r.status_int, 201)
url = '/rest' + BM.BlogPost.query.find().first().url()
self.api_post(url, delete='')
r = self.api_get(url, status=404)
def test_post_does_not_exist(self):
r = self.api_get('/rest/p/test/blog/2013/07/fake/', status=404)
    def test_read_permissions(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='published')
self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'}, status=200)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_read = M.ACE.allow(anon, 'read')
acl.remove(anon_read)
self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'},
status=401)
    def test_new_post_permissions(self):
self.app.post('/rest/p/test/blog/',
params=dict(title='test', text='test text',
state='published'),
extra_environ={'username': '*anonymous'},
status=401)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_write = M.ACE.allow(anon, 'write')
acl.append(anon_write)
self.app.post('/rest/p/test/blog/',
params=dict(title='test', text='test text',
state='published'),
extra_environ={'username': '*anonymous'},
status=201)
    def test_update_post_permissions(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='published')
url = '/rest' + BM.BlogPost.query.find().first().url()
self.app.post(url,
params=dict(title='test2', text='test text2',
state='published'),
extra_environ={'username': '*anonymous'},
status=401)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_write = M.ACE.allow(anon, 'write')
acl.append(anon_write)
self.app.post(url,
params=dict(title='test2', text='test text2',
state='published'),
extra_environ={'username': '*anonymous'},
status=200)
r = self.api_get(url)
assert_equal(r.json['title'], 'test2')
assert_equal(r.json['text'], 'test text2')
assert_equal(r.json['state'], 'published')
def test_permission_draft_post(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='draft')
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'], [])
url = '/rest' + BM.BlogPost.query.find().first().url()
self.app.post(url,
params=dict(title='test2', text='test text2',
state='published'),
extra_environ={'username': '*anonymous'},
status=401)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_write = M.ACE.allow(anon, 'write')
acl.append(anon_write)
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'][0]['title'], 'test')
def test_draft_post(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='draft')
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'], [])
url = '/rest' + BM.BlogPost.query.find().first().url()
self.api_post(url, state='published')
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'][0]['title'], 'test')
def test_pagination(self):
self.api_post('/rest/p/test/blog/', title='test1',
text='test text1', state='published')
self.api_post('/rest/p/test/blog/', title='test2',
text='test text2', state='published')
self.api_post('/rest/p/test/blog/', title='test3',
text='test text3', state='published')
r = self.api_get('/rest/p/test/blog/', limit='1', page='0')
assert_equal(r.json['posts'][0]['title'], 'test3')
assert_equal(len(r.json['posts']), 1)
assert_equal(r.json['count'], 3)
assert_equal(r.json['limit'], 1)
assert_equal(r.json['page'], 0)
r = self.api_get('/rest/p/test/blog/', limit='2', page='0')
assert_equal(r.json['posts'][0]['title'], 'test3')
assert_equal(r.json['posts'][1]['title'], 'test2')
assert_equal(len(r.json['posts']), 2)
assert_equal(r.json['count'], 3)
assert_equal(r.json['limit'], 2)
assert_equal(r.json['page'], 0)
r = self.api_get('/rest/p/test/blog/', limit='1', page='2')
assert_equal(r.json['posts'][0]['title'], 'test1')
assert_equal(r.json['count'], 3)
assert_equal(r.json['limit'], 1)
assert_equal(r.json['page'], 2)
def test_has_access_no_params(self):
self.api_get('/rest/p/test/blog/has_access', status=404)
self.api_get('/rest/p/test/blog/has_access?user=root', status=404)
self.api_get('/rest/p/test/blog/has_access?perm=read', status=404)
def test_has_access_unknown_params(self):
"""Unknown user and/or permission always False for has_access API"""
r = self.api_get(
'/rest/p/test/blog/has_access?user=babadook&perm=read',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
r = self.api_get(
'/rest/p/test/blog/has_access?user=test-user&perm=jump',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
def test_has_access_not_admin(self):
"""
User which has no 'admin' permission on neighborhood can't use
has_access API
"""
self.api_get(
'/rest/p/test/blog/has_access?user=test-admin&perm=admin',
user='test-user',
status=403)
def test_has_access(self):
r = self.api_get(
'/rest/p/test/blog/has_access?user=test-admin&perm=post&access_token=ABCDEF',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], True)
r = self.api_get(
'/rest/p/test/blog/has_access?user=*anonymous&perm=admin',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
def test_create_post_limit_by_project(self):
data = {
'title': 'test against limit',
'text': 'test text',
'state': 'published',
'labels': 'label1, label2'
}
# Set rate limit to 0 in first hour of project
with h.push_config(tg.config, **{'forgeblog.rate_limits': '{"3600": 0}'}):
self.api_post('/rest/p/test/blog/', status=429, **data)
def test_edit_post_limit_by_user(self):
data = {
'title': 'test abc',
'text': 'test text',
'state': 'published',
'labels': 'label1, label2'
}
self.api_post('/rest/p/test/blog/', status=201, **data)
url = '/rest' + BM.BlogPost.query.find().first().url()
data = {
'text': 'test xyz',
'state': 'published',
'labels': 'label3'
}
# Set rate limit to 1 in first hour of user
with h.push_config(tg.config, **{'forgeblog.rate_limits_per_user': '{"3600": 1}'}):
self.api_post(url, status=429, **data)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File IO methods that wrap the C++ FileSystem API.
The C++ FileSystem API is SWIG wrapped in file_io.i. These functions call those
to accomplish basic File IO operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import os
import uuid
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# A good default block size depends on the system in question.
# A somewhat conservative default chosen here.
_DEFAULT_BLOCK_SIZE = 16 * 1024 * 1024
class FileIO(object):
"""FileIO class that exposes methods to read / write to / from files.
The constructor takes the following arguments:
name: name of the file
mode: one of 'r', 'w', 'a', 'r+', 'w+', 'a+'. Append 'b' for bytes mode.
Can be used as an iterator to iterate over lines in the file.
The default buffer size used for the BufferedInputStream used for reading
the file line by line is 1024 * 512 bytes.
"""
def __init__(self, name, mode):
self.__name = name
self.__mode = mode
self._read_buf = None
self._writable_file = None
self._binary_mode = "b" in mode
mode = mode.replace("b", "")
if mode not in ("r", "w", "a", "r+", "w+", "a+"):
raise errors.InvalidArgumentError(
None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
self._read_check_passed = mode in ("r", "r+", "a+", "w+")
self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")
@property
def name(self):
"""Returns the file name."""
return self.__name
@property
def mode(self):
"""Returns the mode in which the file was opened."""
return self.__mode
def _preread_check(self):
if not self._read_buf:
if not self._read_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for reading")
with errors.raise_exception_on_not_ok_status() as status:
self._read_buf = pywrap_tensorflow.CreateBufferedInputStream(
compat.as_bytes(self.__name), 1024 * 512, status)
def _prewrite_check(self):
if not self._writable_file:
if not self._write_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for writing")
with errors.raise_exception_on_not_ok_status() as status:
self._writable_file = pywrap_tensorflow.CreateWritableFile(
compat.as_bytes(self.__name), compat.as_bytes(self.__mode), status)
def _prepare_value(self, val):
if self._binary_mode:
return compat.as_bytes(val)
else:
return compat.as_str_any(val)
def size(self):
"""Returns the size of the file."""
return stat(self.__name).length
def write(self, file_content):
"""Writes file_content to the file. Appends to the end of the file."""
self._prewrite_check()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.AppendToFile(
compat.as_bytes(file_content), self._writable_file, status)
def read(self, n=-1):
"""Returns the contents of a file as a string.
Starts reading from current position in file.
Args:
n: Read 'n' bytes if n != -1. If n = -1, reads to end of file.
Returns:
'n' bytes of the file (or whole file) in bytes mode or 'n' bytes of the
string if in string (regular) mode.
"""
self._preread_check()
with errors.raise_exception_on_not_ok_status() as status:
if n == -1:
length = self.size() - self.tell()
else:
length = n
return self._prepare_value(
pywrap_tensorflow.ReadFromStream(self._read_buf, length, status))
@deprecation.deprecated_args(
None,
"position is deprecated in favor of the offset argument.",
"position")
def seek(self, offset=None, whence=0, position=None):
# TODO(jhseu): Delete later. Used to omit `position` from docs.
# pylint: disable=g-doc-args
"""Seeks to the offset in the file.
Args:
offset: The byte count relative to the whence argument.
whence: Valid values for whence are:
0: start of the file (default)
1: relative to the current position of the file
2: relative to the end of file. offset is usually negative.
"""
# pylint: enable=g-doc-args
self._preread_check()
# We needed to make offset a keyword argument for backwards-compatibility.
# This check exists so that we can convert back to having offset be a
# positional argument.
# TODO(jhseu): Make `offset` a positional argument after `position` is
# deleted.
if offset is None and position is None:
raise TypeError("seek(): offset argument required")
if offset is not None and position is not None:
raise TypeError("seek(): offset and position may not be set "
"simultaneously.")
if position is not None:
offset = position
with errors.raise_exception_on_not_ok_status() as status:
if whence == 0:
pass
elif whence == 1:
offset += self.tell()
elif whence == 2:
offset += self.size()
else:
raise errors.InvalidArgumentError(
None, None,
"Invalid whence argument: {}. Valid values are 0, 1, or 2."
.format(whence))
ret_status = self._read_buf.Seek(offset)
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def readline(self):
r"""Reads the next line from the file. Leaves the '\n' at the end."""
self._preread_check()
return self._prepare_value(self._read_buf.ReadLineAsString())
def readlines(self):
"""Returns all lines from the file in a list."""
self._preread_check()
lines = []
while True:
s = self.readline()
if not s:
break
lines.append(s)
return lines
def tell(self):
"""Returns the current position in the file."""
self._preread_check()
return self._read_buf.Tell()
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def __iter__(self):
return self
def next(self):
retval = self.readline()
if not retval:
raise StopIteration()
return retval
def __next__(self):
return self.next()
def flush(self):
"""Flushes the Writable file.
This only ensures that the data has made its way out of the process without
any guarantees on whether it's written to disk. This means that the
data would survive an application crash but not necessarily an OS crash.
"""
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Flush()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def close(self):
"""Closes FileIO. Should be called for the WritableFile to be flushed."""
self._read_buf = None
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Close()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
self._writable_file = None
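# --- Illustrative sketch (not part of the original module) --------------------
# Intended round trip through the FileIO class above: write two lines, then
# re-open the file and read them back. The path is an arbitrary example and the
# sketch assumes a working TensorFlow build (pywrap_tensorflow) underneath.
def _example_file_io_round_trip(path="/tmp/file_io_example.txt"):
  with FileIO(path, mode="w") as f:
    f.write("first line\n")
    f.write("second line\n")
  with FileIO(path, mode="r") as f:
    lines = f.readlines()  # ['first line\n', 'second line\n']
  return lines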
@tf_export(v1=["gfile.Exists"])
def file_exists(filename):
"""Determines whether a path exists or not.
Args:
filename: string, a path
Returns:
True if the path exists, whether its a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
return file_exists_v2(filename)
@tf_export("io.gfile.exists", v1=[])
def file_exists_v2(path):
"""Determines whether a path exists or not.
Args:
path: string, a path
Returns:
    True if the path exists, whether it's a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
try:
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.FileExists(compat.as_bytes(path), status)
except errors.NotFoundError:
return False
return True
@tf_export("gfile.Remove")
def delete_file(filename):
"""Deletes the file located at 'filename'.
Args:
filename: string, a filename
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
NotFoundError if the file does not exist.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteFile(compat.as_bytes(filename), status)
def read_file_to_string(filename, binary_mode=False):
"""Reads the entire contents of a file to a string.
Args:
filename: string, path to a file
binary_mode: whether to open the file in binary mode or not. This changes
the type of the object returned.
Returns:
contents of the file as a string or bytes.
Raises:
    errors.OpError: Raises a variety of errors that are subtypes, e.g.
      NotFoundError.
"""
if binary_mode:
f = FileIO(filename, mode="rb")
else:
f = FileIO(filename, mode="r")
return f.read()
def write_string_to_file(filename, file_content):
"""Writes a string to a given file.
Args:
filename: string, path to a file
file_content: string, contents that need to be written to the file
Raises:
errors.OpError: If there are errors during the operation.
"""
with FileIO(filename, mode="w") as f:
f.write(file_content)
@tf_export("gfile.Glob")
def get_matching_files(filename):
"""Returns a list of files that match the given pattern(s).
Args:
filename: string or iterable of strings. The glob pattern(s).
Returns:
A list of strings containing filenames that match the given pattern(s).
Raises:
errors.OpError: If there are filesystem / directory listing errors.
"""
with errors.raise_exception_on_not_ok_status() as status:
if isinstance(filename, six.string_types):
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for matching_filename in pywrap_tensorflow.GetMatchingFiles(
compat.as_bytes(filename), status)
]
else:
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for single_filename in filename
for matching_filename in pywrap_tensorflow.GetMatchingFiles(
compat.as_bytes(single_filename), status)
]
@tf_export("gfile.MkDir")
def create_dir(dirname):
"""Creates a directory with the name 'dirname'.
Args:
dirname: string, name of the directory to be created
Notes:
The parent directories need to exist. Use recursive_create_dir instead if
there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.MakeDirs")
def recursive_create_dir(dirname):
"""Creates a directory and all parent/intermediate directories.
It succeeds if dirname already exists and is writable.
Args:
dirname: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RecursivelyCreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.Copy")
def copy(oldpath, newpath, overwrite=False):
"""Copies data from oldpath to newpath.
Args:
    oldpath: string, name of the file whose contents need to be copied
newpath: string, name of the file to which to copy to
    overwrite: boolean, if false it's an error for newpath to be occupied by an
      existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CopyFile(
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
@tf_export("gfile.Rename")
def rename(oldname, newname, overwrite=False):
"""Rename or move a file / directory.
Args:
oldname: string, pathname for a file
newname: string, pathname to which the file needs to be moved
overwrite: boolean, if false it's an error for `newname` to be occupied by
an existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RenameFile(
compat.as_bytes(oldname), compat.as_bytes(newname), overwrite, status)
def atomic_write_string_to_file(filename, contents, overwrite=True):
"""Writes to `filename` atomically.
This means that when `filename` appears in the filesystem, it will contain
all of `contents`. With write_string_to_file, it is possible for the file
to appear in the filesystem with `contents` only partially written.
Accomplished by writing to a temp file and then renaming it.
Args:
filename: string, pathname for a file
contents: string, contents that need to be written to the file
overwrite: boolean, if false it's an error for `filename` to be occupied by
an existing file.
"""
temp_pathname = filename + ".tmp" + uuid.uuid4().hex
write_string_to_file(temp_pathname, contents)
try:
rename(temp_pathname, filename, overwrite)
except errors.OpError:
delete_file(temp_pathname)
raise
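# Illustrative sketch only (not part of this module): the same
# write-to-a-temp-file-then-rename pattern using just the standard library
# (local filesystem only; assumes Python 3 for os.replace).
def _example_atomic_write_stdlib(filename, contents):
  import os
  import uuid
  tmp = filename + ".tmp" + uuid.uuid4().hex
  with open(tmp, "w") as f:
    f.write(contents)
  try:
    os.replace(tmp, filename)  # atomic overwrite on POSIX filesystems
  except OSError:
    os.remove(tmp)
    raise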
@tf_export("gfile.DeleteRecursively")
def delete_recursively(dirname):
"""Deletes everything under dirname recursively.
Args:
dirname: string, a path to a directory
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteRecursively(compat.as_bytes(dirname), status)
@tf_export("gfile.IsDirectory")
def is_directory(dirname):
"""Returns whether the path is a directory or not.
Args:
dirname: string, path to a potential directory
Returns:
True, if the path is a directory; False otherwise
"""
status = c_api_util.ScopedTFStatus()
return pywrap_tensorflow.IsDirectory(compat.as_bytes(dirname), status)
@tf_export("gfile.ListDirectory")
def list_directory(dirname):
"""Returns a list of entries contained within a directory.
The list is in arbitrary order. It does not contain the special entries "."
and "..".
Args:
dirname: string, path to a directory
Returns:
[filename1, filename2, ... filenameN] as strings
Raises:
errors.NotFoundError if directory doesn't exist
"""
if not is_directory(dirname):
raise errors.NotFoundError(None, None, "Could not find directory")
with errors.raise_exception_on_not_ok_status() as status:
# Convert each element to string, since the return values of the
# vector of string should be interpreted as strings, not bytes.
return [
compat.as_str_any(filename)
for filename in pywrap_tensorflow.GetChildren(
compat.as_bytes(dirname), status)
]
@tf_export("gfile.Walk")
def walk(top, in_order=True):
"""Recursive directory tree generator for directories.
Args:
top: string, a Directory name
in_order: bool, Traverse in order if True, post order if False.
Errors that happen while listing directories are ignored.
Yields:
Each yield is a 3-tuple: the pathname of a directory, followed by lists of
all its subdirectories and leaf files.
(dirname, [subdirname, subdirname, ...], [filename, filename, ...])
as strings
"""
top = compat.as_str_any(top)
try:
listing = list_directory(top)
except errors.NotFoundError:
return
files = []
subdirs = []
for item in listing:
full_path = os.path.join(top, item)
if is_directory(full_path):
subdirs.append(item)
else:
files.append(item)
here = (top, subdirs, files)
if in_order:
yield here
for subdir in subdirs:
for subitem in walk(os.path.join(top, subdir), in_order):
yield subitem
if not in_order:
yield here
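# Illustrative sketch only (not part of this module): the same pre/post-order
# traversal shape as walk() above, expressed with os.listdir/os.path so it can
# be read without the TensorFlow filesystem layer.
def _example_walk_stdlib(top, in_order=True):
  import os
  try:
    listing = os.listdir(top)
  except OSError:
    return
  subdirs = [x for x in listing if os.path.isdir(os.path.join(top, x))]
  files = [x for x in listing if not os.path.isdir(os.path.join(top, x))]
  here = (top, subdirs, files)
  if in_order:
    yield here
  for subdir in subdirs:
    for item in _example_walk_stdlib(os.path.join(top, subdir), in_order):
      yield item
  if not in_order:
    yield here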
@tf_export("gfile.Stat")
def stat(filename):
"""Returns file statistics for a given path.
Args:
filename: string, path to a file
Returns:
FileStatistics struct that contains information about the path
Raises:
errors.OpError: If the operation fails.
"""
file_statistics = pywrap_tensorflow.FileStatistics()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.Stat(compat.as_bytes(filename), file_statistics, status)
return file_statistics
def filecmp(filename_a, filename_b):
"""Compare two files, returning True if they are the same, False otherwise.
  We check size first and return False quickly if the files are different sizes.
  If they are the same size, we continue by generating a crc for the whole file.
You might wonder: why not use Python's filecmp.cmp() instead? The answer is
that the builtin library is not robust to the many different filesystems
TensorFlow runs on, and so we here perform a similar comparison with
the more robust FileIO.
Args:
filename_a: string path to the first file.
filename_b: string path to the second file.
Returns:
True if the files are the same, False otherwise.
"""
size_a = FileIO(filename_a, "rb").size()
size_b = FileIO(filename_b, "rb").size()
if size_a != size_b:
return False
# Size is the same. Do a full check.
crc_a = file_crc32(filename_a)
crc_b = file_crc32(filename_b)
return crc_a == crc_b
def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):
"""Get the crc32 of the passed file.
The crc32 of a file can be used for error checking; two files with the same
crc32 are considered equivalent. Note that the entire file must be read
to produce the crc32.
Args:
filename: string, path to a file
block_size: Integer, process the files by reading blocks of `block_size`
      bytes. Use -1 to read the file at once.
Returns:
hexadecimal as string, the crc32 of the passed file.
"""
crc = 0
with FileIO(filename, mode="rb") as f:
chunk = f.read(n=block_size)
while chunk:
crc = binascii.crc32(chunk, crc)
chunk = f.read(n=block_size)
return hex(crc & 0xFFFFFFFF)
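# Illustrative sketch only (not part of this module): the size check followed
# by a chunked CRC32, as in filecmp()/file_crc32() above, but over the local
# filesystem with the standard library.
def _example_filecmp_stdlib(path_a, path_b, block_size=16 * 1024 * 1024):
  import os
  import binascii
  if os.path.getsize(path_a) != os.path.getsize(path_b):
    return False  # different sizes can never match
  def _crc(path):
    crc = 0
    with open(path, "rb") as f:
      chunk = f.read(block_size)
      while chunk:
        crc = binascii.crc32(chunk, crc)
        chunk = f.read(block_size)
    return crc & 0xFFFFFFFF
  return _crc(path_a) == _crc(path_b)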
|
|
'''
Created on Jun 22, 2017
Copyright 2017, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from datetime import date, datetime
from logging import getLogger
from json import dumps
from time import sleep
import traceback
from bq_wrapper import fetch_paged_results, query_bq_table
from gdc.test.test_setup import GDCTestSetup
from gdc.util.ThreadLauncher import launch_threads
from gdc.util.gdc_util import request
from isbcgc_cloudsql_model import ISBCGC_database_helper
from util import create_log
class CCLE_datasets:
def bio_sql2bq(self):
return {
'CCLE_metadata_clinical': ['[CCLE_bioclin_v0.clinical_v0]', 'case_barcode', None],
'CCLE_metadata_biospecimen': [None, 'case_barcode', 'sample_barcode']
}
def gcs_datasets(self):
return {
"Aligned_Reads": ["Aligned reads"]
}
def bq_datasets(self):
return {
}
class TARGET_datasets:
def bio_sql2bq(self):
return {
'TARGET_metadata_clinical': ['[isb-cgc:TARGET_bioclin_v0.Clinical]', 'case_barcode', None],
'TARGET_metadata_biospecimen': ['[isb-cgc:TARGET_bioclin_v0.Biospecimen]', 'case_barcode', 'sample_barcode']
}
def gcs_datasets(self):
return {
"Aligned_Reads": ["Aligned reads", "Aligned Reads"]
}
    # data type -> [isb_label, bq table(s), sample_barcode column(s), has_file]
def bq_datasets(self):
return {
# "miRNA Expression Quantification": [
# "miRNA_Gene_Quantification",
# "[isb-cgc:TARGET_hg38_data_v0.miRNAseq_Expression]",
# "sample_barcode",
# True
# ],
# "Isoform Expression Quantification": [
# "miRNA_Isoform_Quantification",
# "[isb-cgc:TARGET_hg38_data_v0.miRNAseq_Isoform_Expression]",
# "sample_barcode",
# True
# ],
"miRNA Expression Quantification": [
"miRNA_Gene_Quantification",
"[isb-cgc:test.TARGET_miRNAExpressionQuantification_HG38_170828]",
"sample_barcode",
True
],
"Isoform Expression Quantification": [
"miRNA_Isoform_Quantification",
"[isb-cgc:test.TARGET_miRNAIsoformQuantification_HG38]",
"sample_barcode",
True
],
# "Gene Expression Quantification": [
# "mRNA_Gene_Quantification",
# "[isb-cgc:TARGET_hg38_data_v0.RNAseq_Gene_Expression]",
# "sample_barcode",
# True
# ]
}
class TCGA_datasets:
def bio_sql2bq(self):
return {
'TCGA_metadata_annotation': ['[isb-cgc:TCGA_bioclin_v0.Annotations]', 'case_barcode', None],
'TCGA_metadata_clinical': ['[isb-cgc:TCGA_bioclin_v0.Clinical]', 'case_barcode', None],
'TCGA_metadata_biospecimen': ['[isb-cgc:TCGA_bioclin_v0.Biospecimen]', 'case_barcode', 'sample_barcode']
}
def gcs_datasets(self):
return {
"Aligned_Reads": ["Aligned reads", "Aligned Reads"],
"Pathology_Image": ["Diagnostic image", "Tissue slide image"],
"Genotypes": ["Genotypes"],
"DNA_Variation_VCF": ["Simple nucleotide variation"]
}
def bq_datasets(self):
return {
# "Copy number segmentation": [
# "Copy_Number_Segment_Masked",
# "[isb-cgc:TCGA_hg19_data_v0.Copy_Number_Segment_Masked]",
# "sample_barcode",
# False
# ],
# "Methylation beta value": [
# "DNA_Methylation_Beta",
# "[isb-cgc:TCGA_hg19_data_v0.DNA_Methylation]",
# "sample_barcode",
# False
# ],
# "miRNA gene quantification": [
# "miRNA_Gene_Quantification",
# "[isb-cgc:TCGA_hg19_data_v0.miRNAseq_Expression]",
# "sample_barcode",
# True
# ],
# "miRNA isoform quantification": [
# "miRNA_Isoform_Quantification",
# "[isb-cgc:TCGA_hg19_data_v0.miRNAseq_Isoform_Expression]",
# "sample_barcode",
# True
# ],
"miRNA gene quantification": [
"miRNA_Gene_Quantification",
"[isb-cgc:test.TCGA_miRNAExpressionQuantification_HG38_170828]",
"sample_barcode",
True
],
"miRNA isoform quantification": [
"miRNA_Isoform_Quantification",
"[isb-cgc:test.TCGA_miRNAIsoformQuantification_HG38_170828]",
"sample_barcode",
True
],
"Gene expression quantification": [
"mRNA_Gene_Quantification",
"[isb-cgc:TCGA_hg19_data_v0.RNAseq_Gene_Expression_UNC_RSEM]",
"sample_barcode",
False
],
"Protein expression quantification": [
"Protein_Quantification",
"[isb-cgc:TCGA_hg19_data_v0.Protein_Expression]",
"sample_barcode",
False
],
"Simple somatic mutation": [
"Somatic_Mutation",
["[isb-cgc:TCGA_hg19_data_v0.Somatic_Mutation_DCC]", "[isb-cgc:TCGA_hg19_data_v0.Somatic_Mutation_MC3]"],
["sample_barcode_tumor", "sample_barcode_normal"],
False
],
"Masked Copy Number Segment": [
"Copy_Number_Segment_Masked",
"[isb-cgc:TCGA_hg38_data_v0.Copy_Number_Segment_Masked]",
"sample_barcode",
True
],
"Methylation Beta Value": [
"DNA_Methylation_Beta",
"[isb-cgc:TCGA_hg38_data_v0.DNA_Methylation]",
"sample_barcode",
True
],
"miRNA Expression Quantification": [
"miRNA_Gene_Quantification",
"[isb-cgc:TCGA_hg38_data_v0.miRNAseq_Expression]",
"sample_barcode",
True
],
"Isoform Expression Quantification": [
"miRNA_Isoform_Quantification",
"[isb-cgc:TCGA_hg38_data_v0.miRNAseq_Isoform_Expression]",
"sample_barcode",
True
],
"Gene Expression Quantification": [
"mRNA_Gene_Quantification",
"[isb-cgc:TCGA_hg38_data_v0.RNAseq_Gene_Expression]",
"sample_barcode",
True
],
"Protein expression quantification": [
"Protein_Quantification",
"[isb-cgc:TCGA_hg38_data_v0.Protein_Expression]",
"sample_barcode",
False
],
"Masked Somatic Mutation": [
"Somatic_Mutation",
"[isb-cgc:TCGA_hg38_data_v0.Somatic_Mutation]",
["sample_barcode_tumor", "sample_barcode_normal"],
False
],
}
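# Illustrative sketch only (not part of the test suite): how the bq_datasets()
# mappings above are consumed. Per process_bq() below, each value is
# [isb_label, bq table(s), sample_barcode column(s), has_file].
def _example_iterate_bq_datasets():
    for data_type, (isb_label, bq_table, sample_barcode, has_file) in \
            TCGA_datasets().bq_datasets().iteritems():
        print('{}: label={}, table(s)={}, barcode column(s)={}, has_file={}'.format(
            data_type, isb_label, bq_table, sample_barcode, has_file))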
class GDCTestCloudSQLBQBarcodes(GDCTestSetup):
'''
    for each of the data types, either in CloudSQL or in BigQuery, checks the
    consistency of the sample and case barcodes between the GDC, the datastore
    and the sample data availability records
'''
def __init__(self, param):
super(GDCTestCloudSQLBQBarcodes, self).__init__(param)
def setUp(self):
super(GDCTestCloudSQLBQBarcodes, self).setUp()
def merge_set_lists(self, dict1, dict2):
keyset1 = set(dict1.keys())
keyset2 = set(dict2.keys())
merged = {}
for key in keyset1 - keyset2:
merged[key] = dict1[key]
for key in keyset2 - keyset1:
merged[key] = dict2[key]
for key in keyset1 & keyset2:
merged[key] = dict1[key] | dict2[key]
return merged
def request_response(self, endpt, params, msg):
response = None
retries = 5
while retries:
retries -= 1
try:
response = request(endpt, params, msg, self.log)
response.raise_for_status()
try:
rj = response.json()
break
except:
self.log.exception('problem with response, not json: %s' % (response.text))
raise
except:
if 0 == retries:
self.log.exception('giving up')
raise
sleep(10)
self.log.warning('retrying request...')
finally:
if response:
                    response.close()
return rj
def request_gdc_barcode_info(self, batch, program_name, start, chunk, total):
endpt = 'https://gdc-api.nci.nih.gov/legacy/cases?expand=project,samples'
params = {
'filters': dumps({
"op":"in",
"content":{
"field":"project.program.name",
"value":[
program_name
]
}
}),
'sort':'case_id:asc',
'from':start,
'size':chunk
}
curstart = 1
case_barcode2info = {}
while True:
msg = '\t\tproblem getting filtered map for cases'
rj = self.request_response(endpt, params, msg)
params['from'] = params['from'] + params['size']
for index in range(len(rj['data']['hits'])):
themap = rj['data']['hits'][index]
case_barcode = themap['submitter_id'].strip()
project_id = themap['project']['project_id'].strip()
sample_barcodes = set()
info = {
'case_barcode': case_barcode,
'project': project_id,
'sample_barcodes': sample_barcodes,
'total': rj['data']['pagination']['total']
}
case_barcode2info[case_barcode] = info
if 'samples' in themap:
for j in range(len(themap['samples'])):
sample_barcode = themap['samples'][j]['submitter_id'].strip()
sample_barcodes.add(sample_barcode)
curstart += rj['data']['pagination']['count']
if curstart >= total or params['from'] >= rj['data']['pagination']['total']:
break
return case_barcode2info
def get_gdc_barcode_info(self, program_name, log_dir):
log = getLogger(create_log(log_dir, 'barcode_info'))
log.info('processing {} for barcode information'.format(program_name))
# get the total count to parallelize barcode fetches
barcode2info = self.request_gdc_barcode_info(program_name, program_name, 1, 1, 1)
        # divide into batches based on the total
info = barcode2info.popitem()[1]
total = info['total']
log.info('\tfetching {} cases for {}'.format(total, info))
batch = total / 20
log.info('\tlooking at batches of {} repeated 20 times for {}'.format(batch, program_name))
params = []
cur_start = 1
for i in range(21):
params += [[program_name + '_%s' % (i), program_name, cur_start, min(batch, 200), batch]]
log.info('\t\tbatch {}: {}'.format(i, params[-1]))
cur_start += batch
calls = {
'fn': self.request_gdc_barcode_info,
'batches': {
'params': params
}
}
barcode2info = launch_threads(self.config, 'batches', calls, self.log)
samples = set()
for info in barcode2info.itervalues():
# if 0 != len(set(cursamples) & set(samples)):
# raise ValueError('saw repeated barcode: {}'.format(set(cursamples) & set(cursamples)))
samples |= set(info['sample_barcodes'])
        log.info('\tfinished {} for barcode information. found {} cases and {} samples'.format(program_name, len(barcode2info), len(samples)))
return set(barcode2info.keys()), samples
def get_sql_barcodes(self, tables, case = 'case_barcode', sample = 'sample_barcode'):
table2cases = {}
table2samples = {}
for table, info in tables.iteritems():
if not info[2]:
sql = 'select {}, "" from {}'.format(case, table)
elif not info[1]:
sql = 'select "", {} from {}'.format(sample, table)
else:
sql = 'select {}, {} from {}'.format(case, sample, table)
rows = ISBCGC_database_helper().select(self.config, sql, self.log, [])
cases = set()
samples = set()
for row in rows:
cases.add(row[0])
samples.add(row[1])
table2cases[table] = cases if 1 < len(cases) else set()
table2samples[table] = samples if 1 < len(samples) else set()
return table2cases, table2samples
def get_bq_barcodes(self, bq_tables, case = 'case_barcode', sample = 'sample_barcode', where = None):
bq2cases = {}
bq2samples = {}
bq2files = {}
for table in bq_tables:
if not table[0]:
continue
if not table[2]:
sql = 'select {}, "" from {}'.format(case, table[0])
elif not table[1]:
sql = 'select "", {} from {}'.format(sample, table[0])
else:
sql = 'select {}, {} from {}'.format(case, sample, table[0])
self.log.info('\tstart select for {} from bq{}'.format(table[0], ' where {}'.format(where) if where else ''))
results = query_bq_table(sql, True, 'isb-cgc', self.log)
count = 0
page_token = None
cases = set()
samples = set()
while True:
total, rows, page_token = fetch_paged_results(results, 1000, None, page_token, self.log)
count += 1000
for row in rows:
cases.add(row[0])
samples.add(row[1])
if not page_token:
                    self.log.info('\tfinished select from {}. selected {} total rows'.format(table, total))
break
bq2cases[table[0]] = cases if 1 < len(cases) else set()
bq2samples[table[0]] = samples if 1 < len(samples) else set()
return bq2cases, bq2samples
def diff_barcodes(self, barcodes1, tag1, barcodes2, tag2, log):
diffs = ''
one_vs_two = barcodes1 - barcodes2
one_vs_two.discard(None)
if 0 < len(barcodes2) and 0 < len(one_vs_two):
# if 10 >= len(one_vs_two):
if 500 >= len(one_vs_two):
barcodes = ', '.join(sorted(one_vs_two))
else:
diff = sorted(one_vs_two)
barcodes = '{}...{}'.format(', '.join(diff[:5]), ', '.join(diff[-5:]))
diffs += '\t\t{} barcodes in {} than in {}--{}\n'.format(len(one_vs_two), tag1, tag2, barcodes)
else:
diffs += '\t\tall {} barcodes in {}\n'.format(tag1, tag2)
two_vs_one = barcodes2 - barcodes1
two_vs_one.discard(None)
if 0 < len(barcodes1) and 0 < len(two_vs_one):
if 10 >= len(two_vs_one):
try:
barcodes = ', '.join(sorted(two_vs_one))
except:
log.exception('problem printing barcode diff:\n\t{}'.format(two_vs_one))
barcodes = two_vs_one
else:
diff = sorted(two_vs_one)
barcodes = '{}...{}'.format(', '.join(diff[:5]), ', '.join(diff[-5:]))
diffs += '\t\t!!!{} barcodes in {} than in {}--{}!!!\n'.format(len(two_vs_one), tag2, tag1, barcodes)
else:
diffs += '\t\tall {} barcodes in {}\n'.format(tag2, tag1)
return diffs
def compare_barcodes(self, program_name, table, barcode_type, api, sql, label1, bq, label2, log):
diffs = ''
diffs += self.diff_barcodes(api, 'api', sql, label1, log)
diffs += self.diff_barcodes(api, 'api', bq, label2, log)
diffs += self.diff_barcodes(sql, label1, bq, label2, log)
retval = '{} compares for {}-{}:\n{}'.format(barcode_type, program_name, table, diffs)
log.info(retval)
return retval
def process_bio(self, program_name, program, log_dir):
log_dir = log_dir + 'bio' + '/'
log = getLogger(create_log(log_dir, 'bio'))
log.info('processing {} for bio'.format(program_name))
bio_storage2source2barcodes = {}
cases, samples = self.get_gdc_barcode_info(program_name, log_dir)
source2barcodes = bio_storage2source2barcodes.setdefault('gdc', {})
source2barcodes['api'] = (cases, samples)
sql2bq = program().bio_sql2bq()
sql2cases, sql2samples = self.get_sql_barcodes(sql2bq)
bq2cases, bq2samples = self.get_bq_barcodes(sql2bq.values())
for table, sqlcases in sql2cases.iteritems():
if sql2bq[table][0]:
bqcases = bq2cases[sql2bq[table][0]]
else:
bqcases = set()
sqlsamples = sql2samples[table]
if sql2bq[table][0]:
bqsamples = bq2samples[sql2bq[table][0]]
else:
bqsamples = set()
source2barcodes = bio_storage2source2barcodes.setdefault('sql', {})
source2barcodes[table] = (sqlcases, sqlsamples)
source2barcodes = bio_storage2source2barcodes.setdefault('bq', {})
source2barcodes[table] = (bqcases, bqsamples)
log.info('finished {} for bio'.format(program_name))
return bio_storage2source2barcodes
def get_api_data_type_barcodes(self, program_name, data_type, legacy=True, log = None):
endpt = 'https://gdc-api.nci.nih.gov/{}files?expand=cases,cases.project,cases.samples'.format('legacy/' if legacy else '')
params = {
'filters': dumps({
"op": "and",
"content": [
{
"op":"=",
"content":{
"field":"data_type",
"value":[
data_type
]
}
},
{
"op":"=",
"content":{
"field":"cases.project.program.name",
"value":[
program_name
]
}
}
]
}),
'sort':'file_id:asc',
'from':1,
'size':70
}
curstart = 1
project2cases = {}
project2samples = {}
project2files = {}
while True:
msg = '\t\tproblem getting filtered map for files'
rj = self.request_response(endpt, params, msg)
params['from'] = params['from'] + params['size']
for index in range(len(rj['data']['hits'])):
themap = rj['data']['hits'][index]
if 'cases' not in themap:
continue
project_id = themap['cases'][0]['project']['project_id'].strip()
file_barcodes = project2files.setdefault(project_id, set())
file_barcodes.add(themap['file_id'])
for i in range(len(themap['cases'])):
case_barcode = themap['cases'][i]['submitter_id'].strip()
case_barcodes = project2cases.setdefault(project_id, set())
case_barcodes.add(case_barcode)
if 'samples' in themap['cases'][i]:
for j in range(len(themap['cases'][i]['samples'])):
sample_barcode = themap['cases'][i]['samples'][j]['submitter_id'].strip()
sample_barcodes = project2samples.setdefault(project_id, set())
sample_barcodes.add(sample_barcode)
curstart += rj['data']['pagination']['count']
if curstart > rj['data']['pagination']['total'] or params['from'] > rj['data']['pagination']['total']:
self.log.info('retrieved total of {} cases and {} samples for {} files for {}:{}'.format(
len([case for cases in project2cases.itervalues() for case in cases]), len([sample for samples in project2samples.itervalues() for sample in samples]),
rj['data']['pagination']['total'], program_name, data_type))
break
return project2cases, project2samples, project2files
def get_api_data_types_barcodes(self, program_name, data_types, log):
        log.info('\t\tgetting api data types barcodes {}-{}'.format(program_name, '"{}"'.format(', '.join(data_types))))
try:
total_project2cases = {}
total_project2samples = {}
total_project2files = {}
for data_type in data_types:
project2cases, project2samples, project2files = self.get_api_data_type_barcodes(program_name, data_type, False if data_type in ('Aligned Reads',
'miRNA Expression Quantification',
'Isoform Expression Quantification',
'Gene Expression Quantification',
'Masked Copy Number Segment',
'Methylation Beta Value',
'Masked Somatic Mutation')
else True, log)
total_project2cases = self.merge_set_lists(total_project2cases, project2cases)
total_project2samples = self.merge_set_lists(total_project2samples, project2samples)
total_project2files = self.merge_set_lists(total_project2files, project2files)
log.info('\t\tget_api_data_types_barcodes(): {}-{} cumulative counts, cases={}, samples={}'.format(
program_name, data_type, sum(len(cases) for cases in total_project2cases.itervalues()), sum(len(samples) for samples in total_project2samples.itervalues())))
            log.info('\t\tfinished api data types barcodes {}-{}'.format(program_name, '"{}"'.format(', '.join(data_types))))
return total_project2cases, total_project2samples, total_project2files
except:
log.exception('problem in get_api_data_types_barcodes()')
raise
def get_gcs_data_types_barcodes(self, program_name, data_types, log):
log.info('\t\tgetting gcs data types barcodes {}-{} for gcs'.format(program_name, '"{}"'.format(', '.join(data_types))))
try:
stmt = 'select project_short_name, case_barcode, sample_barcode, file_gdc_id from {} where data_type in ({})'
project2cases = {}
project2samples = {}
project2files = {}
for data_type in data_types:
build = 'HG38' if data_type in ('Aligned Reads', \
'miRNA Expression Quantification', \
'Isoform Expression Quantification', \
'Gene Expression Quantification', \
'Masked Copy Number Segment', \
'Methylation Beta Value', \
'Masked Somatic Mutation') \
else 'HG19'
rows = ISBCGC_database_helper.select(self.config, stmt.format('{}_metadata_data_{}'.format(program_name, build), '"{}"'.format(data_type)), log, [])
for row in rows:
cases = project2cases.setdefault(row[0], set())
cases.add(row[1])
samples = project2samples.setdefault(row[0], set())
samples.add(row[2])
files = project2files.setdefault(row[0], set())
files.add(row[3])
log.info('\t\tget_gcs_data_types_barcodes(): {}-{} cumulative counts, cases={}, samples={}'.format(
program_name, data_type, sum(len(cases) for cases in project2cases.itervalues()), sum(len(samples) for samples in project2samples.itervalues())))
log.info('\t\tfinished gcs data types barcodes {}-{} for gcs'.format(program_name, '"{}"'.format(', '.join(data_types))))
return project2cases, project2samples, project2files
except:
log.exception('problem in get_gcs_data_types_barcodes()')
raise
def get_gcs_isb_label_barcodes(self, program_name, isb_label, log):
log.info('\t\tgetting isb_label barcodes {}-{} for gcs'.format(program_name, isb_label))
try:
project2cases = {}
project2samples = {}
stmt = 'select bs.project_short_name, bs.case_barcode, sa.sample_barcode ' \
'from {0}_metadata_sample_data_availability sa join {0}_metadata_data_type_availability da on sa.metadata_data_type_availability_id = da.metadata_data_type_availability_id ' \
'join {0}_metadata_biospecimen bs on sa.sample_barcode = bs.sample_barcode ' \
'where isb_label = %s group by 1, 2, 3'.format(program_name)
rows = ISBCGC_database_helper().select(self.config, stmt, log, [isb_label])
for row in rows:
cases = project2cases.setdefault(row[0], set())
cases.add(row[1])
samples = project2samples.setdefault(row[0], set())
samples.add(row[2])
            log.info('\t\tget_gcs_isb_label_barcodes(): {}-{} cumulative counts, cases={}, samples={}'.format(
program_name, isb_label, sum(len(cases) for cases in project2cases.itervalues()), sum(len(samples) for samples in project2samples.itervalues())))
log.info('\t\tfinished get_gcs_isb_label_barcodes() {}-{} for gcs'.format(program_name, isb_label))
return project2cases, project2samples
except:
            log.exception('problem in get_gcs_isb_label_barcodes()')
raise
def compare_isb_label_gcs(self, program_name, isb_label, data_types, results, log_dir):
log_dir = log_dir + isb_label + '/gcs/'
log = getLogger(create_log(log_dir, '{}_{}_gcs'.format(program_name, isb_label)))
log.info('\tprocessing {}-{} for gcs'.format(program_name, isb_label))
api_project2cases, api_project2samples, api_project2files = self.get_api_data_types_barcodes(program_name, data_types, log)
gcs_project2cases, gcs_project2samples, gcs_project2files = self.get_gcs_data_types_barcodes(program_name, data_types, log)
label_project2cases, label_project2samples = self.get_gcs_isb_label_barcodes(program_name, isb_label, log)
project2barcodes = results.setdefault(isb_label, {})
api_cases = set(case for cases in api_project2cases.itervalues() for case in cases)
gcs_cases = set(case for cases in gcs_project2cases.itervalues() for case in cases)
label_cases = set(case for cases in label_project2cases.itervalues() for case in cases)
api_samples = set(sample for samples in api_project2samples.itervalues() for sample in samples)
gcs_samples = set(sample for samples in gcs_project2samples.itervalues() for sample in samples)
label_samples = set(sample for samples in label_project2samples.itervalues() for sample in samples)
api_files = set(nextfile for files in api_project2files.itervalues() for nextfile in files)
gcs_files = set(nextfile for files in gcs_project2files.itervalues() for nextfile in files)
project2barcodes['all'] = (api_cases, gcs_cases, label_cases, api_samples, gcs_samples, label_samples, api_files, gcs_files)
for project, api_cases in api_project2cases.iteritems():
try:
gcs_cases = gcs_project2cases[project]
except:
log.info('no cases for gcs {}'.format(project))
gcs_cases = set()
try:
label_cases = label_project2cases[project]
except:
log.info('no cases for label {}'.format(project))
label_cases = set()
api_samples = api_project2samples[project]
try:
gcs_samples = gcs_project2samples[project]
except:
log.info('no samples for gcs {}'.format(project))
gcs_samples = set()
try:
label_samples = label_project2samples[project]
except:
log.info('no samples for label {}'.format(project))
label_samples = set()
api_files = api_project2files[project]
try:
gcs_files = gcs_project2files[project]
except:
log.info('no files for gcs {}'.format(project))
gcs_files = set()
project2barcodes[project] = (api_cases, gcs_cases, label_cases, api_samples, gcs_samples, label_samples, api_files, gcs_files)
log.info('\tfinished {}-{} for gcs'.format(program_name, isb_label))
return {}
def process_gcs(self, program_name, program, results, log_dir):
log_dir = log_dir + 'gcs' + '/'
log = getLogger(create_log(log_dir, '{}_gcs'.format(program_name)))
log.info('processing {} for gcs'.format(program_name))
isb_label2tables = program().gcs_datasets()
params = []
for isb_label, data_types in isb_label2tables.iteritems():
params += [[program_name, isb_label, data_types, results, log_dir]]
calls = {
'fn': self.compare_isb_label_gcs,
'labels': {
'params': params
}
}
launch_threads(self.config, 'labels', calls, self.log)
log.info('finished {} for gcs'.format(program_name))
def get_bq_data_type_barcodes(self, program_name, bq_table, sample_barcode, has_file, log):
        log.info('\t\tgetting bq data type barcodes {}-{} for bq'.format(program_name, bq_table))
try:
if 'Methylation' in bq_table:
project = '"ALL"'
else:
project = 'project_short_name'
if has_file:
stmt = 'select {}, case_barcode, {}, file_gdc_id from {} group by 1, 2, 3, 4'.format(project, sample_barcode, bq_table)
else:
stmt = 'select {}, case_barcode, {}, "" from {} group by 1, 2, 3'.format(project, sample_barcode, bq_table)
project2cases = {}
project2samples = {}
project2files = {}
results = query_bq_table(stmt, True, 'isb-cgc', self.log)
count = 0
page_token = None
while True:
total, rows, page_token = fetch_paged_results(results, 1000, None, page_token, self.log)
count += 1000
for row in rows:
cases = project2cases.setdefault(row[0], set())
cases.add(row[1])
samples = project2samples.setdefault(row[0], set())
samples.add(row[2])
files = project2files.setdefault(row[0], set())
if 0 < len(files):
files.add(row[3])
if not page_token:
self.log.info('\tfinished select from {}. selected {} total rows'.format(bq_table, total))
break
            log.info('\t\tfinished bq data type barcodes {}-{} for bq'.format(program_name, bq_table))
return project2cases, project2samples, project2files
except:
log.exception('problem in get_bq_data_type_barcodes()')
raise
def compare_isb_label_bq(self, program_name, data_type, isb_label, bq_table, sample_barcode, has_file, bq_results, log_dir):
log_dir = log_dir + isb_label + '/'
log = getLogger(create_log(log_dir, '{}_{}_bq'.format(program_name, data_type)))
log.info('\tprocessing {}-{} for bq'.format(program_name, isb_label))
api_project2cases, api_project2samples, api_project2files = self.get_api_data_types_barcodes(program_name, [data_type], log)
if 'somatic' not in data_type.lower():
bq_project2cases, bq_project2samples, bq_project2files = self.get_bq_data_type_barcodes(program_name, bq_table, sample_barcode, has_file, log)
else:
if "Simple somatic mutation" == data_type:
bq_project2cases1, bq_project2samples1, bq_project2files1 = self.get_bq_data_type_barcodes(program_name, bq_table[0], sample_barcode[0], has_file, log)
bq_project2cases_normal1, bq_project2samples_normal1, bq_project2files_normal1 = self.get_bq_data_type_barcodes(program_name, bq_table[0], sample_barcode[1], has_file, log)
bq_project2cases2, bq_project2samples2, bq_project2files2 = self.get_bq_data_type_barcodes(program_name, bq_table[1], sample_barcode[0], has_file, log)
bq_project2cases_normal2, bq_project2samples_normal2, bq_project2files_normal2 = self.get_bq_data_type_barcodes(program_name, bq_table[1], sample_barcode[1], has_file, log)
bq_project2cases = self.merge_set_lists(bq_project2cases1, bq_project2cases2)
bq_project2samples = self.merge_set_lists(bq_project2samples1, bq_project2samples2)
bq_project2files = self.merge_set_lists(bq_project2files1, bq_project2files2)
else:
bq_project2cases, bq_project2samples, bq_project2files = self.get_bq_data_type_barcodes(program_name, bq_table, sample_barcode[0], has_file, log)
bq_project2cases_normal, bq_project2samples_normal, bq_project2files_normal = self.get_bq_data_type_barcodes(program_name, bq_table, sample_barcode[1], has_file, log)
label_project2cases, label_project2samples = self.get_gcs_isb_label_barcodes(program_name, isb_label, log)
project2barcodes = bq_results.setdefault(isb_label, {})
api_cases = set(case for cases in api_project2cases.itervalues() for case in cases)
bq_cases = set(case for cases in bq_project2cases.itervalues() for case in cases)
label_cases = set(case for cases in label_project2cases.itervalues() for case in cases)
api_samples = set(sample for samples in api_project2samples.itervalues() for sample in samples)
bq_samples = set(sample for samples in bq_project2samples.itervalues() for sample in samples)
label_samples = set(sample for samples in label_project2samples.itervalues() for sample in samples)
api_files = set(file for files in api_project2files.itervalues() for file in files)
bq_files = set(file for files in bq_project2files.itervalues() for file in files)
project2barcodes['all'] = (api_cases, bq_cases, label_cases, api_samples, bq_samples, label_samples, api_files, bq_files)
for project, api_cases in api_project2cases.iteritems():
try:
bq_cases = bq_project2cases[project]
except:
                log.info('no cases for bq {}'.format(project))
bq_cases = set()
try:
label_cases = label_project2cases[project]
except:
log.info('no cases for label {}'.format(project))
label_cases = set()
api_samples = api_project2samples[project]
try:
bq_samples = bq_project2samples[project]
except:
                log.info('no samples for bq {}'.format(project))
bq_samples = set()
try:
label_samples = label_project2samples[project]
except:
log.info('no samples for label {}'.format(project))
label_samples = set()
api_files = api_project2files[project]
try:
bq_files = bq_project2files[project]
except:
                log.info('no files for bq {}'.format(project))
bq_files = set()
project2barcodes[project] = (api_cases, bq_cases, label_cases, api_samples, bq_samples, label_samples, api_files, bq_files)
        log.info('\tfinished {}-{} for bq'.format(program_name, isb_label))
return {}
def process_bq(self, program_name, program, bq_results, log_dir):
log_dir = log_dir + 'bq' + '/'
        log = getLogger(create_log(log_dir, '{}_bq'.format(program_name)))
        log.info('processing {} for bq'.format(program_name))
        # data type -> [isb_label, bq table(s), sample_barcode column(s), has_file]
isb_label2tables = program().bq_datasets()
params = []
for data_type, info in isb_label2tables.iteritems():
params += [[program_name, data_type, info[0], info[1], info[2], info[3], bq_results, log_dir]]
calls = {
'fn': self.compare_isb_label_bq,
'labels': {
'params': params
}
}
launch_threads(self.config, 'labels', calls, self.log)
        log.info('finished {} for bq'.format(program_name))
def process_program(self, program_name, program, log_dir):
try:
log_dir = log_dir + program_name + '/'
log = getLogger(create_log(log_dir, program_name))
log.info('processing {}'.format(program_name))
output_bio_compare = 'case and sample compare:\n'
bio_storage2source2barcodes = self.process_bio(program_name, program, log_dir)
cases, samples = bio_storage2source2barcodes['gdc']['api']
for sql_source, barcodes in bio_storage2source2barcodes['sql'].iteritems():
sqlcases, sqlsamples = barcodes
sources = sorted(bio_storage2source2barcodes['bq'].keys())
for bq_source in sources:
barcodes = bio_storage2source2barcodes['bq'][bq_source]
bqcases, bqsamples = barcodes
output_bio_compare += self.compare_barcodes(program_name, 'sql-{}:bq-{}'.format(sql_source, bq_source), 'case', cases, sqlcases, 'sql', bqcases, 'bq', log) + '\n'
output_bio_compare += self.compare_barcodes(program_name, 'sql-{}:bq-{}'.format(sql_source, bq_source), 'sample', samples, sqlsamples, 'sql', bqsamples, 'bq', log) + '\n{}\n'
output_bio_counts = 'Case and Sample compares for {} clinical and biospecimen\n\nGDC Case API:\ncases\tsamples\n{}\t{}\n\nCloud SQL\n'.format(program_name, len(cases), len(samples))
for source, barcodes in bio_storage2source2barcodes['sql'].iteritems():
sqlcases, sqlsamples = barcodes
output_bio_counts += '{}:\ncases\tsamples\n{}\t{}\n\n'.format(source, len(sqlcases), len(sqlsamples))
output_bio_counts += 'BigQuery\n'
sources = sorted(bio_storage2source2barcodes['bq'].keys())
for source in sources:
bqcases, bqsamples = bio_storage2source2barcodes['bq'][source]
output_bio_counts += '{}:\ncases\tsamples\n{}\t{}\n\n'.format(source, len(bqcases), len(bqsamples))
gcs_results = {}
self.process_gcs(program_name, program, gcs_results, log_dir)
output_gcs_compare = 'case, sample and file compare for gcs vs. isb_label:\n'
output_gcs_counts = ''
for isb_label in gcs_results:
for project, barcodes in gcs_results[isb_label].iteritems():
output_gcs_compare += self.compare_barcodes(program_name, '{0}:project-{1}:label-{2}'.format(program_name, project, isb_label), 'case', barcodes[0], barcodes[1], 'gcs', barcodes[2], 'label', log) + '\n'
output_gcs_compare += self.compare_barcodes(program_name, '{0}:project-{1}:label-{2}'.format(program_name, project, isb_label), 'sample', barcodes[3], barcodes[4], 'gcs', barcodes[5], 'label', log) + '\n'
output_gcs_compare += self.compare_barcodes(program_name, '{0}:project-{1}:label-{2}'.format(program_name, project, isb_label), 'file', barcodes[6], barcodes[7], 'gcs', set(), 'label', log) + '\n{}\n'
if 'all' == project:
output_gcs_counts = '{}Case and Sample compares for {} Google Cloud Storage\n\nTotals:\ncases\napi\tgcs\tisb_label\n{}\t{}\t{}\nsamples\napi\tgcs\tisb_label\n{}\t{}\t{}\nfiles\napi\tgcs\n{}\t{}\n\n' \
.format('{}\n'.format('*' * 20), program_name, len(barcodes[0]), len(barcodes[1]), len(barcodes[2]), len(barcodes[3]), len(barcodes[4]), len(barcodes[5]), len(barcodes[6]), len(barcodes[7]))
bq_results = {}
self.process_bq(program_name, program, bq_results, log_dir)
output_bq_compare = 'case, sample and file compare for bq vs. isb_label:\n'
output_bq_counts = ''
for isb_label in bq_results:
for project, barcodes in bq_results[isb_label].iteritems():
output_bq_compare += self.compare_barcodes(program_name, '{0}:project-{1}:label-{2}'.format(program_name, project, isb_label), 'case', barcodes[0], barcodes[1], 'bq', barcodes[2], 'label', log) + '\n'
output_bq_compare += self.compare_barcodes(program_name, '{0}:project-{1}:label-{2}'.format(program_name, project, isb_label), 'sample', barcodes[3], barcodes[4], 'bq', barcodes[5], 'label', log) + '\n'
output_bq_compare += self.compare_barcodes(program_name, '{0}:project-{1}:label-{2}'.format(program_name, project, isb_label), 'file', barcodes[6], barcodes[7], 'bq', set(), 'label', log) + '\n'
if 'all' == project:
output_bq_counts = '{}Case and Sample compares for {} Google BigQuery\n\nTotals:\ncases\napi\tbq\tisb_label\n{}\t{}\t{}\nsamples\napi\tbq\tisb_label\n{}\t{}\t{}\nfiles\napi\tbq\n{}\t{}\n\n' \
.format('{}\n'.format('*' * 20), program_name, len(barcodes[0]), len(barcodes[1]), len(barcodes[2]), len(barcodes[3]), len(barcodes[4]), len(barcodes[5]), len(barcodes[6]), len(barcodes[7]))
with open('gdc/doc/' + str(date.today()).replace('-', '_') + '_{}_validate_bq_gcs_label.txt'.format(program_name), 'w') as out:
                out.writelines(['Validity Report\n\n', output_bio_counts, output_bio_compare, output_gcs_counts, output_gcs_compare, output_bq_counts, output_bq_compare])
out.write('Differences:\n\tapi\tgcs\tisb_label\tbq\t\napi\t{}\n')
log.info('finished {}'.format(program_name))
except:
log.exception('problem processing {}'.format(program_name))
raise
return {}
def test_gcs_bq_validity(self):
log_dir = str(date.today()).replace('-', '_') + '_validate/'
calls = {
'fn': self.process_program,
'validity': {
'params': [
['CCLE', CCLE_datasets, log_dir],
['TARGET', TARGET_datasets, log_dir],
['TCGA', TCGA_datasets, log_dir]
]
}
}
# self.process_gcs('TARGET', TARGET_datasets, log_dir + 'TARGET/')
launch_threads(self.config, 'validity', calls, self.log)
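# Illustrative sketch only (not part of the test suite): the retry shape used
# by request_response() above, reduced to a generic helper. 'do_request' is any
# callable returning the parsed response; the name and defaults are hypothetical.
def _example_retry(do_request, retries=5, wait_seconds=10, log=None):
    while retries:
        retries -= 1
        try:
            return do_request()
        except Exception:
            if 0 == retries:
                if log:
                    log.exception('giving up')
                raise
            sleep(wait_seconds)
            if log:
                log.warning('retrying request...')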
|
|
import networkx as nx
import numpy as np
from bayesianpy.jni import bayesServer
import bayesianpy.data
import bayesianpy.model
import pandas as pd
import math
import scipy.stats as ss
from typing import List, Dict
import sklearn.metrics
import logging
class NetworkLayout:
def __init__(self, jnetwork):
self._jnetwork = jnetwork
self._graph = None
self._multiplier = 500
def build_graph(self):
g = nx.DiGraph()
for node in self._jnetwork.getNodes():
g.add_node(node.getName())
for link in self._jnetwork.getLinks():
fr = link.getFrom().getName()
to = link.getTo().getName()
g.add_edge(fr, to)
return g
def visualise(self, graph, pos):
import pylab
nx.draw_networkx_nodes(graph, pos)
nx.draw(graph, pos, with_labels=True, node_size=2000, node_color='w')
pylab.show()
def spring_layout(self, graph):
pos = nx.spring_layout(graph,center=[0.5,0.5])
return pos
def fruchterman_reingold_layout(self, graph):
return nx.fruchterman_reingold_layout(graph,center=[0.5,0.5])
def circular_layout(self, graph):
return nx.circular_layout(graph, center=[0.5,0.5])
def random_layout(self, graph):
return nx.random_layout(graph,center=[0.5,0.5])
def update_network_layout(self, pos):
for key, value in pos.items():
node = self._jnetwork.getNodes().get(key)
b = node.getBounds()
height = b.getHeight()
width = b.getWidth()
x = value[0]*self._multiplier
y = value[1]*self._multiplier
if x < 0:
x = 0.0
if y < 0:
y = 0.0
node.setBounds(bayesServer().Bounds(x, y, width, height))
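# Illustrative sketch only (not part of this module): the same build-then-layout
# flow as NetworkLayout above, applied to a plain networkx DiGraph so it can be
# tried without a Bayes Server network. The node names are made up.
def _example_layout_plain_digraph(multiplier=500):
    g = nx.DiGraph()
    g.add_edge("rain", "wet_grass")
    g.add_edge("sprinkler", "wet_grass")
    # spring_layout returns {node_name: (x, y)} positions centred on the given
    # point; NetworkLayout scales these coordinates by its multiplier (and clips
    # negatives to zero) before writing them back to the node bounds.
    pos = nx.spring_layout(g, center=[0.5, 0.5])
    return {name: (max(x * multiplier, 0.0), max(y * multiplier, 0.0))
            for name, (x, y) in pos.items()}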
class JointDistribution:
# http://stackoverflow.com/questions/12301071/multidimensional-confidence-intervals
@staticmethod
def _plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
        Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:, order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta,
**kwargs)
ax.add_artist(ellip)
return ellip
def plot_distribution_with_variance(self, ax, df: pd.DataFrame, head_variables: List[str],
results: Dict[str, bayesianpy.model.Distribution]):
import seaborn as sns
for i, hv in enumerate(head_variables):
x = np.arange(df[hv].min() - df[hv].std(), df[hv].max() + df[hv].std(), ((df[hv].max() + df[hv].std()) - (df[hv].min()-df[hv].std())) / 100)
pdfs = [ss.norm.pdf(x, v.get_mean(), v.get_std()) for k, v in results.items()]
density = np.sum(np.array(pdfs), axis=0)
ax.plot(x, density, label='Joint PDF', linestyle='dashed')
ax.set_ylabel("pdf")
for k, v in results.items():
s = df
for tv, st in v.get_tail():
s = s[s[tv] == bayesianpy.data.DataFrame.cast2(s[tv].dtype, st)]
sns.distplot(s[hv], hist=False, label=v.pretty_print_tail(), ax=ax)
def plot_distribution_with_covariance(self, ax, df: pd.DataFrame, head_variables: tuple,
results: Dict[str, bayesianpy.model.Distribution], labels=None):
hv = head_variables
ax.plot(df[hv[0]].tolist(), df[hv[1]].tolist(), 'o', markeredgecolor='#e2edff', markeredgewidth=1,marker='o',
fillstyle='full', color='#84aae8')
#ax.set_title("{} vs {}".format(hv[0], hv[1]))
for k, v in results.items():
self._plot_cov_ellipse(cov=v.get_cov_by_variable(hv[0], hv[1]),
pos=v.get_mean_by_variable(hv[0], hv[1]),
nstd=3, edgecolor='#ffb24f', lw=2, facecolor='none',
ax=ax)
ax.set_xlim([df[hv[0]].min() - 3, df[hv[0]].max() + 3])
ax.set_ylim([df[hv[1]].min() - 3, df[hv[1]].max() + 3])
if labels is not None:
label0 = labels[0]
label1 = labels[1]
else:
label0 = hv[0]
label1 = hv[1]
ax.set_xlabel(label0)
ax.set_ylabel(label1)
def plot_with_variance(self, df: pd.DataFrame,
head_variables: List[str],
results: List[Dict[str, bayesianpy.model.Distribution]],
plots_per_page=6):
import matplotlib.pyplot as plt
cols = 2 if len(head_variables) > 1 else 1
rows = math.ceil(len(head_variables) / cols)
for i, r in enumerate(results):
if i == 0 or k == plots_per_page:
k = 0
if i > 0:
yield fig
plt.close()
fig = plt.figure(figsize=(12, 12))
k += 1
ax = fig.add_subplot(rows, cols, i + 1)
self.plot_distribution_with_variance(ax, df, head_variables, r)
yield fig
plt.close()
def plot_with_covariance(self, df: pd.DataFrame,
head_variables: List[str],
results: Dict[str, bayesianpy.model.Distribution],
plots_per_page=6):
import matplotlib.pyplot as plt
n = len(head_variables) - 1
cols = 2
total = (n * (n + 1) / 2) / cols
k = 0
for i, hv in enumerate(head_variables):
for j in range(i + 1, len(head_variables)):
if i == 0 or k == plots_per_page:
k = 0
if i > 0:
yield fig
plt.close()
fig = plt.figure(figsize=(12, 12))
k += 1
ax = fig.add_subplot(total / 2, 2, k)
self.plot_distribution_with_covariance(ax, df,
(head_variables[i], head_variables[j]), results)
yield fig
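# Illustrative sketch only (not part of this module): the eigen-decomposition
# behind _plot_cov_ellipse() above, returning the ellipse parameters instead of
# drawing them.
def _example_cov_ellipse_params(cov, nstd=2):
    vals, vecs = np.linalg.eigh(cov)
    order = vals.argsort()[::-1]
    vals, vecs = vals[order], vecs[:, order]
    angle = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    width, height = 2 * nstd * np.sqrt(vals)  # full widths, not radii
    return width, height, angle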
from matplotlib import pyplot as plt
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
real_values = cm
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
real_values = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, "{:0.2f} ({:0.2f})".format(cm[i, j], real_values[i,j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
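# Illustrative sketch only (not part of this module): feeding
# plot_confusion_matrix() above with a matrix from sklearn. The labels and
# predictions are made up.
def _example_plot_confusion_matrix():
    y_true = ["cat", "dog", "dog", "cat", "dog"]
    y_pred = ["cat", "dog", "cat", "cat", "dog"]
    cm = sklearn.metrics.confusion_matrix(y_true, y_pred, labels=["cat", "dog"])
    plot_confusion_matrix(cm, classes=["cat", "dog"], normalize=True)
    plt.show()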
def _split_df(df, actual_col, predicted_col):
rows = []
for group in np.array_split(df, 10):
score = sklearn.metrics.accuracy_score(group[actual_col].tolist(),
group[predicted_col].tolist(),
normalize=False)
rows.append({'NumCases': len(group), 'NumCorrectPredictions': score})
return pd.DataFrame(rows)
def calc_cumulative_gains(df: pd.DataFrame, actual_col: str, predicted_col:str, probability_col:str):
df.sort_values(by=probability_col, ascending=True, inplace=True)
subset = df[df[predicted_col] == True]
lift = _split_df(subset, actual_col, predicted_col)
#Cumulative Gains Calculation
lift['RunningCorrect'] = lift['NumCorrectPredictions'].cumsum()
lift['PercentCorrect'] = lift.apply(
lambda x: (100 / lift['NumCorrectPredictions'].sum()) * x['RunningCorrect'], axis=1)
lift['CumulativeCorrectBestCase'] = lift['NumCases'].cumsum()
lift['PercentCorrectBestCase'] = lift['CumulativeCorrectBestCase'].apply(
lambda x: 100 if (100 / lift['NumCorrectPredictions'].sum()) * x > 100 else (100 / lift[
'NumCorrectPredictions'].sum()) * x)
lift['AvgCase'] = lift['NumCorrectPredictions'].sum() / len(lift)
lift['CumulativeAvgCase'] = lift['AvgCase'].cumsum()
lift['PercentAvgCase'] = lift['CumulativeAvgCase'].apply(
lambda x: (100 / lift['NumCorrectPredictions'].sum()) * x)
#Lift Chart
lift['NormalisedPercentAvg'] = 1
lift['NormalisedPercentWithModel'] = lift['PercentCorrect'] / lift['PercentAvgCase']
return lift
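# Illustrative sketch only (not part of this module): a small synthetic frame
# run through calc_cumulative_gains() above. The column names are arbitrary;
# they are simply whatever the caller passes in.
def _example_cumulative_gains():
    rng = np.random.RandomState(0)
    frame = pd.DataFrame({
        'actual': rng.randint(0, 2, 100).astype(bool),
        'predicted': rng.randint(0, 2, 100).astype(bool),
        'probability': rng.uniform(0, 1, 100),
    })
    lift = calc_cumulative_gains(frame, 'actual', 'predicted', 'probability')
    return lift[['PercentCorrect', 'PercentAvgCase', 'NormalisedPercentWithModel']]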
def plot_binned_response_rate(lift: pd.DataFrame):
import seaborn as sns
plt.figure()
sns.barplot(y=lift['NumCorrectPredictions'] / lift['NumCases'], x=lift.index.tolist(), color='salmon', saturation=0.5)
plt.show()
def plot_cumulative_gains(lift: pd.DataFrame):
fig, ax = plt.subplots()
fig.canvas.draw()
handles = []
handles.append(ax.plot(lift['PercentCorrect'], 'r-', label='Percent Correct Predictions'))
handles.append(ax.plot(lift['PercentCorrectBestCase'], 'g-', label='Best Case (for current model)'))
handles.append(ax.plot(lift['PercentAvgCase'], 'b-', label='Average Case (for current model)'))
ax.set_xlabel('Total Population (%)')
ax.set_ylabel('Number of Respondents (%)')
ax.set_xlim([0, 9])
ax.set_ylim([10, 100])
    try:
        labels = [int((label+1)*10) for label in [float(item.get_text()) for item in ax.get_xticklabels() if len(item.get_text()) > 0]]
    except BaseException:
        print([item.get_text() for item in ax.get_xticklabels()])
        # fall back to the existing tick labels so set_xticklabels() below still has a value
        labels = [item.get_text() for item in ax.get_xticklabels()]
    ax.set_xticklabels(labels)
fig.legend(handles, labels=[h[0].get_label() for h in handles])
fig.show()
def plot_lift_chart(lift: pd.DataFrame):
plt.figure()
plt.plot(lift['NormalisedPercentAvg'], 'r-', label='Normalised \'response rate\' with no model')
    plt.plot(lift['NormalisedPercentWithModel'], 'g-', label='Normalised \'response rate\' when using the model')
plt.legend()
plt.show()
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the refresh network command.
These tests don't do much, but they do verify that the command doesn't fail
immediately.
"""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from ipaddress import IPv4Address, IPv4Network
from brokertest import TestBrokerCommand
class TestRefreshNetwork(TestBrokerCommand):
# NOTE: The --all switch is not tested here because it would
# delay a standard run by minutes. Please test manually.
# NOTE: There's currently no way to test updates. Please test
# any changes manually.
def striplock(self, err):
filtered = []
for line in err.splitlines():
if line.find("Acquiring lock") == 0:
continue
if line.find("Lock acquired.") == 0:
continue
if line.find("Released lock") == 0:
continue
filtered.append(line)
return "".join("%s\n" % line for line in filtered)
def check_network(self, addr, net_name, net_ip, prefix):
name = "test-%s.aqd-unittest.ms.com" % addr.replace('.', '-')
command = ["show", "address", "--fqdn", name]
out = self.commandtest(command)
self.matchoutput(out, "Network: %s [%s/%d]" % (net_name, net_ip,
prefix), command)
command = ["cat", "--networkip", net_ip]
out = self.commandtest(command)
self.matchoutput(out, '"prefix_length" = %s;' % prefix, command)
net = IPv4Network(u"%s/%s" % (net_ip, prefix))
self.matchoutput(out, '"netmask" = "%s";' % net.netmask, command)
self.matchoutput(out, '"broadcast" = "%s";' % net.broadcast_address, command)
# 100 sync up building np
def test_100_syncfirst(self):
command = "refresh network --building np"
out = self.statustest(command.split(" "))
net = self.net["refreshtest3"]
self.matchoutput(out,
"Setting network refreshtest3 [%s/%d] "
"compartment to interior.ut" % (net.ip, net.prefixlen),
command)
net = self.net["refreshtest5"]
self.matchoutput(out,
"Setting network refreshtest5 [%s/28] "
"prefix length to 29" % net.ip,
command)
def test_105_verify_plenary_update(self):
net = self.net["refreshtest3"]
command = ["cat", "--networkip", net.ip]
out = self.commandtest(command)
self.matchoutput(out, '"network_compartment/name" = "interior.ut";',
command)
net = self.net["refreshtest5"]
command = ["cat", "--networkip", net.ip]
out = self.commandtest(command)
self.matchoutput(out, '"prefix_length" = 29;', command)
# 110 sync up building np expecting no output
def test_110_syncclean(self):
command = "refresh network --building np"
err = self.statustest(command.split(" "))
# Technically this could have changed in the last few seconds,
# but the test seems worth the risk. :)
err = self.striplock(err)
for line in err.rstrip().split('\n'):
if line.startswith('Unknown compartment nonexistant'):
continue
self.fail("Unexpected output '%s'" % line)
# 120 sync up building np dryrun expecting no output
def test_120_dryrun(self):
command = "refresh network --building np --dryrun"
err = self.statustest(command.split(" "))
# Technically this also could have changed in the last few seconds,
# but the test again seems worth the risk. :)
err = self.striplock(err)
for line in err.rstrip().split('\n'):
if line.startswith('Unknown compartment nonexistant'):
continue
self.fail("Unexpected output '%s'" % line)
def test_130_updates(self):
net = self.net["refreshtest1"]
self.noouttest(["del", "network", "--ip", net.ip])
self.noouttest(["add", "network", "--network", "wrong-params",
"--ip", net.ip, "--netmask", net.netmask,
"--side", "a", "--type", "transit", "--building", "ut"])
self.noouttest(["add", "router", "address", "--ip", net[3],
"--fqdn", "extrartr.aqd-unittest.ms.com"])
def test_135_syncagain(self):
net = self.net["refreshtest1"]
command = "refresh network --building np"
err = self.statustest(command.split(" "))
self.matchoutput(err, "Setting network wrong-params [%s/25] "
"name to refreshtest1" % net.ip, command)
msg = "Setting network refreshtest1 [%s/25] " % net.ip
self.matchoutput(err, msg + "type to unknown", command)
self.matchoutput(err, msg + "side to b", command)
self.matchoutput(err, msg + "location to building np", command)
self.matchoutput(err, "Removing router %s from network "
"refreshtest1" % net[3], command)
def test_138_router_gone(self):
command = "search system --fqdn extrartr.aqd-unittest.ms.com"
self.noouttest(command.split())
# 150 test adds with the sync of another building
def test_150_addhq(self):
command = "refresh network --building hq"
err = self.statustest(command.split(" "))
err = self.striplock(err)
self.matchoutput(err, "Adding", command)
self.matchclean(err, "Setting", command)
# Make sure the refresh logic does not try to remove networks in other
# buildings
self.matchclean(err, "Deleting", command)
# 200 add a dummy 0.1.1.0/24 network to np
def test_200_adddummynetwork(self):
command = ["add_network", "--network=0.1.1.0", "--ip=0.1.1.0",
"--prefixlen=24", "--building=np"]
self.noouttest(command)
command = ["add", "router", "address", "--ip", "0.1.1.1",
"--fqdn", "dummydyn.aqd-unittest.ms.com"]
self.noouttest(command)
def test_250_addtestnets(self):
networks = [
# Merge various sized subnets, one is missing
u"0.1.2.0/25", u"0.1.2.192/26",
# Merge various sized subnets, first is missing
u"0.1.3.64/26", u"0.1.3.128/25",
# Split in QIP
u"0.1.4.0/24",
# Another split in QIP
u"0.1.5.0/24"
]
for net in networks:
ipnet = IPv4Network(net)
self.noouttest(["add", "network", "--network", ipnet.network_address,
"--ip", ipnet.network_address,
"--prefixlen", ipnet.prefixlen,
"--building", "nettest"])
def test_255_add_addresses(self):
ips = ["0.1.2.1", "0.1.2.193",
"0.1.3.65", "0.1.3.129",
"0.1.4.1", "0.1.4.193",
"0.1.5.129", "0.1.5.193"]
for ip in ips:
name = "test-%s.aqd-unittest.ms.com" % ip.replace('.', '-')
self.dsdb_expect_add(name, ip)
self.noouttest(["add", "address", "--ip", ip, "--fqdn", name,
"--grn=grn:/ms/ei/aquilon/unittest"] + self.valid_just_tcm)
self.dsdb_verify()
def test_260_test_split_merge(self):
command = ["refresh", "network", "--building", "nettest"]
err = self.statustest(command)
# 0.1.2.x
self.matchoutput(err, "Setting network 0.1.2.0 [0.1.2.0/25] "
"name to merge_1", command)
self.matchoutput(err, "Adding router 0.1.2.1 to network merge_1",
command)
self.matchoutput(err, "Setting network merge_1 [0.1.2.0/25] "
"prefix length to 24", command)
self.matchoutput(err, "Deleting network 0.1.2.192 [0.1.2.192/26]",
command)
# 0.1.3.x
self.matchclean(err, "Setting network 0.1.3.0", command)
self.matchoutput(err, "Adding network merge_2 [0.1.3.0/24]", command)
self.matchoutput(err, "Adding router 0.1.3.1 to network merge_2",
command)
self.matchoutput(err, "Deleting network 0.1.3.64 [0.1.3.64/26]",
command)
self.matchoutput(err, "Deleting network 0.1.3.128 [0.1.3.128/25]",
command)
# 0.1.4.x
self.matchoutput(err, "Setting network 0.1.4.0 [0.1.4.0/24] "
"name to split_1", command)
self.matchoutput(err, "Adding router 0.1.4.1 to network split_1",
command)
self.matchoutput(err, "Setting network split_1 [0.1.4.0/24] "
"prefix length to 25", command)
self.matchoutput(err, "Adding network split_2 [0.1.4.192/26]", command)
self.matchoutput(err, "Adding router 0.1.4.193 to network split_2",
command)
# 0.1.5.x
self.matchclean(err, "Setting network 0.1.5.0", command)
self.matchoutput(err, "Adding network split_3 [0.1.5.128/26]", command)
self.matchoutput(err, "Adding router 0.1.5.129 to network split_3",
command)
self.matchoutput(err, "Adding network split_4 [0.1.5.192/26]", command)
self.matchoutput(err, "Adding router 0.1.5.193 to network split_4",
command)
self.matchoutput(err, "Deleting network 0.1.5.0", command)
def test_270_check_addresses(self):
self.check_network("0.1.2.1", "merge_1", "0.1.2.0", 24)
self.check_network("0.1.2.193", "merge_1", "0.1.2.0", 24)
self.check_network("0.1.3.65", "merge_2", "0.1.3.0", 24)
self.check_network("0.1.3.129", "merge_2", "0.1.3.0", 24)
self.check_network("0.1.4.1", "split_1", "0.1.4.0", 25)
self.check_network("0.1.4.193", "split_2", "0.1.4.192", 26)
self.check_network("0.1.5.129", "split_3", "0.1.5.128", 26)
self.check_network("0.1.5.193", "split_4", "0.1.5.192", 26)
# 300 add a small dynamic range to 0.1.1.0
def test_300_adddynamicrange(self):
for ip in range(int(IPv4Address(u"0.1.1.4")),
int(IPv4Address(u"0.1.1.8")) + 1):
self.dsdb_expect_add(self.dynname(IPv4Address(ip)), IPv4Address(ip))
command = ["add_dynamic_range", "--startip=0.1.1.4", "--endip=0.1.1.8",
"--dns_domain=aqd-unittest.ms.com"] + self.valid_just_tcm
self.statustest(command)
self.dsdb_verify()
def test_310_verifynetwork(self):
command = "show network --ip 0.1.1.0"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Dynamic Ranges: 0.1.1.4-0.1.1.8", command)
def test_310_verifynetwork_proto(self):
command = "show network --ip 0.1.1.0 --format proto"
net = self.protobuftest(command.split(" "), expect=1)[0]
self.assertEqual(len(net.dynamic_ranges), 1)
self.assertEqual(net.dynamic_ranges[0].start, "0.1.1.4")
self.assertEqual(net.dynamic_ranges[0].end, "0.1.1.8")
def failsync(self, command):
"""Common code for the two tests below."""
err = self.partialerrortest(command.split(" "))
err = self.striplock(err)
self.matchclean(err,
"Deleting network 0.1.1.0",
command)
for i in range(4, 9):
self.matchoutput(err,
"Network 0.1.1.0 [0.1.1.0/24] cannot be deleted "
"because DNS record "
"dynamic-0-1-1-%d.aqd-unittest.ms.com [0.1.1.%d] "
"still exists." %
(i, i),
command)
return err
# 400 normal should fail
def test_400_syncclean(self):
command = "refresh network --building np"
err = self.failsync(command)
self.matchoutput(err, "No changes applied because of errors.", command)
# 410 dryrun should fail, no real difference in this case...
def test_410_refreshnetworkdryrun(self):
command = "refresh network --building np --dryrun"
err = self.failsync(command)
self.matchoutput(err, "No changes applied because of errors.", command)
# 450 verify network still exists
def test_450_verifynetwork(self):
command = "show network --ip 0.1.1.0"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "IP: 0.1.1.0", command)
    # 500 incremental should be a partial fail
def test_500_incremental_fail(self):
command = "refresh network --building np --incremental"
err = self.failsync(command)
self.matchclean(err, "No changes applied because of errors.", command)
# 550 verify network still exists
def test_550_verifynetwork(self):
command = "show network --ip 0.1.1.0"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "IP: 0.1.1.0", command)
# 650 delete the dynamic range
def test_650_deldynamicrange(self):
for ip in range(int(IPv4Address(u"0.1.1.4")),
int(IPv4Address(u"0.1.1.8")) + 1):
self.dsdb_expect_delete(IPv4Address(ip))
command = ["del_dynamic_range", "--startip=0.1.1.4", "--endip=0.1.1.8"] + self.valid_just_tcm
self.statustest(command)
self.dsdb_verify()
def test_670_cleanup_addresses(self):
ips = ["0.1.2.1", "0.1.2.193",
"0.1.3.65", "0.1.3.129",
"0.1.4.1", "0.1.4.193",
"0.1.5.129", "0.1.5.193"]
for ip in ips:
name = "test-%s.aqd-unittest.ms.com" % ip.replace('.', '-')
self.dsdb_expect_delete(ip)
self.noouttest(["del", "address", "--ip", ip, "--fqdn", name] + self.valid_just_tcm)
self.dsdb_verify()
def test_680_cleanup_nettest(self):
networks = ["0.1.2.0", "0.1.3.0", "0.1.4.0", "0.1.4.192", "0.1.5.128",
"0.1.5.192"]
for net in networks:
self.noouttest(["del", "network", "--ip", net])
# 700 sync up building np
# One last time to clean up the dummy network
def test_700_syncclean(self):
command = "refresh network --building np"
err = self.statustest(command.split(" "))
err = self.striplock(err)
self.matchoutput(err, "Deleting network 0.1.1.0", command)
self.matchoutput(err, "Removing router 0.1.1.1", command)
def test_710_cleanrouter(self):
command = ["search", "system", "--fqdn", "dummydyn.aqd-unittest.ms.com"]
self.noouttest(command)
def test_720_syncbu(self):
command = "refresh network --building bu"
err = self.statustest(command.split(" "))
err = self.striplock(err)
self.matchoutput(err, "Unknown compartment nonexistant, ignoring", command)
def test_800_bunker_added(self):
net = self.net["aurora2"]
command = ["show", "network", "--ip", net.ip]
out = self.commandtest(command)
self.matchoutput(out, "Network: aurora2", command)
self.matchoutput(out, "IP: %s" % net.ip, command)
self.matchoutput(out, "Network Type: transit", command)
self.matchoutput(out, "Comments: Test aurora net", command)
self.matchoutput(out, "Bunker: nyb10.np", command)
def test_800_bunker_cleared(self):
net = self.net["np06bals03_v103"]
command = ["show", "network", "--ip", net.ip]
out = self.commandtest(command)
self.matchoutput(out, "Network: np06bals03_v103", command)
self.matchoutput(out, "IP: %s" % net.ip, command)
self.matchoutput(out, "Building: np", command)
def test_800_compartment_set(self):
net = self.net["refreshtest3"]
command = ["show", "network", "--ip", net.ip]
out = self.commandtest(command)
self.matchoutput(out, "Network Compartment: interior.ut", command)
def test_800_compartment_skipped(self):
net = self.net["refreshtest4"]
command = ["show", "network", "--ip", net.ip]
out = self.commandtest(command)
self.matchclean(out, "Network Compartment", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestRefreshNetwork)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
import csv
from os.path import basename
import pickle
import wx
OK_BACKGROUND_COLOR = 'PALE GREEN'
def determine_wildcard(extension=None, file_type=None):
"""
Assemble a wildcard string of the form:
[[file_type ](*.extension)|*.extension|]<all_files>
"""
all_files = 'All files|*'
if extension is not None:
if '|' in extension:
raise ValueError(extension)
if file_type is not None:
if '|' in file_type:
raise ValueError(file_type)
wildcard = '{0} (*.{1})|*.{1}|{2}'.format(file_type, extension, all_files)
else:
wildcard = '(*.{0})|*.{0}|{1}'.format(extension, all_files)
else:
wildcard = all_files
return wildcard
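# A minimal illustration of the wildcard strings assembled above (values are
# derived from the format strings in determine_wildcard, not from any wx API):
#
#     determine_wildcard()             -> 'All files|*'
#     determine_wildcard('csv')        -> '(*.csv)|*.csv|All files|*'
#     determine_wildcard('csv', 'CSV') -> 'CSV (*.csv)|*.csv|All files|*'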
def load_pickled(parent, extension=None, file_type=None):
"""
Unpickle data from a file based on a file dialog.
"""
wildcard = determine_wildcard(extension, file_type)
dlg = wx.FileDialog(parent=parent, message='Load...', wildcard=wildcard,
style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
with open(path, 'rb') as f:
try:
return pickle.load(f)
except Exception as e:
# Wrap all problems.
raise IOError('Could not load data.', e)
def save_pickled(parent, values, extension=None, file_type=None):
"""
Pickle data to a file based on a file dialog.
"""
wildcard = determine_wildcard(extension, file_type)
dlg = wx.FileDialog(parent=parent, message='Save...',
wildcard=wildcard, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
# Automatically append extension if none given.
if extension is not None and '.' not in path:
path = '{0}.{1}'.format(path, extension)
with open(path, 'wb') as f:
try:
pickle.dump(values, f, protocol=pickle.HIGHEST_PROTOCOL)
except Exception as e:
# Wrap all problems:
raise IOError('Could not save data.', e)
def load_csv(parent, extension='csv', file_type='CSV'):
"""
Load data from a CSV file based on a file dialog.
"""
wildcard = determine_wildcard(extension, file_type)
dlg = wx.FileDialog(parent=parent, message='Load...', wildcard=wildcard,
style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
filename = basename(path)
with open(path, 'rb') as f:
try:
result = list(csv.reader(f))
try:
has_header = len(result[0]) > 0
except IndexError:
has_header = False
else:
# Remove empty row.
if not has_header:
result = result[1:]
return (has_header, result, filename)
except Exception as e:
# Wrap all problems.
raise IOError('Could not load data.', e)
def save_csv(parent, values, headers=None, extension='csv', file_type='CSV'):
"""
Save data to a CSV file based on a file dialog.
"""
wildcard = determine_wildcard(extension, file_type)
dlg = wx.FileDialog(parent=parent, message='Save...',
wildcard=wildcard, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
# Automatically append extension if none given.
if extension is not None and '.' not in path:
path = '{0}.{1}'.format(path, extension)
with open(path, 'wb') as f:
try:
w = csv.writer(f)
if headers is not None:
w.writerow(headers)
else:
w.writerow([])
w.writerows(values)
except Exception as e:
# Wrap all problems:
raise IOError('Could not save data.', e)
class Dialog(wx.Dialog):
"""
Auto-destroying dialog.
"""
def __init__(self, parent, auto_destroy=True, *args, **kwargs):
wx.Dialog.__init__(self, parent, *args, **kwargs)
self.auto_destroy = auto_destroy
self.Bind(wx.EVT_SHOW, self.OnShow)
def OnShow(self, evt):
"""
Destroy the dialog when it disappears.
"""
if self.auto_destroy and not evt.Show:
if not self.IsBeingDeleted():
self.Destroy()
class MessageDialog(Dialog):
"""
A simple error message dialog.
"""
def __init__(self, parent, message, title='', unclosable=False, monospace=False,
*args, **kwargs):
kwargs['style'] = kwargs.get('style', wx.DEFAULT_DIALOG_STYLE) | wx.RESIZE_BORDER
if unclosable:
kwargs['style'] &= ~wx.CLOSE_BOX
Dialog.__init__(self, parent=parent, title=title, *args, **kwargs)
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Message.
message_text = wx.StaticText(self, label=message)
if monospace:
font = message_text.Font
message_text.Font = wx.Font(font.PointSize, wx.MODERN, font.Style, font.Weight)
message_text.SetMinSize((450, 100))
dialog_box.Add(message_text, proportion=1, flag=wx.EXPAND|wx.ALL, border=20)
## OK button.
if not unclosable:
ok_button = wx.Button(self, wx.ID_OK)
dialog_box.Add(ok_button, flag=wx.EXPAND)
self.SetSizerAndFit(dialog_box)
class YesNoQuestionDialog(Dialog):
"""
A yes/no question dialog.
"""
def __init__(self, parent, prompt, yes_callback=None, no_callback=None, title='',
*args, **kwargs):
Dialog.__init__(self, parent=parent, title=title,
style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER,
*args, **kwargs)
self.yes_callback = yes_callback
self.no_callback = no_callback
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Prompt.
prompt_text = wx.StaticText(self, label=prompt)
dialog_box.Add(prompt_text, proportion=1, flag=wx.EXPAND|wx.ALL, border=20)
## Buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
dialog_box.Add(button_box, flag=wx.CENTER)
yes_button = wx.Button(self, wx.ID_YES)
self.Bind(wx.EVT_BUTTON, self.OnYes, yes_button)
button_box.Add(yes_button)
no_button = wx.Button(self, wx.ID_NO)
self.Bind(wx.EVT_BUTTON, self.OnNo, no_button)
button_box.Add(no_button)
self.SetSizerAndFit(dialog_box)
self.Bind(wx.EVT_CLOSE, self.OnNo)
def OnYes(self, evt=None):
if self.yes_callback is not None:
self.yes_callback()
self.Destroy()
def OnNo(self, evt=None):
if self.no_callback is not None:
self.no_callback()
self.Destroy()
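# A minimal usage sketch (the parent frame and callback shown are illustrative,
# not part of this module):
#
#     dlg = YesNoQuestionDialog(parent=frame, title='Confirm',
#                               prompt='Discard unsaved changes?',
#                               yes_callback=frame.discard_changes)
#     dlg.Show()
#
# Either button (or closing the window, which is bound to OnNo) runs its
# callback and then destroys the dialog.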
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Dict client protocol implementation.
@author: Pavel Pergamenshchik
"""
from twisted.protocols import basic
from twisted.internet import defer, protocol
from twisted.python import log
from io import BytesIO
def parseParam(line):
"""Chew one dqstring or atom from beginning of line and return (param, remaningline)"""
if line == b'':
return (None, b'')
elif line[0:1] != b'"': # atom
mode = 1
else: # dqstring
mode = 2
res = b""
io = BytesIO(line)
if mode == 2: # skip the opening quote
io.read(1)
while 1:
a = io.read(1)
if a == b'"':
if mode == 2:
io.read(1) # skip the separating space
return (res, io.read())
elif a == b'\\':
a = io.read(1)
if a == b'':
return (None, line) # unexpected end of string
elif a == b'':
if mode == 1:
return (res, io.read())
else:
return (None, line) # unexpected end of string
elif a == b' ':
if mode == 1:
return (res, io.read())
res += a
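# A short illustration of parseParam on the two token kinds it understands
# (example inputs only): an atom stops at the first space, while a dquoted
# string keeps embedded spaces and drops the surrounding quotes.
#
#     parseParam(b'foo bar')        -> (b'foo', b'bar')
#     parseParam(b'"foo bar" baz')  -> (b'foo bar', b'baz')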
def makeAtom(line):
"""Munch a string into an 'atom'"""
# FIXME: proper quoting
    special = [chr(c) for c in list(range(33)) + [34, 39, 92]]
    return filter(lambda x: x not in special, line)
def makeWord(s):
    mustquote = list(range(33)) + [34, 39, 92]
result = []
for c in s:
if ord(c) in mustquote:
result.append(b"\\")
result.append(c)
s = b"".join(result)
return s
def parseText(line):
if len(line) == 1 and line == b'.':
return None
else:
if len(line) > 1 and line[0:2] == b'..':
line = line[1:]
return line
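# In the dict protocol text framing handled above, a line consisting of a
# single "." terminates the block (parseText returns None) and a leading ".."
# is unescaped to a single "." in the payload line.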
class Definition:
"""A word definition"""
def __init__(self, name, db, dbdesc, text):
self.name = name
self.db = db
self.dbdesc = dbdesc
self.text = text # list of strings not terminated by newline
class DictClient(basic.LineReceiver):
"""dict (RFC2229) client"""
data = None # multiline data
MAX_LENGTH = 1024
state = None
mode = None
result = None
factory = None
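    # The protocol is driven as a small state machine: ``state`` moves from
    # "conn" to "ready" and then to "define"/"match" and back, while ``mode``
    # toggles between "command" (status lines) and "text" (multiline payloads
    # terminated by a lone ".").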
def __init__(self):
self.data = None
self.result = None
def connectionMade(self):
self.state = "conn"
self.mode = "command"
def sendLine(self, line):
"""Throw up if the line is longer than 1022 characters"""
if len(line) > self.MAX_LENGTH - 2:
raise ValueError("DictClient tried to send a too long line")
basic.LineReceiver.sendLine(self, line)
def lineReceived(self, line):
try:
line = line.decode("utf-8")
except UnicodeError: # garbage received, skip
return
if self.mode == "text": # we are receiving textual data
code = "text"
else:
if len(line) < 4:
log.msg("DictClient got invalid line from server -- %s" % line)
self.protocolError("Invalid line from server")
                self.transport.loseConnection()
return
code = int(line[:3])
line = line[4:]
method = getattr(self, 'dictCode_%s_%s' % (code, self.state), self.dictCode_default)
method(line)
def dictCode_default(self, line):
"""Unknown message"""
log.msg("DictClient got unexpected message from server -- %s" % line)
self.protocolError("Unexpected server message")
self.transport.loseConnection()
def dictCode_221_ready(self, line):
"""We are about to get kicked off, do nothing"""
pass
def dictCode_220_conn(self, line):
"""Greeting message"""
self.state = "ready"
self.dictConnected()
def dictCode_530_conn(self):
self.protocolError("Access denied")
self.transport.loseConnection()
def dictCode_420_conn(self):
self.protocolError("Server temporarily unavailable")
self.transport.loseConnection()
def dictCode_421_conn(self):
self.protocolError("Server shutting down at operator request")
self.transport.loseConnection()
def sendDefine(self, database, word):
"""Send a dict DEFINE command"""
assert self.state == "ready", "DictClient.sendDefine called when not in ready state"
self.result = None # these two are just in case. In "ready" state, result and data
self.data = None # should be None
self.state = "define"
command = "DEFINE %s %s" % (makeAtom(database.encode("UTF-8")), makeWord(word.encode("UTF-8")))
self.sendLine(command)
def sendMatch(self, database, strategy, word):
"""Send a dict MATCH command"""
assert self.state == "ready", "DictClient.sendMatch called when not in ready state"
self.result = None
self.data = None
self.state = "match"
command = "MATCH %s %s %s" % (makeAtom(database), makeAtom(strategy), makeAtom(word))
self.sendLine(command.encode("UTF-8"))
def dictCode_550_define(self, line):
"""Invalid database"""
self.mode = "ready"
self.defineFailed("Invalid database")
def dictCode_550_match(self, line):
"""Invalid database"""
self.mode = "ready"
self.matchFailed("Invalid database")
def dictCode_551_match(self, line):
"""Invalid strategy"""
self.mode = "ready"
self.matchFailed("Invalid strategy")
def dictCode_552_define(self, line):
"""No match"""
self.mode = "ready"
self.defineFailed("No match")
def dictCode_552_match(self, line):
"""No match"""
self.mode = "ready"
self.matchFailed("No match")
def dictCode_150_define(self, line):
"""n definitions retrieved"""
self.result = []
def dictCode_151_define(self, line):
"""Definition text follows"""
self.mode = "text"
(word, line) = parseParam(line)
(db, line) = parseParam(line)
(dbdesc, line) = parseParam(line)
if not (word and db and dbdesc):
self.protocolError("Invalid server response")
self.transport.loseConnection()
else:
self.result.append(Definition(word, db, dbdesc, []))
self.data = []
def dictCode_152_match(self, line):
"""n matches found, text follows"""
self.mode = "text"
self.result = []
self.data = []
def dictCode_text_define(self, line):
"""A line of definition text received"""
res = parseText(line)
        if res is None:
self.mode = "command"
self.result[-1].text = self.data
self.data = None
else:
self.data.append(line)
def dictCode_text_match(self, line):
"""One line of match text received"""
def l(s):
p1, t = parseParam(s)
p2, t = parseParam(t)
return (p1, p2)
res = parseText(line)
        if res is None:
self.mode = "command"
            self.result = list(map(l, self.data))
self.data = None
else:
self.data.append(line)
def dictCode_250_define(self, line):
"""ok"""
t = self.result
self.result = None
self.state = "ready"
self.defineDone(t)
def dictCode_250_match(self, line):
"""ok"""
t = self.result
self.result = None
self.state = "ready"
self.matchDone(t)
def protocolError(self, reason):
"""override to catch unexpected dict protocol conditions"""
pass
def dictConnected(self):
"""override to be notified when the server is ready to accept commands"""
pass
def defineFailed(self, reason):
"""override to catch reasonable failure responses to DEFINE"""
pass
def defineDone(self, result):
"""override to catch successful DEFINE"""
pass
def matchFailed(self, reason):
"""override to catch resonable failure responses to MATCH"""
pass
def matchDone(self, result):
"""override to catch successful MATCH"""
pass
class InvalidResponse(Exception):
pass
class DictLookup(DictClient):
"""Utility class for a single dict transaction. To be used with DictLookupFactory"""
def protocolError(self, reason):
if not self.factory.done:
self.factory.d.errback(InvalidResponse(reason))
self.factory.clientDone()
def dictConnected(self):
if self.factory.queryType == "define":
            self.sendDefine(*self.factory.param)
        elif self.factory.queryType == "match":
            self.sendMatch(*self.factory.param)
def defineFailed(self, reason):
self.factory.d.callback([])
self.factory.clientDone()
self.transport.loseConnection()
def defineDone(self, result):
self.factory.d.callback(result)
self.factory.clientDone()
self.transport.loseConnection()
def matchFailed(self, reason):
self.factory.d.callback([])
self.factory.clientDone()
self.transport.loseConnection()
def matchDone(self, result):
self.factory.d.callback(result)
self.factory.clientDone()
self.transport.loseConnection()
class DictLookupFactory(protocol.ClientFactory):
"""Utility factory for a single dict transaction"""
protocol = DictLookup
done = None
def __init__(self, queryType, param, d):
self.queryType = queryType
self.param = param
self.d = d
self.done = 0
def clientDone(self):
"""Called by client when done."""
self.done = 1
del self.d
def clientConnectionFailed(self, connector, error):
self.d.errback(error)
def clientConnectionLost(self, connector, error):
if not self.done:
self.d.errback(error)
def buildProtocol(self, addr):
p = self.protocol()
p.factory = self
return p
def define(host, port, database, word):
"""Look up a word using a dict server"""
d = defer.Deferred()
factory = DictLookupFactory("define", (database, word), d)
from twisted.internet import reactor
reactor.connectTCP(host, port, factory)
return d
def match(host, port, database, strategy, word):
"""Match a word using a dict server"""
d = defer.Deferred()
factory = DictLookupFactory("match", (database, strategy, word), d)
from twisted.internet import reactor
reactor.connectTCP(host, port, factory)
return d
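# A minimal usage sketch (the server host shown is illustrative; 2628 is the
# standard dict port and "*" asks all databases). The deferred returned by
# define() fires with a list of Definition objects whose ``text`` holds the
# decoded definition lines.
#
#     from twisted.internet import reactor
#
#     def show(definitions):
#         for definition in definitions:
#             print("\n".join(definition.text))
#         reactor.stop()
#
#     define("dict.org", 2628, "*", "hello").addCallback(show)
#     reactor.run()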
|
|
# Copyright 2015 Hitachi Data Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
import testtools
from testtools import testcase as tc
from manila_tempest_tests.common import constants
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
class MigrationBase(base.BaseSharesAdminTest):
"""Base test class for Share Migration.
Tests share migration in multi-backend environment.
This class covers:
1) Driver-assisted migration: force_host_assisted_migration, nondisruptive,
writable and preserve-metadata are False.
2) Host-assisted migration: force_host_assisted_migration is True,
nondisruptive, writable, preserve-metadata and preserve-snapshots are
False.
3) 2-phase migration of both Host-assisted and Driver-assisted.
4) Cancelling migration past first phase.
5) Changing driver modes through migration.
    There is no need to test with writable, preserve-metadata and
    non-disruptive set to True: those values are merely supplied to the driver,
    which decides what to do with them. The tests should be positive, and not
    being writable, not preserving metadata and being disruptive is less
    restrictive for drivers, which would abort if they cannot handle those
    requirements.
Drivers that implement driver-assisted migration should enable the
configuration flag to be tested.
"""
protocol = None
@classmethod
def resource_setup(cls):
super(MigrationBase, cls).resource_setup()
if cls.protocol not in CONF.share.enable_protocols:
message = "%s tests are disabled." % cls.protocol
raise cls.skipException(message)
if not (CONF.share.run_host_assisted_migration_tests or
CONF.share.run_driver_assisted_migration_tests):
raise cls.skipException("Share migration tests are disabled.")
cls.pools = cls.shares_v2_client.list_pools(detail=True)['pools']
if len(cls.pools) < 2:
raise cls.skipException("At least two different pool entries are "
"needed to run share migration tests.")
cls.new_type = cls.create_share_type(
name=data_utils.rand_name('new_share_type_for_migration'),
cleanup_in_class=True,
extra_specs=utils.get_configured_extra_specs())
cls.new_type_opposite = cls.create_share_type(
name=data_utils.rand_name('new_share_type_for_migration_opposite'),
cleanup_in_class=True,
extra_specs=utils.get_configured_extra_specs(
variation='opposite_driver_modes'))
def _setup_migration(self, share, opposite=False):
if opposite:
dest_type = self.new_type_opposite['share_type']
else:
dest_type = self.new_type['share_type']
dest_pool = utils.choose_matching_backend(share, self.pools, dest_type)
if opposite:
if not dest_pool:
raise self.skipException(
"This test requires two pools enabled with different "
"driver modes.")
else:
self.assertIsNotNone(dest_pool)
self.assertIsNotNone(dest_pool.get('name'))
old_exports = self.shares_v2_client.list_share_export_locations(
share['id'])
self.assertNotEmpty(old_exports)
old_exports = [x['path'] for x in old_exports
if x['is_admin_only'] is False]
self.assertNotEmpty(old_exports)
self.shares_v2_client.create_access_rule(
share['id'], access_to="50.50.50.50", access_level="rw")
self.shares_v2_client.wait_for_share_status(
share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
self.shares_v2_client.create_access_rule(
share['id'], access_to="51.51.51.51", access_level="ro")
self.shares_v2_client.wait_for_share_status(
share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
dest_pool = dest_pool['name']
share = self.shares_v2_client.get_share(share['id'])
return share, dest_pool
def _validate_migration_successful(self, dest_pool, share, status_to_wait,
version=CONF.share.max_api_microversion,
complete=True, share_network_id=None,
share_type_id=None):
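        """Check export locations, task state and access rules after migration.

        When ``complete`` is True the share is also expected to live on
        ``dest_pool`` and to carry both access rules created in
        _setup_migration; otherwise its host must still differ from
        ``dest_pool``.
        """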
statuses = ((status_to_wait,)
if not isinstance(status_to_wait, (tuple, list, set))
else status_to_wait)
new_exports = self.shares_v2_client.list_share_export_locations(
share['id'], version=version)
self.assertNotEmpty(new_exports)
new_exports = [x['path'] for x in new_exports if
x['is_admin_only'] is False]
self.assertNotEmpty(new_exports)
self.assertIn(share['task_state'], statuses)
if share_network_id:
self.assertEqual(share_network_id, share['share_network_id'])
if share_type_id:
self.assertEqual(share_type_id, share['share_type'])
# Share migrated
if complete:
self.assertEqual(dest_pool, share['host'])
rules = self.shares_v2_client.list_access_rules(share['id'])
expected_rules = [{
'state': constants.RULE_STATE_ACTIVE,
'access_to': '50.50.50.50',
'access_type': 'ip',
'access_level': 'rw',
}, {
'state': constants.RULE_STATE_ACTIVE,
'access_to': '51.51.51.51',
'access_type': 'ip',
'access_level': 'ro',
}]
filtered_rules = [{'state': rule['state'],
'access_to': rule['access_to'],
'access_level': rule['access_level'],
'access_type': rule['access_type']}
for rule in rules]
for r in expected_rules:
self.assertIn(r, filtered_rules)
self.assertEqual(len(expected_rules), len(filtered_rules))
# Share not migrated yet
else:
self.assertNotEqual(dest_pool, share['host'])
def _check_migration_enabled(self, force_host_assisted):
if force_host_assisted:
if not CONF.share.run_host_assisted_migration_tests:
raise self.skipException(
"Host-assisted migration tests are disabled.")
else:
if not CONF.share.run_driver_assisted_migration_tests:
raise self.skipException(
"Driver-assisted migration tests are disabled.")
def _create_secondary_share_network(self, old_share_network_id):
old_share_network = self.shares_v2_client.get_share_network(
old_share_network_id)
new_share_network = self.create_share_network(
cleanup_in_class=True,
neutron_net_id=old_share_network['neutron_net_id'],
neutron_subnet_id=old_share_network['neutron_subnet_id'])
return new_share_network['id']
def _test_resize_post_migration(self, force_host_assisted, resize):
self._check_migration_enabled(force_host_assisted)
new_size = CONF.share.share_size + 1
share = self.create_share(self.protocol, size=new_size)
share = self.shares_v2_client.get_share(share['id'])
share, dest_pool = self._setup_migration(share)
task_state, new_share_network_id, new_share_type_id = (
self._get_migration_data(share, force_host_assisted))
share = self.migrate_share(
share['id'], dest_pool,
force_host_assisted_migration=force_host_assisted,
wait_for_status=task_state, new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)
share = self.migration_complete(share['id'], dest_pool)
if resize == 'extend':
new_size = CONF.share.share_size + 2
self.shares_v2_client.extend_share(share['id'], new_size)
self.shares_v2_client.wait_for_share_status(
share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))
else:
new_size = CONF.share.share_size
self.shares_v2_client.shrink_share(share['id'], new_size)
self.shares_v2_client.wait_for_share_status(
share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))
self._cleanup_share(share)
def _get_migration_data(self, share, force_host_assisted=False):
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
else constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
old_share_network_id = share['share_network_id']
if CONF.share.multitenancy_enabled:
new_share_network_id = self._create_secondary_share_network(
old_share_network_id)
else:
new_share_network_id = None
new_share_type_id = self.new_type['share_type']['id']
return task_state, new_share_network_id, new_share_type_id
def _validate_snapshot(self, share, snapshot1, snapshot2):
snapshot_list = self.shares_v2_client.list_snapshots_for_share(
share['id'])
msg = "Share %s has no snapshot." % share['id']
# Verify that snapshot list is not empty
self.assertNotEmpty(snapshot_list, msg)
snapshot_id_list = [snap['id'] for snap in snapshot_list]
# verify that after migration original snapshots are retained
self.assertIn(snapshot1['id'], snapshot_id_list)
self.assertIn(snapshot2['id'], snapshot_id_list)
# Verify that a share can be created from a snapshot after migration
snapshot1_share = self.create_share(
self.protocol, size=share['size'], snapshot_id=snapshot1['id'],
share_network_id=share['share_network_id'])
self.assertEqual(snapshot1['id'], snapshot1_share['snapshot_id'])
self._cleanup_share(share)
def _validate_share_migration_with_different_snapshot_capability_type(
self, force_host_assisted, snapshot_capable):
self._check_migration_enabled(force_host_assisted)
ss_type, no_ss_type = self._create_share_type_for_snapshot_capability()
if snapshot_capable:
share_type = ss_type['share_type']
share_type_id = no_ss_type['share_type']['id']
new_share_type_id = ss_type['share_type']['id']
else:
share_type = no_ss_type['share_type']
share_type_id = ss_type['share_type']['id']
new_share_type_id = no_ss_type['share_type']['id']
share = self.create_share(
self.protocol, share_type_id=share_type_id)
share = self.shares_v2_client.get_share(share['id'])
if snapshot_capable:
self.assertEqual(False, share['snapshot_support'])
else:
# Verify that share has snapshot support capability
self.assertTrue(share['snapshot_support'])
dest_pool = utils.choose_matching_backend(share, self.pools,
share_type)
task_state, new_share_network_id, __ = (
self._get_migration_data(share, force_host_assisted))
share = self.migrate_share(
share['id'], dest_pool['name'],
force_host_assisted_migration=force_host_assisted,
wait_for_status=task_state,
new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)
share = self.migration_complete(share['id'], dest_pool)
if snapshot_capable:
# Verify that migrated share does have snapshot support capability
self.assertTrue(share['snapshot_support'])
else:
            # Verify that migrated share doesn't have snapshot support capability
self.assertEqual(False, share['snapshot_support'])
self._cleanup_share(share)
def _create_share_type_for_snapshot_capability(self):
# Share type with snapshot support
st_name = data_utils.rand_name(
'snapshot_capable_share_type_for_migration')
extra_specs = self.add_extra_specs_to_dict({"snapshot_support": True})
ss_type = self.create_share_type(st_name, extra_specs=extra_specs)
# New share type with no snapshot support capability
# to which a share will be migrated
new_st_name = data_utils.rand_name(
'snapshot_noncapable_share_type_for_migration')
extra_specs = {
"driver_handles_share_servers": CONF.share.multitenancy_enabled
}
no_ss_type = self.create_share_type(new_st_name,
extra_specs=extra_specs)
return ss_type, no_ss_type
def _cleanup_share(self, share):
resource = {"type": "share", "id": share["id"],
"client": self.shares_v2_client}
        # NOTE(Yogi1): The share needs to be cleaned up explicitly at the end
        # of the test; otherwise the newly created share_network will not get
        # cleaned up.
self.method_resources.insert(0, resource)
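# The concrete test classes below all follow the same two-phase pattern built
# on MigrationBase: migrate_share() starts the migration and waits for the
# first-phase task state returned by _get_migration_data()
# (DATA_COPYING_COMPLETED for host-assisted, MIGRATION_DRIVER_PHASE1_DONE for
# driver-assisted), after which migration_complete() or migration_cancel()
# drives the second phase.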
@ddt.ddt
class MigrationCancelNFSTest(MigrationBase):
protocol = "nfs"
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migration_cancel(self, force_host_assisted):
self._check_migration_enabled(force_host_assisted)
share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])
share, dest_pool = self._setup_migration(share)
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
else constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
share = self.migrate_share(
share['id'], dest_pool, wait_for_status=task_state,
force_host_assisted_migration=force_host_assisted)
self._validate_migration_successful(
dest_pool, share, task_state, complete=False)
progress = self.shares_v2_client.migration_get_progress(share['id'])
self.assertEqual(task_state, progress['task_state'])
self.assertEqual(100, progress['total_progress'])
share = self.migration_cancel(share['id'], dest_pool)
progress = self.shares_v2_client.migration_get_progress(share['id'])
self.assertEqual(
constants.TASK_STATE_MIGRATION_CANCELLED, progress['task_state'])
self.assertEqual(100, progress['total_progress'])
self._validate_migration_successful(
dest_pool, share, constants.TASK_STATE_MIGRATION_CANCELLED,
complete=False)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(
CONF.share.run_snapshot_tests, 'Snapshot tests are disabled.')
@testtools.skipUnless(
CONF.share.run_driver_assisted_migration_tests,
'Driver-assisted migration tests are disabled.')
@testtools.skipUnless(
CONF.share.run_migration_with_preserve_snapshots_tests,
'Migration with preserve snapshots tests are disabled.')
def test_migration_cancel_share_with_snapshot(self):
share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])
share, dest_pool = self._setup_migration(share)
snapshot1 = self.create_snapshot_wait_for_active(share['id'])
snapshot2 = self.create_snapshot_wait_for_active(share['id'])
task_state, new_share_network_id, new_share_type_id = (
self._get_migration_data(share))
share = self.migrate_share(
share['id'], dest_pool,
wait_for_status=task_state, new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id, preserve_snapshots=True)
share = self.migration_cancel(share['id'], dest_pool)
self._validate_snapshot(share, snapshot1, snapshot2)
@ddt.ddt
class MigrationOppositeDriverModesNFSTest(MigrationBase):
protocol = "nfs"
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migration_opposite_driver_modes(self, force_host_assisted):
self._check_migration_enabled(force_host_assisted)
share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])
share, dest_pool = self._setup_migration(share, opposite=True)
if not CONF.share.multitenancy_enabled:
# If currently configured is DHSS=False,
# then we need it for DHSS=True
new_share_network_id = self.provide_share_network(
self.shares_v2_client,
self.os_admin.networks_client,
isolated_creds_client=None,
ignore_multitenancy_config=True,
)
else:
# If currently configured is DHSS=True,
# then we must pass None for DHSS=False
new_share_network_id = None
old_share_network_id = share['share_network_id']
old_share_type_id = share['share_type']
new_share_type_id = self.new_type_opposite['share_type']['id']
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
else constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
share = self.migrate_share(
share['id'], dest_pool,
force_host_assisted_migration=force_host_assisted,
wait_for_status=task_state, new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)
self._validate_migration_successful(
dest_pool, share, task_state, complete=False,
share_network_id=old_share_network_id,
share_type_id=old_share_type_id)
progress = self.shares_v2_client.migration_get_progress(share['id'])
self.assertEqual(task_state, progress['task_state'])
self.assertEqual(100, progress['total_progress'])
share = self.migration_complete(share['id'], dest_pool)
progress = self.shares_v2_client.migration_get_progress(share['id'])
self.assertEqual(
constants.TASK_STATE_MIGRATION_SUCCESS, progress['task_state'])
self.assertEqual(100, progress['total_progress'])
self._validate_migration_successful(
dest_pool, share, constants.TASK_STATE_MIGRATION_SUCCESS,
complete=True, share_network_id=new_share_network_id,
share_type_id=new_share_type_id)
@ddt.ddt
class MigrationTwoPhaseNFSTest(MigrationBase):
protocol = "nfs"
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migration_2phase(self, force_host_assisted):
self._check_migration_enabled(force_host_assisted)
share = self.create_share(self.protocol)
share = self.shares_v2_client.get_share(share['id'])
share, dest_pool = self._setup_migration(share)
old_share_network_id = share['share_network_id']
old_share_type_id = share['share_type']
task_state, new_share_network_id, new_share_type_id = (
self._get_migration_data(share, force_host_assisted))
share = self.migrate_share(
share['id'], dest_pool,
force_host_assisted_migration=force_host_assisted,
wait_for_status=task_state, new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)
self._validate_migration_successful(
dest_pool, share, task_state, complete=False,
share_network_id=old_share_network_id,
share_type_id=old_share_type_id)
progress = self.shares_v2_client.migration_get_progress(share['id'])
self.assertEqual(task_state, progress['task_state'])
self.assertEqual(100, progress['total_progress'])
share = self.migration_complete(share['id'], dest_pool)
progress = self.shares_v2_client.migration_get_progress(share['id'])
self.assertEqual(
constants.TASK_STATE_MIGRATION_SUCCESS, progress['task_state'])
self.assertEqual(100, progress['total_progress'])
self._validate_migration_successful(
dest_pool, share, constants.TASK_STATE_MIGRATION_SUCCESS,
complete=True, share_network_id=new_share_network_id,
share_type_id=new_share_type_id)
self._cleanup_share(share)
@ddt.ddt
class MigrationWithShareExtendingNFSTest(MigrationBase):
protocol = "nfs"
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(
CONF.share.run_extend_tests, 'Extend share tests are disabled.')
@ddt.data(True, False)
def test_extend_on_migrated_share(self, force_host_assisted):
self._test_resize_post_migration(force_host_assisted, resize='extend')
@ddt.ddt
class MigrationWithShareShrinkingNFSTest(MigrationBase):
protocol = "nfs"
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(
CONF.share.run_shrink_tests, 'Shrink share tests are disabled.')
@ddt.data(True, False)
def test_shrink_on_migrated_share(self, force_host_assisted):
self._test_resize_post_migration(force_host_assisted, resize='shrink')
@ddt.ddt
class MigrationOfShareWithSnapshotNFSTest(MigrationBase):
protocol = "nfs"
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(
CONF.share.run_snapshot_tests, 'Snapshot tests are disabled.')
@testtools.skipUnless(
CONF.share.run_driver_assisted_migration_tests,
'Driver-assisted migration tests are disabled.')
@testtools.skipUnless(
CONF.share.run_migration_with_preserve_snapshots_tests,
'Migration with preserve snapshots tests are disabled.')
def test_migrating_share_with_snapshot(self):
ss_type, __ = self._create_share_type_for_snapshot_capability()
share = self.create_share(self.protocol, cleanup_in_class=False)
share = self.shares_v2_client.get_share(share['id'])
share, dest_pool = self._setup_migration(share)
snapshot1 = self.create_snapshot_wait_for_active(
share['id'], cleanup_in_class=False)
snapshot2 = self.create_snapshot_wait_for_active(
share['id'], cleanup_in_class=False)
task_state, new_share_network_id, __ = self._get_migration_data(share)
share = self.migrate_share(
share['id'], dest_pool,
wait_for_status=task_state,
new_share_type_id=ss_type['share_type']['id'],
new_share_network_id=new_share_network_id, preserve_snapshots=True)
share = self.migration_complete(share['id'], dest_pool)
self._validate_snapshot(share, snapshot1, snapshot2)
@ddt.ddt
class MigrationWithDifferentSnapshotSupportNFSTest(MigrationBase):
protocol = "nfs"
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
'Snapshot tests are disabled.')
@ddt.data(True, False)
def test_migrate_share_to_snapshot_capability_share_type(
self, force_host_assisted):
# Verify that share with no snapshot support type can be migrated
# to new share type which supports the snapshot
self._validate_share_migration_with_different_snapshot_capability_type(
force_host_assisted, True)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
'Snapshot tests are disabled.')
@ddt.data(True, False)
def test_migrate_share_to_no_snapshot_capability_share_type(
self, force_host_assisted):
# Verify that share with snapshot support type can be migrated
# to new share type which doesn't support the snapshot
self._validate_share_migration_with_different_snapshot_capability_type(
force_host_assisted, False)
# NOTE(u_glide): this function is required to exclude MigrationBase from
# executed test cases.
# See: https://docs.python.org/2/library/unittest.html#load-tests-protocol
# for details.
def load_tests(loader, tests, _):
result = []
for test_case in tests:
if not test_case._tests or type(test_case._tests[0]) is MigrationBase:
continue
result.append(test_case)
return loader.suiteClass(result)
|
|
# coding: utf-8
import json
from typing import Union
from django.db.models import Q
from django.db.models.signals import pre_delete
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.translation import ugettext as _
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.exceptions import ParseError
from rest_framework.serializers import ValidationError
from rest_framework.settings import api_settings
from onadata.apps.api.exceptions import NoConfirmationProvidedException
from onadata.apps.api.viewsets.xform_viewset import custom_response_handler
from onadata.apps.api.tools import add_tags_to_instance, \
add_validation_status_to_instance, get_validation_status, \
remove_validation_status_from_instance
from onadata.apps.logger.models.xform import XForm
from onadata.apps.logger.models.instance import Instance
from onadata.apps.viewer.models.parsed_instance import _remove_from_mongo, ParsedInstance
from onadata.libs.renderers import renderers
from onadata.libs.mixins.anonymous_user_public_forms_mixin import (
AnonymousUserPublicFormsMixin)
from onadata.apps.api.permissions import (
EnketoSubmissionEditPermissions,
EnketoSubmissionViewPermissions,
XFormDataPermissions,
)
from onadata.libs.serializers.data_serializer import (
DataSerializer, DataListSerializer, DataInstanceSerializer)
from onadata.libs import filters
from onadata.libs.utils.viewer_tools import (
EnketoError,
get_enketo_submission_url,
)
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
class DataViewSet(AnonymousUserPublicFormsMixin, ModelViewSet):
"""
This endpoint provides access to submitted data in JSON format. Where:
* `pk` - the form unique identifier
* `dataid` - submission data unique identifier
* `owner` - username of the owner of the data point
## GET JSON List of data end points
    Lists the data endpoints accessible to the requesting user; for anonymous
    access, a list of public data endpoints is returned.
<pre class="prettyprint">
<b>GET</b> /api/v1/data
</pre>
> Example
>
> curl -X GET https://example.com/api/v1/data
> Response
>
> [{
> "id": 4240,
> "id_string": "dhis2form"
> "title": "dhis2form"
> "description": "dhis2form"
> "url": "https://example.com/api/v1/data/4240"
> },
> ...
> ]
## Download data in `csv` format
<pre class="prettyprint">
<b>GET</b> /api/v1/data.csv</pre>
>
> curl -O https://example.com/api/v1/data.csv
## GET JSON List of data end points filter by owner
    Lists the data endpoints accessible to the requesting user, filtered by the
    `owner` username passed as a query parameter.
<pre class="prettyprint">
<b>GET</b> /api/v1/data?<code>owner</code>=<code>owner_username</code>
</pre>
> Example
>
> curl -X GET https://example.com/api/v1/data?owner=ona
## Get Submitted data for a specific form
Provides a list of json submitted data for a specific form.
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{pk}</code></pre>
> Example
>
> curl -X GET https://example.com/api/v1/data/22845
> Response
>
> [
> {
> "_id": 4503,
> "expense_type": "service",
> "_xform_id_string": "exp",
> "_geolocation": [
> null,
> null
> ],
> "end": "2013-01-03T10:26:25.674+03",
> "start": "2013-01-03T10:25:17.409+03",
> "expense_date": "2011-12-23",
> "_status": "submitted_via_web",
> "today": "2013-01-03",
> "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
> "imei": "351746052013466",
> "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
> "kind": "monthly",
> "_submission_time": "2013-01-03T02:27:19",
> "required": "yes",
> "_attachments": [],
> "item": "Rent",
> "amount": "35000.0",
> "deviceid": "351746052013466",
> "subscriberid": "639027...60317"
> },
> {
> ....
> "subscriberid": "639027...60317"
> }
> ]
## Get a single data submission for a given form
Get a single specific submission json data providing `pk`
and `dataid` as url path parameters, where:
* `pk` - is the identifying number for a specific form
* `dataid` - is the unique id of the data, the value of `_id` or `_uuid`
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{pk}</code>/<code>{dataid}</code></pre>
> Example
>
> curl -X GET https://example.com/api/v1/data/22845/4503
> Response
>
> {
> "_id": 4503,
> "expense_type": "service",
> "_xform_id_string": "exp",
> "_geolocation": [
> null,
> null
> ],
> "end": "2013-01-03T10:26:25.674+03",
> "start": "2013-01-03T10:25:17.409+03",
> "expense_date": "2011-12-23",
> "_status": "submitted_via_web",
> "today": "2013-01-03",
> "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
> "imei": "351746052013466",
> "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
> "kind": "monthly",
> "_submission_time": "2013-01-03T02:27:19",
> "required": "yes",
> "_attachments": [],
> "item": "Rent",
> "amount": "35000.0",
> "deviceid": "351746052013466",
> "subscriberid": "639027...60317"
    > }
## Query submitted data of a specific form
    Provides a list of json submitted data for a specific form. Use the `query`
    parameter to apply form-data-specific filters, see
<a href="http://docs.mongodb.org/manual/reference/operator/query/">
http://docs.mongodb.org/manual/reference/operator/query/</a>.
For more details see
<a href="https://github.com/modilabs/formhub/wiki/Formhub-Access-Points-(API)#
api-parameters">
API Parameters</a>.
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{pk}</code>?query={"field":"value"}</b>
<b>GET</b> /api/v1/data/<code>{pk}</code>?query={"field":{"op": "value"}}"</b>
</pre>
> Example
>
> curl -X GET 'https://example.com/api/v1/data/22845?query={"kind": \
"monthly"}'
> curl -X GET 'https://example.com/api/v1/data/22845?query={"date": \
{"$gt": "2014-09-29T01:02:03+0000"}}'
> Response
>
> [
> {
> "_id": 4503,
> "expense_type": "service",
> "_xform_id_string": "exp",
> "_geolocation": [
> null,
> null
> ],
> "end": "2013-01-03T10:26:25.674+03",
> "start": "2013-01-03T10:25:17.409+03",
> "expense_date": "2011-12-23",
> "_status": "submitted_via_web",
> "today": "2013-01-03",
> "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
> "imei": "351746052013466",
> "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
> "kind": "monthly",
> "_submission_time": "2013-01-03T02:27:19",
> "required": "yes",
> "_attachments": [],
> "item": "Rent",
> "amount": "35000.0",
> "deviceid": "351746052013466",
> "subscriberid": "639027...60317"
> },
> {
> ....
> "subscriberid": "639027...60317"
> }
> ]
## Query submitted data of a specific form using Tags
Provides a list of json submitted data for a specific form matching specific
tags. Use the `tags` query parameter to filter the list of forms, `tags`
should be a comma separated list of tags.
<pre class="prettyprint">
<b>GET</b> /api/v1/data?<code>tags</code>=<code>tag1,tag2</code></pre>
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{pk}</code>?<code>tags\
</code>=<code>tag1,tag2</code></pre>
> Example
>
> curl -X GET https://example.com/api/v1/data/22845?tags=monthly
## Tag a submission data point
A `POST` payload of parameter `tags` with a comma separated list of tags.
Examples
- `animal fruit denim` - space delimited, no commas
- `animal, fruit denim` - comma delimited
<pre class="prettyprint">
<b>POST</b> /api/v1/data/<code>{pk}</code>/<code>{dataid}</code>/labels</pre>
Payload
{"tags": "tag1, tag2"}
## Delete a specific tag from a submission
<pre class="prettyprint">
<b>DELETE</b> /api/v1/data/<code>{pk}</code>/<code>\
{dataid}</code>/labels/<code>tag_name</code></pre>
> Request
>
> curl -X DELETE \
https://example.com/api/v1/data/28058/20/labels/tag1
or to delete the tag "hello world"
>
> curl -X DELETE \
https://example.com/api/v1/data/28058/20/labels/hello%20world
>
> Response
>
> HTTP 200 OK
## Query submitted validation status of a specific submission
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{pk}</code>/<code>{dataid}</code>/validation_status</pre>
> Example
>
> curl -X GET https://example.com/api/v1/data/22845/56/validation_status
> Response
>
> {
> "timestamp": 1513299978,
> "by_whom ": "John Doe",
> "uid": "validation_status_approved",
> "color": "#00ff00",
> "label: "Approved"
> }
## Change validation status of a submission data point
A `PATCH` payload of parameter `validation_status`.
<pre class="prettyprint">
<b>PATCH</b> /api/v1/data/<code>{pk}</code>/<code>{dataid}</code>/validation_status</pre>
Payload
> {
> "validation_status_uid": "validation_status_not_approved"
> }
> Example
>
> curl -X PATCH https://example.com/api/v1/data/22845/56/validation_status
> Response
>
> {
> "timestamp": 1513299978,
> "by_whom ": "John Doe",
> "uid": "validation_status_not_approved",
> "color": "#ff0000",
> "label": "Not Approved"
> }
## Get enketo edit link for a submission instance
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{pk}</code>/<code>{dataid}</code>/enketo
</pre>
> Example
>
> curl -X GET https://example.com/api/v1/data/28058/20/enketo?return_url=url
> Response
> {"url": "https://hmh2a.enketo.formhub.org"}
>
>
## Delete a specific submission instance
Delete a specific submission in a form
<pre class="prettyprint">
<b>DELETE</b> /api/v1/data/<code>{pk}</code>/<code>{dataid}</code>
</pre>
> Example
>
> curl -X DELETE https://example.com/api/v1/data/28058/20
> Response
>
> HTTP 204 No Content
>
>
"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [
renderers.XLSRenderer,
renderers.XLSXRenderer,
renderers.CSVRenderer,
renderers.RawXMLRenderer
]
content_negotiation_class = renderers.InstanceContentNegotiation
filter_backends = (filters.RowLevelObjectPermissionFilter,
filters.XFormOwnerFilter)
permission_classes = (XFormDataPermissions,)
lookup_field = 'pk'
lookup_fields = ('pk', 'dataid')
extra_lookup_fields = None
queryset = XForm.objects.all()
def bulk_delete(self, request, *args, **kwargs):
"""
Bulk delete instances
"""
xform = self.get_object()
postgres_query, mongo_query = self.__build_db_queries(xform, request.data)
# Disconnect redundant parsed instance pre_delete signal
pre_delete.disconnect(_remove_from_mongo, sender=ParsedInstance)
# Delete Postgres & Mongo
all_count, results = Instance.objects.filter(**postgres_query).delete()
identifier = f'{Instance._meta.app_label}.Instance'
deleted_records_count = results[identifier]
ParsedInstance.bulk_delete(mongo_query)
# Pre_delete signal needs to be re-enabled for parsed instance
pre_delete.connect(_remove_from_mongo, sender=ParsedInstance)
return Response({
'detail': _('{} submissions have been deleted').format(
deleted_records_count)
}, status.HTTP_200_OK)
def bulk_validation_status(self, request, *args, **kwargs):
xform = self.get_object()
try:
new_validation_status_uid = request.data['validation_status.uid']
except KeyError:
raise ValidationError({
'payload': _('No `validation_status.uid` provided')
})
# Create new validation_status object
new_validation_status = get_validation_status(
new_validation_status_uid, xform, request.user.username)
postgres_query, mongo_query = self.__build_db_queries(xform,
request.data)
# Update Postgres & Mongo
updated_records_count = Instance.objects.filter(
**postgres_query
).update(validation_status=new_validation_status)
ParsedInstance.bulk_update_validation_statuses(mongo_query,
new_validation_status)
return Response({
'detail': _('{} submissions have been updated').format(
updated_records_count)
}, status.HTTP_200_OK)
def get_serializer_class(self):
pk_lookup, dataid_lookup = self.lookup_fields
pk = self.kwargs.get(pk_lookup)
dataid = self.kwargs.get(dataid_lookup)
if pk is not None and dataid is None:
serializer_class = DataListSerializer
elif pk is not None and dataid is not None:
serializer_class = DataInstanceSerializer
else:
serializer_class = DataSerializer
return serializer_class
def get_object(self) -> Union[XForm, Instance]:
"""
Return an `XForm` object, or an `Instance` object if its primary key is
present in the URL. If no results are found, an HTTP 404 error is raised.
"""
xform = super().get_object()
pk_lookup, dataid_lookup = self.lookup_fields
pk = self.kwargs.get(pk_lookup)
dataid = self.kwargs.get(dataid_lookup)
if pk is None or dataid is None:
return xform
try:
int(pk)
except ValueError:
raise ParseError(_("Invalid pk `%(pk)s`" % {'pk': pk}))
try:
int(dataid)
except ValueError:
raise ParseError(_("Invalid dataid `%(dataid)s`"
% {'dataid': dataid}))
return get_object_or_404(Instance, pk=dataid, xform__pk=pk)
def _get_public_forms_queryset(self):
return XForm.objects.filter(Q(shared=True) | Q(shared_data=True))
def _filtered_or_shared_qs(self, qs, pk):
filter_kwargs = {self.lookup_field: pk}
qs = qs.filter(**filter_kwargs)
if not qs:
filter_kwargs['shared_data'] = True
qs = XForm.objects.filter(**filter_kwargs)
if not qs:
raise Http404(_("No data matches with given query."))
return qs
def filter_queryset(self, queryset, view=None):
qs = super().filter_queryset(queryset)
pk = self.kwargs.get(self.lookup_field)
tags = self.request.query_params.get('tags', None)
if tags and isinstance(tags, six.string_types):
tags = tags.split(',')
qs = qs.filter(tags__name__in=tags).distinct()
if pk:
try:
int(pk)
except ValueError:
raise ParseError(_("Invalid pk %(pk)s" % {'pk': pk}))
else:
qs = self._filtered_or_shared_qs(qs, pk)
return qs
@action(detail=True, methods=["GET", "PATCH", "DELETE"])
def validation_status(self, request, *args, **kwargs):
"""
View or modify validation status of specific instance.
User needs 'validate_xform' permission to update the data.
:param request: Request
:return: Response
"""
http_status = status.HTTP_200_OK
instance = self.get_object()
data = {}
if request.method != 'GET':
if (
request.method == 'PATCH'
and not add_validation_status_to_instance(request, instance)
):
http_status = status.HTTP_400_BAD_REQUEST
elif request.method == 'DELETE':
if remove_validation_status_from_instance(instance):
http_status = status.HTTP_204_NO_CONTENT
data = None
else:
http_status = status.HTTP_400_BAD_REQUEST
if http_status == status.HTTP_200_OK:
data = instance.validation_status
return Response(data, status=http_status)
@action(
detail=True,
methods=['GET', 'POST', 'DELETE'],
extra_lookup_fields=['label'],
)
def labels(self, request, *args, **kwargs):
http_status = status.HTTP_400_BAD_REQUEST
instance = self.get_object()
if request.method == 'POST':
if add_tags_to_instance(request, instance):
http_status = status.HTTP_201_CREATED
tags = instance.tags
label = kwargs.get('label', None)
if request.method == 'GET' and label:
data = [tag['name'] for tag in
tags.filter(name=label).values('name')]
elif request.method == 'DELETE' and label:
count = tags.count()
tags.remove(label)
# Accepted, label does not exist hence nothing removed
http_status = status.HTTP_200_OK if count == tags.count() \
else status.HTTP_404_NOT_FOUND
data = list(tags.names())
else:
data = list(tags.names())
if request.method == 'GET':
http_status = status.HTTP_200_OK
return Response(data, status=http_status)
@action(
detail=True,
methods=['GET'],
permission_classes=[EnketoSubmissionEditPermissions],
)
def enketo(self, request, *args, **kwargs):
# keep `/enketo` for retro-compatibility
return self._enketo_request(request, action_='edit', *args, **kwargs)
@action(
detail=True,
methods=['GET'],
permission_classes=[EnketoSubmissionEditPermissions],
)
def enketo_edit(self, request, *args, **kwargs):
return self._enketo_request(request, action_='edit', *args, **kwargs)
@action(
detail=True,
methods=['GET'],
permission_classes=[EnketoSubmissionViewPermissions],
)
def enketo_view(self, request, *args, **kwargs):
return self._enketo_request(request, action_='view', *args, **kwargs)
def _enketo_request(self, request, action_, *args, **kwargs):
object_ = self.get_object()
data = {}
if isinstance(object_, XForm):
raise ParseError(_('Data id not provided.'))
elif isinstance(object_, Instance):
return_url = request.query_params.get('return_url')
if not return_url and not action_ == 'view':
raise ParseError(_('`return_url` not provided.'))
try:
data['url'] = get_enketo_submission_url(
request, object_, return_url, action=action_
)
except EnketoError as e:
data['detail'] = str(e)
return Response(data=data)
def retrieve(self, request, *args, **kwargs):
# XML rendering does not use a serializer
if request.accepted_renderer.format == "xml":
instance = self.get_object()
return Response(instance.xml)
else:
return super().retrieve(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if isinstance(instance, XForm):
raise ParseError(_('Data id not provided'))
elif isinstance(instance, Instance):
instance.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def list(self, request, *args, **kwargs):
lookup_field = self.lookup_field
lookup = self.kwargs.get(lookup_field)
if lookup_field not in kwargs.keys():
self.object_list = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data)
xform = self.get_object()
query = request.GET.get("query", {})
export_type = kwargs.get('format')
if export_type is None or export_type in ['json']:
# perform default viewset retrieve, no data export
# With DRF, a ListSerializer is created automatically and wraps
# everything in a list. Since this endpoint already returns a list,
# we unwrap it.
res = super().list(request, *args, **kwargs)
res.data = res.data[0]
return res
return custom_response_handler(request, xform, query, export_type)
@staticmethod
def __build_db_queries(xform_, request_data):
"""
Gets instance ids based on the request payload.
Useful to narrow down the set of instances for bulk actions.
Args:
xform_ (XForm)
request_data (dict)
Returns:
tuple(<dict>, <dict>): PostgreSQL filters, Mongo filters.
They are meant to be used respectively with Django Queryset
and PyMongo query.
"""
mongo_query = ParsedInstance.get_base_query(xform_.user.username,
xform_.id_string)
postgres_query = {'xform_id': xform_.id}
instance_ids = None
# Remove empty values
payload = {
key_: value_ for key_, value_ in request_data.items() if value_
}
####################################################
# Submissions can be retrieved in 3 different ways #
####################################################
# First of all, users cannot send `query` and `submission_ids` in the
# same POST/PATCH request.
#
if all(key_ in payload for key_ in ('query', 'submission_ids')):
raise ValidationError({
'payload': _("`query` and `submission_ids` can't be used together")
})
# First scenario / Get submissions based on user's query
try:
query = payload['query']
except KeyError:
pass
else:
try:
query.update(mongo_query) # Overrides `_userform_id` if exists
except AttributeError:
raise ValidationError({
'payload': _('Invalid query: %(query)s')
% {'query': json.dumps(query)}
})
query_kwargs = {
'query': json.dumps(query),
'fields': '["_id"]'
}
cursor = ParsedInstance.query_mongo_no_paging(**query_kwargs)
instance_ids = [record.get('_id') for record in list(cursor)]
# Second scenario / Get submissions based on list of ids
try:
submission_ids = payload['submission_ids']
except KeyError:
pass
else:
try:
# Use int() to test if list of integers is valid.
instance_ids = [int(submission_id)
for submission_id in submission_ids]
except ValueError:
raise ValidationError({
'payload': _('Invalid submission ids: %(submission_ids)s')
% {'submission_ids':
json.dumps(payload['submission_ids'])}
})
if instance_ids is not None:
# Narrow down queries with list of ids.
postgres_query.update({'id__in': instance_ids})
mongo_query.update({'_id': {'$in': instance_ids}})
elif payload.get('confirm', False) is not True:
# Third scenario / get all submissions in form,
# but confirmation param must be among payload
raise NoConfirmationProvidedException()
return postgres_query, mongo_query
|
|
# -*- coding: utf-8 -*-
"""
Poisson point process penalised likelihood regression.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
import autograd
import autograd.numpy as np
import autograd.scipy as sp
have_autograd = True
except ImportError as e:
import numpy as np
import scipy as sp
have_autograd = False
from scipy.stats import gaussian_kde
from . import influence
from . import background
def _as_mu_args(
mu=None,
omega=None,
tau=None,
# _default={},
**kwargs):
"""
Utility function to convert model arguments to kernel arguments.
This renames `omega` to `kappa`, keeps `mu`, and *drops* `tau`.
"""
kwargs = dict(**kwargs)
# kwargs.setdefault(**_default)
if omega is not None:
kwargs['kappa'] = omega
if mu is not None:
kwargs['mu'] = mu
return kwargs
def _as_phi_args(
kappa=None,
tau=None,
# _default={},
**kwargs):
"""
Utility function to convert model arguments to kernel arguments.
"""
kwargs = dict(**kwargs)
# kwargs.setdefault(**_default)
if kappa is not None:
kwargs['kappa'] = kappa
if tau is not None:
kwargs['tau'] = tau
return kwargs
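# Illustrative sketch of the renaming done by these helpers:
#   _as_mu_args(mu=1.0, omega=0.5, tau=2.0)  -> {'mu': 1.0, 'kappa': 0.5}  (tau dropped)
#   _as_phi_args(kappa=0.5, tau=2.0)         -> {'kappa': 0.5, 'tau': 2.0}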
def lam(
ts,
eval_ts=None,
bw=1.0):
"""
"""
if eval_ts is None:
eval_ts = ts
fn = gaussian_kde(
ts,
bw * (np.amax(ts)-np.amin(ts))/ts.size
# * (ts.size**(-0.8))
)
return fn(eval_ts) * ts.size
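# Minimal usage sketch (assumes `ts` is a 1-d array of event times; values
# are purely illustrative):
#   ts = np.random.exponential(1.0, size=200).cumsum()
#   intensity_at_events = lam(ts, bw=1.0)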
def lam_hawkes(
ts,
eval_ts=None,
max_floats=1e8,
phi_kernel=None,
mu_kernel=None,
**kwargs):
"""
Intensity of Hawkes process given time series and parameters.
Memory-hungry by default; could be improved with numba.
"""
ts = ts.ravel()
if eval_ts is None:
eval_ts = ts
eval_ts = eval_ts.ravel()
phi_kernel = influence.as_influence_kernel(phi_kernel)
mu_kernel = background.as_background_kernel(mu_kernel)
if ((ts.size) * (eval_ts.size)) > max_floats:
return _lam_hawkes_lite(
ts=ts,
phi_kernel=phi_kernel,
mu_kernel=mu_kernel,
eval_ts=eval_ts,
**kwargs
)
mu_kwargs = _as_mu_args(**kwargs)
phi_kwargs = _as_phi_args(**kwargs)
deltas = eval_ts.reshape(1, -1) - ts.reshape(-1, 1)
mask = deltas > 0.0
endo = phi_kernel(
deltas.ravel(),
**phi_kwargs
).reshape(deltas.shape) * mask
exo = mu_kernel(
eval_ts, **mu_kwargs
)
return endo.sum(0) + exo
def _lam_hawkes_lite(
ts,
eval_ts,
mu_kernel,
phi_kernel,
t_start=0.0,
**kwargs):
"""
Intensity of Hawkes process given time series and parameters.
Memory-lite version. CPU-hungry, could be improved with numba.
Uses assignment so may need to be altered for differentiability.
"""
endo = np.zeros_like(eval_ts)
mu_kwargs = _as_mu_args(**kwargs)
phi_kwargs = _as_phi_args(**kwargs)
for i in range(eval_ts.size):
deltas = eval_ts[i] - ts
mask = deltas > 0.0
endo[i] = np.sum(phi_kernel(deltas, **phi_kwargs) * mask)
exo = mu_kernel(eval_ts, **mu_kwargs)
return endo + exo
def big_lam_hawkes(
ts,
eval_ts,
t_start=0.0,
mu_kernel=None,
phi_kernel=None,
**kwargs
):
"""
True integrated intensity of a Hawkes process.
Since you are probably evaluating this only at one point,
this is only available in a vectorised high-memory version.
"""
phi_kernel = influence.as_influence_kernel(phi_kernel)
mu_kernel = background.as_background_kernel(mu_kernel)
ts = np.asfarray(ts).ravel()
mu_kwargs = _as_mu_args(**kwargs)
phi_kwargs = _as_phi_args(**kwargs)
deltas = eval_ts.reshape(1, -1) - ts.reshape(-1, 1)
mask = deltas > 0.0
big_endo = phi_kernel.integrate(
deltas.ravel(),
**phi_kwargs
).reshape(deltas.shape) * mask
big_exo = (
mu_kernel.integrate(eval_ts, **mu_kwargs) -
mu_kernel.integrate(t_start, **mu_kwargs)
)
return big_endo.sum(0) + big_exo
def loglik(
ts,
t_start=0.0,
t_end=None,
eval_ts=None,
phi_kernel=None,
mu_kernel=None,
**kwargs):
phi_kernel = influence.as_influence_kernel(phi_kernel)
mu_kernel = background.as_background_kernel(mu_kernel)
if t_end is None:
t_end = ts[-1]
# as an optimisation we allow passing in an eval_ts array,
# in which case t_start and t_end are ignored.
if eval_ts is None:
if t_end > ts[-1]:
eval_ts = np.concatenate((ts[ts > t_start], [t_end]))
else:
eval_ts = ts[np.logical_and((ts > t_start), (ts < t_end))]
lam = lam_hawkes(
ts=ts,
phi_kernel=phi_kernel,
mu_kernel=mu_kernel,
eval_ts=eval_ts,
**kwargs
)
big_lam = big_lam_hawkes(
ts=ts,
phi_kernel=phi_kernel,
mu_kernel=mu_kernel,
t_start=t_start,
eval_ts=np.array(t_end),
**kwargs
)
# if not np.isfinite(np.sum(np.log(lam)) - big_lam):
# from IPython.core.debugger import Tracer; Tracer()()
return np.sum(np.log(lam)) - big_lam
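# Minimal usage sketch, assuming the sibling `influence`/`background` modules
# resolve their default kernels and that `mu`/`kappa` are the relevant
# parameters for them (names here are illustrative, not prescriptive):
#   ts = np.sort(np.random.uniform(0.0, 100.0, size=500))
#   ll = loglik(ts, t_start=0.0, t_end=100.0, mu=1.0, kappa=0.5)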
|
|
__author__ = "John Kirkham <[email protected]>"
__date__ = "$Nov 09, 2015 12:47$"
from contextlib import contextmanager
import collections
import copy
import gc
import itertools
import math
import numbers
import os
from time import sleep
from psutil import cpu_count
import numpy
import zarr
import dask
import dask.array
import dask.distributed
try:
import dask_drmaa
except (ImportError, OSError, RuntimeError):
dask_drmaa = None
from builtins import (
map as imap,
range as irange,
zip as izip,
)
from kenjutsu.measure import len_slices
from kenjutsu.blocks import num_blocks, split_blocks
from metawrap.metawrap import tied_call_args, unwrap
from nanshe_workflow.data import concat_dask, DataBlocks
from nanshe_workflow.ipy import Client, display, FloatProgress
def set_num_workers(num_workers=None):
"""
Sets environment variable ``$CORES`` based on the number of workers.
Note:
If the number of workers is ``None`` or ``-1``, then the number of
workers will be set to ``1`` less than ``$CORES`` (if set) or the
number of logical cores as determined by ``psutil``.
Args:
num_workers(int): The number of workers for the cluster.
Returns:
num_workers(int): The number of workers that will be used.
"""
if (num_workers is None) or (num_workers == -1):
num_workers = int(os.environ.get("CORES", cpu_count()))
num_workers -= 1
else:
assert isinstance(num_workers, numbers.Integral), \
"Number of workers must be an integeral value."
num_workers = int(num_workers)
assert num_workers > 0, "Must have at least 1 worker."
os.environ["CORES"] = str(num_workers + 1)
return(num_workers)
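# Behaviour sketch (values illustrative):
#   set_num_workers(4)     # returns 4 and sets $CORES to "5"
#   set_num_workers(None)  # returns int($CORES or logical core count) - 1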
def cleanup_cluster_files(profile):
"""
Cleans up iPython cluster files before startup and after shutdown.
Args:
profile(str): Which iPython profile to clean up for.
"""
from IPython.paths import locate_profile
from ipykernel.connect import find_connection_file
for each_file in ["tasks.db", "tasks.db-journal"]:
try:
os.remove(os.path.join(locate_profile(profile), each_file))
except OSError:
pass
for each_file in [profile + "_engines", profile + "_controller"]:
try:
os.remove(each_file)
except OSError:
pass
for each_file in ["ipcontroller-client.json", "ipcontroller-engine.json"]:
try:
os.remove(find_connection_file(each_file, profile=profile))
except IOError:
pass
except OSError:
pass
def get_client(profile):
"""
Sets up and returns an active client with engines connected.
Args:
profile(str): Which iPython profile to get client for.
Returns:
Client: A client to the specified iPython cluster.
"""
client = None
while client is None:
try:
client = Client(profile=profile)
except IOError:
sleep(1.0)
while not client.ids:
sleep(1.0)
return(client)
def startup_distributed(nworkers,
cluster_kwargs=None,
client_kwargs=None,
adaptive_kwargs=None):
nworkers = int(nworkers)
if cluster_kwargs is None:
cluster_kwargs = {}
if client_kwargs is None:
client_kwargs = {}
cluster_kwargs_pass = {}
cluster_kwargs_pass.update(cluster_kwargs)
if dask_drmaa:
cluster_kwargs_pass.setdefault(
"template",
{
"args": [
"--nthreads", "1"
],
"jobEnvironment": os.environ
}
)
cluster = dask_drmaa.DRMAACluster(**cluster_kwargs_pass)
cluster.start_workers(nworkers)
else:
# Either `dask_drmaa` is unavailable or DRMAA cannot start.
# Fallback to a local Distributed client instead.
cluster_kwargs_pass.setdefault("n_workers", nworkers)
cluster_kwargs_pass.setdefault("threads_per_worker", 1)
cluster = dask.distributed.LocalCluster(**cluster_kwargs_pass)
if adaptive_kwargs is not None:
cluster.adapt(**adaptive_kwargs)
client = dask.distributed.Client(cluster, **client_kwargs)
while (
(client.status == "running") and
(len(client.scheduler_info()["workers"]) < nworkers)
):
sleep(1.0)
dask.config.set({"distributed.dashboard.link": "/proxy/{port}/status"})
return client
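# Minimal usage sketch; falls back to a LocalCluster when `dask_drmaa` is
# unavailable (the adaptive bounds below are illustrative):
#   client = startup_distributed(4, adaptive_kwargs={"minimum": 1, "maximum": 4})
#   ...
#   shutdown_distributed(client)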
def shutdown_distributed(client):
cluster = client.cluster
# Will close and clear an existing adaptive instance
with dask.distributed.utils.ignoring(AttributeError):
cluster._adaptive.stop()
del cluster._adaptive
client.close()
while (
(client.status == "running") and
(len(client.scheduler_info()["workers"]) != 0)
):
sleep(1)
workers = list(cluster.workers)
try:
cluster.stop_worker
except AttributeError:
cluster.stop_workers(workers, sync=True)
else:
for w in workers:
cluster.stop_worker(w)
cluster.close()
@contextmanager
def get_executor(client):
executor = client.become_dask()
try:
yield executor
finally:
executor.shutdown()
client.stop_dask()
def map_dask(client, calculate_block, data, block_shape, block_halo, blocks=True):
n_blocks = num_blocks(data.shape, block_shape)
block_indices, data_blocks, data_halo_blocks, result_halos_trim = split_blocks(
data.shape, block_shape, block_halo, index=True
)
result = numpy.empty(
n_blocks,
dtype=object
)
for i, each_shape, each_haloed_block, each_trim in izip(
block_indices,
imap(len_slices, data_blocks),
DataBlocks(data, data_halo_blocks),
result_halos_trim):
each_haloed_block = dask.delayed(calculate_block)(
each_haloed_block, each_trim
)
each_haloed_block = dask.array.from_delayed(
each_haloed_block,
each_shape,
data.dtype
)
result[i] = each_haloed_block
result = concat_dask(result)
if blocks:
result_blocks = []
for i in data_blocks:
result_blocks.append(result[i])
result = data_blocks, result_blocks
return result
def map_ipyparallel(client, calculate_block, data, block_shape, block_halo):
block_indices, data_blocks, data_halo_blocks, result_halos_trim = split_blocks(
data.shape, block_shape, block_halo, index=True
)
lview = client.load_balanced_view()
result_blocks = lview.map(
calculate_block,
DataBlocks(data, data_halo_blocks),
result_halos_trim
)
return data_blocks, result_blocks
def store_dask(data_blocks, result_blocks, out):
tasks = []
lock = dask.utils.SerializableLock()
with lock:
for each_data_block, each_result_block in izip(
data_blocks, result_blocks
):
r = dask.array.store(
each_result_block,
out,
regions=each_data_block,
lock=lock,
compute=False
)
each_task = lambda r = r: dask.compute(r)
tasks.append(each_task)
return tasks
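# Usage sketch: `store_dask` returns zero-argument callables so the caller
# can interleave progress reporting with the writes, e.g.
#   tasks = store_dask(data_blocks, result_blocks, out)
#   for each_task in tasks:
#       each_task()  # computes and stores one block's result into `out`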
def store_ipyparallel(data_blocks, result_blocks, out):
class AsyncStore(collections.Sized, collections.Iterable):
def __init__(self,
data_blocks=data_blocks,
result_blocks=result_blocks,
out=out):
self.data_blocks = data_blocks
self.result_blocks = result_blocks
self.out = out
def __len__(self):
return len(self.data_blocks)
def __iter__(self):
def _store(each_data_block,
each_result_block,
out=self.out):
out[each_data_block] = each_result_block[...]
for each_data_block, each_result_block in izip(
self.data_blocks, self.result_blocks
):
each_task = lambda: _store(
each_data_block=each_data_block,
each_result_block=each_result_block
)
yield each_task
tasks = AsyncStore()
return tasks
def block_parallel(client, calculate_block_shape, calculate_halo):
"""
Take a single core function and construct a form that can work on
haloed blocks in parallel.
Notes:
To do this we need access to the client responsible for submitting
jobs. Also, we need to estimate a block's shape and halo.
We make some assumptions here.
* There is only one piece of data that we will need to block.
* All blocks are position invariant. (means their size is
independent of their position)
* All blocks will have a halo that is position invariant.
Args:
client(Client): client to send computations to.
calculate_block_shape(callable): computes blocks shape by having
a similar signature to
``calculate`` except the data's
shape and dtype are passed in
first.
calculate_halo(callable): computes halo shape by having
a similar signature to
``calculate`` except the data's
shape and dtype are passed in
first
Returns:
callable: parallel version of
``calculate`` computed on the
iPython Cluster.
"""
def build_block_parallel(calculate):
def wrapper(data, *args, **kwargs):
client[:].apply(gc.collect).get()
gc.collect()
ordered_bound_args, new_args, new_kwargs = tied_call_args(
unwrap(calculate), data, *args, **kwargs
)
out = None
if "out" in ordered_bound_args:
out = ordered_bound_args.pop("out")
elif "out" in new_kwargs:
out = new_kwargs.pop("out")
if out is None:
out = numpy.empty(
data.shape,
data.dtype
)
new_args = tuple(ordered_bound_args.values())[1:len(args)+1] + new_args
new_kwargs.update(dict(list(ordered_bound_args.items())[len(args)+1:]))
ordered_bound_args = None
block_shape = None
if callable(calculate_block_shape):
block_shape = calculate_block_shape(
data.shape, data.dtype, *new_args, **new_kwargs
)
else:
block_shape = calculate_block_shape
assert (
isinstance(block_shape, tuple) and
len(block_shape) == len(data.shape)
)
block_halo = None
if callable(calculate_halo):
block_halo = calculate_halo(
data.shape, data.dtype, *new_args, **new_kwargs)
else:
block_halo = calculate_halo
assert (
isinstance(block_halo, tuple) and
len(block_halo) == len(data.shape)
)
with get_executor(client) as executor:
calculate_block = lambda dhb, rht: zarr.array(
calculate(dhb[...], *new_args, **new_kwargs)[rht]
)
data_blocks, result_blocks = map_ipyparallel(
client, calculate_block, data, block_shape, block_halo
)
progress_bar = FloatProgress(min=0.0, max=1.0)
display(progress_bar)
tasks = store_ipyparallel(data_blocks, result_blocks, out)
for i, each_task in enumerate(tasks):
progress_bar.value = i / float(len(tasks))
each_task()
progress_bar.value = 1.0
client[:].apply(gc.collect).get()
gc.collect()
return(out)
return(wrapper)
return(build_block_parallel)
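# Usage sketch, assuming `client` is an ipyparallel Client and `my_calculate`
# is a single-core function operating on a 3-d array (shapes and halos here
# are purely illustrative):
#   parallel_calc = block_parallel(client, (100, 64, 64), (0, 8, 8))(my_calculate)
#   result = parallel_calc(data)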
def shape_block_parallel(client):
"""
Same as ``block_parallel``, but with restructured argument order.
Args:
client(Client): client to send computations to.
Returns:
callable: parallelized callable.
"""
def prebuild_shape_block_parallel(calculate):
def build_shape_block_parallel(calculate_block_shape, calculate_halo):
return(
block_parallel(
client, calculate_block_shape, calculate_halo
)(calculate)
)
return(build_shape_block_parallel)
return(prebuild_shape_block_parallel)
def halo_block_parallel(client, calculate_halo):
"""
Same as ``block_parallel``, but with restructured argument order.
Args:
client(Client): client to send computations to.
calculate_halo(callable): computes halo shape by having a
similar signature to ``calculate``
except the data's shape and dtype
are passed in first.
Returns:
callable: parallelized callable.
"""
def prebuild_shape_block_parallel(calculate):
def build_shape_block_parallel(calculate_block_shape):
return(
block_parallel(
client, calculate_block_shape, calculate_halo
)(calculate)
)
return(build_shape_block_parallel)
return(prebuild_shape_block_parallel)
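# Usage sketch showing the restructured call order (names illustrative,
# `client` again an ipyparallel Client):
#   parallel_calc = halo_block_parallel(client, (0, 8, 8))(my_calculate)((100, 64, 64))
#   result = parallel_calc(data)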
def block_generate_dictionary_parallel(client, calculate_block_shape, calculate_halo=None):
"""
Take a single core dictionary learning function and construct a form
that can work on haloed blocks in parallel.
Notes:
To do this we need access to the client responsible for submitting
jobs. Also, we need to estimate a block's shape and halo.
We make some assumptions here.
* There is only one piece of data that we will need to block.
* All blocks are position invariant. (means their size is
independent of their position)
* All blocks will have a halo that is position invariant.
Args:
client(Client): client to send computations to.
calculate_block_shape(callable): computes blocks shape by having
a similar signature to
``calculate`` except the data's
shape and dtype are passed in
first.
calculate_halo(callable): computes halo shape by having
a similar signature to
``calculate`` except the data's
shape and dtype are passed in
first
Returns:
callable: parallel version of
``calculate`` computed on the
iPython Cluster.
"""
assert calculate_halo is None
def build_block_parallel(calculate):
def wrapper(data, *args, **kwargs):
# Get `concurrent.futures` compatible `Executor`.
# Tries the `distributed` syntax and falls back for `ipyparallel`.
try:
executor = client.get_executor()
except AttributeError:
executor = client.executor()
ordered_bound_args, new_args, new_kwargs = tied_call_args(
unwrap(calculate), data, *args, **kwargs
)
if "initial_dictionary" in ordered_bound_args:
ordered_bound_args.pop("initial_dictionary")
elif "initial_dictionary" in new_kwargs:
new_kwargs.pop("initial_dictionary")
n_components = None
if "n_components" in ordered_bound_args:
n_components = ordered_bound_args.pop("n_components")
elif "n_components" in new_kwargs:
n_components = new_kwargs.pop("n_components")
if n_components is None:
raise ValueError("Must define `n_components`.")
out = None
if "out" in ordered_bound_args:
out = ordered_bound_args.pop("out")
elif "out" in new_kwargs:
out = new_kwargs.pop("out")
if out is None:
out = numpy.empty(
(n_components,) + data.shape[1:],
data.dtype
)
new_args = tuple(list(ordered_bound_args.values())[1:len(args)+1]) + new_args
new_kwargs.update(dict(list(ordered_bound_args.items())[len(args)+1:]))
ordered_bound_args = None
block_shape = None
if callable(calculate_block_shape):
block_shape = calculate_block_shape(
data.shape, data.dtype, *new_args, **new_kwargs
)
else:
block_shape = calculate_block_shape
assert isinstance(block_shape, tuple) and len(block_shape) == len(data.shape)
block_halo = calculate_halo
# Compute how many basis images per block. Take into account some blocks may be smaller.
data_shape_0_q, data_shape_0_r = divmod(
data.shape[0], block_shape[0]
)
full_block_k, full_block_k_rem = divmod(
n_components,
data_shape_0_q + bool(data_shape_0_r)
)
data_shape_0_r_k_diff = (
full_block_k -
int(math.ceil(full_block_k * data_shape_0_r / float(block_shape[0])))
) % full_block_k
full_block_k, full_block_k_rem = (
numpy.array(divmod(data_shape_0_r_k_diff, data_shape_0_q)) +
numpy.array([full_block_k, full_block_k_rem])
)
full_block_accum_1 = int(math.ceil((data_shape_0_q)/float(full_block_k_rem))) if full_block_k_rem else 0
end_block_k = n_components
end_block_k -= (
data_shape_0_q - (1 - bool(data_shape_0_r))
) * full_block_k
end_block_k -= data_shape_0_q / full_block_accum_1 if full_block_accum_1 else 0
data_blocks, data_halo_blocks, result_halos_trim = split_blocks(
data.shape, block_shape, block_halo
)
result_blocks_loc = []
data_blocks_kwargs = []
frame_dict_sample = dict()
data_blocks_dict_sample = []
k_offset = 0
for i, each_data_block in enumerate(data_blocks):
each_kwargs = copy.deepcopy(new_kwargs)
if each_data_block[0].start == 0:
j = 0
k_offset = 0
if (each_data_block[0].stop + 1) != data.shape[0]:
each_n_components = full_block_k
if ((j + 1) % full_block_accum_1) == 0:
each_n_components += 1
else:
# This block is shorter than normal.
each_n_components = end_block_k
j += 1
new_k_offset = min(
n_components,
k_offset + each_n_components
)
each_n_components = new_k_offset - k_offset
each_kwargs["n_components"] = each_n_components
each_result_block_loc = []
each_result_block_loc.append(slice(
k_offset,
new_k_offset,
1
))
each_result_block_loc += each_data_block[1:]
each_result_block_loc = tuple(each_result_block_loc)
k_offset = new_k_offset
result_blocks_loc.append(each_result_block_loc)
data_blocks_kwargs.append(each_kwargs)
each_data_block_time_key = (
each_data_block[0].start,
each_data_block[0].stop,
each_data_block[0].step
)
if each_data_block_time_key not in frame_dict_sample:
frame_dict_sample[each_data_block_time_key] = numpy.random.choice(
numpy.arange(*each_data_block_time_key),
each_n_components,
replace=False
).tolist()
data_blocks_dict_sample.append(
(frame_dict_sample[each_data_block_time_key],) +
each_data_block[1:]
)
class DataBlocksDictSampleType(object):
def __init__(self, data, data_blocks_dict_sample):
self.data = data
self.data_blocks_dict_sample = data_blocks_dict_sample
def __iter__(self):
for each_data_block_dict_sample in self.data_blocks_dict_sample:
try:
yield self.data[each_data_block_dict_sample]
except TypeError:
each_data_block_dict = numpy.empty(
(len(each_data_block_dict_sample[0]),) +
len_slices(each_data_block_dict_sample[1:]),
dtype=self.data.dtype
)
for i, j in enumerate(each_data_block_dict_sample[0]):
each_data_block_dict[i] = self.data[
(slice(j, j+1, 1),) +
each_data_block_dict_sample[1:]
]
yield each_data_block_dict
def __len__(self):
return(len(self.data_blocks_dict_sample))
def calculate_block(db, dbds, kw):
with dask.config.set(scheduler="single-threaded"):
return zarr.array(calculate(
numpy.asarray(db[...]), numpy.asarray(dbds[...]), *new_args, **kw
))
result_blocks = executor.map(
calculate_block,
DataBlocks(data, data_blocks),
DataBlocksDictSampleType(data, data_blocks_dict_sample),
data_blocks_kwargs
)
progress_bar = FloatProgress(min=0.0, max=1.0)
display(progress_bar)
for i, (each_data_block, each_result_blocks_loc, each_result_block) in enumerate(
izip(data_blocks, result_blocks_loc, result_blocks)
):
progress_bar.value = i / float(len(data_blocks))
out[each_result_blocks_loc] = each_result_block[...]
progress_bar.value = 1.0
return(out)
return(wrapper)
return(build_block_parallel)
def shape_block_generate_dictionary_parallel(client):
"""
Same as ``block_generate_dictionary_parallel``, but with restructured
argument order.
Args:
client(Client): client to send computations to.
Returns:
callable: parallelized callable.
"""
def prebuild_shape_block_parallel(calculate):
def build_shape_block_parallel(calculate_block_shape, calculate_halo):
return(block_generate_dictionary_parallel(client, calculate_block_shape, calculate_halo)(calculate))
return(build_shape_block_parallel)
return(prebuild_shape_block_parallel)
def halo_block_generate_dictionary_parallel(client, calculate_halo):
"""
Same as ``block_generate_dictionary_parallel``, but with restructured
argument order.
Args:
client(Client): client to send computations to.
calculate_halo(callable): computes halo shape by having a
similar signature to ``calculate``
except the data's shape and dtype
are passed in first.
Returns:
callable: parallelized callable.
"""
def prebuild_shape_block_parallel(calculate):
def build_shape_block_parallel(calculate_block_shape):
return(block_generate_dictionary_parallel(client, calculate_block_shape, calculate_halo)(calculate))
return(build_shape_block_parallel)
return(prebuild_shape_block_parallel)
def frame_stack_calculate_parallel(client, calculate):
"""
Wraps a frame stack parallel function to delay specification of number
of frames per block.
Args:
client(Client): client to send computations to.
calculate(callable): a frame stack parallel function to
wrap.
Returns:
callable: parallelized callable.
"""
def prebuild_frame_stack_calculate_parallel(num_frames):
return(calculate(client, num_frames))
return(prebuild_frame_stack_calculate_parallel)
|
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Author: Prabhu Ramachandran
# Copyright (c) 2004, Enthought, Inc.
# License: BSD Style.
# Modified for VisTrails by the VisTrails team.
"""This module parses the VTK methods, obtains the argument and return
type information, and organizes them.
"""
# Author: Prabhu Ramachandran
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
from __future__ import division
import re
# Local imports (these are relative imports for a good reason).
import class_tree
import vtk_module as vtk
class VTKMethodParser:
"""This class provides useful methods for parsing methods of a VTK
class or instance.
The class allows one to categorize the methods of the VTK class
and also obtain the method signatures in a form that is easy to
use. When the `parse` method is called, it in turn calls the
`_organize_methods` method. This method organizes the VTK methods
into different instance variables described in the following.
`self.toggle_meths` contains a dictionary of all the boolean
methods of the form <Value>On/Off. The dictionary keys are
strings with the <Value>'s and the value of each item is the
default value (0/1) of the item (the example below will clarify
this). `self.state_meths` contains a dictionary which collects
the Set<Prop>To<Value> type of methods. The key is the <Prop> and
the value is a list containing the different string <Value>'s and
their corresponding mapped value. The first value in these is the
default value of the <Prop>. `self.get_set_meths` will contain a
dictionary which collects all the methods of the form
Set/Get<Prop> that are not already specified in
`self.toggle_meths` or `self.state_meths`. The default value of
the Get<Prop> is stored. If the value accepted by the method has
a range (via the methods `Get<Prop>MinValue` and
`Get<Prop>MaxValue`), then that range is computed and stored.
`self.get_meths` stores the methods that are of the form
`Get<Prop>`. `self.other_meths` stores the remaining methods.
The parsing is quite fast. Parsing every class in the VTK API
takes a couple of seconds (on a Pentium III @ 450Mhz).
Here is an example::
>>> import vtk
>>> p = VTKMethodParser()
>>> p.parse(vtk.vtkProperty)
>>> print p.get_toggle_methods()
{'EdgeVisibility': 0, 'BackfaceCulling': 0, 'FrontfaceCulling': 0}
>>> print p.get_state_methods()['Representation']
[['Surface', 2], ['Points', 0], ['Surface', 2], ['Wireframe', 1]]
>>> print p.get_get_set_methods()['Opacity']
(1.0, (0.0, 1.0))
>>> print p.get_get_methods()
['GetClassName']
>>> print p.get_other_methods()[:3]
['BackfaceRender', 'DeepCopy', 'IsA']
The class also provides a method called `get_method_signature`
that obtains the Python method signature given the VTK method
object. Here is an example::
>>> import vtk
>>> p = VTKMethodParser()
>>> o = vtk.vtkProperty
>>> print p.get_method_signature(o.GetClassName)
[(['string'], None)]
>>> print p.get_method_signature(o.GetColor)[0]
([('float', 'float', 'float')], None)
>>> print p.get_method_signature(o.GetColor)[1]
([None], (('float', 'float', 'float'),))
The `get_method_signature` is fairly efficient and obtaining the
signature for every method in every class in the VTK API takes
around 6 seconds (on a Pentium III @ 450Mhz).
"""
def __init__(self, use_tree=True):
"""Initializes the object.
Parameters
----------
- use_tree : `bool`
If True (default), use a ClassTree instance to obtain a
concrete subclass for an abstract base class. This is used
only to find the range and default values for some of the
methods. If False, no ClassTree instance is created.
This is optional because, creating a ClassTree is expensive.
The parser functionality can be very useful even without the
use of a ClassTree. For example, if one wants to save the
state of a VTK object one only needs to know the names of
the methods and not their default values, ranges etc. In
that case using a parser should be cheap.
"""
# The ClassTree is needed to find an instantiable child class
# for an abstract VTK parent class. This instance is used to
# obtain the state values and the ranges of the arguments
# accepted by the Get/Set methods that have a
# Get<Prop>{MaxValue,MinValue} method.
if use_tree:
self._tree = class_tree.ClassTree(vtk)
self._tree.create()
else:
self._tree = None
self._state_patn = re.compile('To[A-Z0-9]')
self._initialize()
#################################################################
# 'VTKMethodParser' interface.
#################################################################
def parse(self, obj, no_warn=True):
"""Parse the methods for a given VTK object/class.
Given a VTK class or object, this method parses the methods
and organizes them into useful categories. The categories and
their usage is documented in the documentation for the class.
Parameters
----------
- obj : VTK class or instance
- no_warn : `bool` (default: True)
If True (default), it suppresses any warnings generated by
the VTK object when parsing the methods. This is safe to
use.
"""
if not hasattr(obj, '__bases__'):
klass = obj.__class__
else:
klass = obj
methods = self.get_methods(klass)
if no_warn:
# Save warning setting and shut it off before parsing.
warn = vtk.vtkObject.GetGlobalWarningDisplay()
if klass.__name__ != 'vtkObject':
vtk.vtkObject.GlobalWarningDisplayOff()
self._organize_methods(klass, methods)
if no_warn:
# Reset warning status.
vtk.vtkObject.SetGlobalWarningDisplay(warn)
def _get_parent_methods(self, klass):
"""Returns all the methods of the classes parents."""
methods = {}
while len(klass.__bases__) > 0:
klass = klass.__bases__[0]
meths = dir(klass)
d = methods.fromkeys(meths)
methods.update(d)
return methods.keys()
def get_methods(self, klass):
"""Returns all the relevant methods of the given VTK class."""
methods = dir(klass)[:]
if hasattr(klass, '__members__'):
# Only VTK versions < 4.5 have these.
for m in klass.__members__:
methods.remove(m)
# Ignore the parent methods.
ignore = self._get_parent_methods(klass)
# Skip some of the ignores.
skip = ['GetInput', 'SetInput']
# Sometimes the child has only GetInput while the parent has
# SetInput.
if hasattr(klass, 'SetInput') and \
'SetInput' not in methods and \
'GetInput' in methods:
methods.append('SetInput')
# Get/set pairs that are overridden. Basically, if a parent
# class has a 'GetThing' and the child overrides/has a
# 'SetThing' (or vice-versa), then the removal of the parent
# methods is wrong since the child changes the trait definition
# which breaks things. We therefore do not remove any of the
# Get/SetThings that are ignored due to them being in the
# parent. However one has to be careful about cases where these are
# really Toggle (ThingOn) or State (SetThingToThong) etc. methods and
# in those cases we really should ignore the method. So in essence,
# any Get/Set pair that is not a State or Toggle should be redefined.
overrides = []
for m in methods:
check = False
if m.startswith('Get'):
m1 = 'Set' + m[3:]
check = True
elif m.startswith('Set'):
m1 = 'Get' + m[3:]
check = True
if check:
if m1 in methods and (m1 in ignore or m in ignore):
# Skips are stored as Set followed by Get.
skip.extend(['Set' +m[3:], 'Get'+m[3:]])
for m in skip[:]:
if m.startswith('Set'):
base = m[3:]
mg, ms = 'Get' + base, 'Set' + base
m_st = 'Set' + base + 'To'
m_t = base + 'Off'
for method in methods:
if m_st in method or m_t == method:
skip.remove(ms)
skip.remove(mg)
break
if 'GetViewProp' in methods and 'GetProp' in methods:
ignore.extend(['GetProp', 'SetProp'])
if 'GetViewProps' in methods and 'GetProps' in methods:
ignore.extend(['GetProps', 'SetProps'])
# Remove any deprecated traits.
if 'GetScaledText' in methods and 'GetTextScaleMode' in methods:
ignore.extend(['GetScaledText', 'SetScaledText',
'ScaledTextOn', 'ScaledTextOff'])
# Now we can safely remove the methods.
for m in methods[:]:
if m in ignore and m not in skip:
methods.remove(m)
return methods
def get_toggle_methods(self):
"""Returns a dictionary of the parsed <Value>On/Off methods
along with the default value.
"""
return self.toggle_meths
def get_state_methods(self):
"""Returns a dict of the parsed Set<Prop>To<Value>.
The keys are the <Prop> string with a list of the different
<Value> strings along with their corresponding value (if
obtainable). The first value is the default value of the
state.
"""
return self.state_meths
def get_get_set_methods(self):
"""Returns a dict of the parsed Get/Set<Value> methods.
The keys of the dict are the <Value> strings and contain a
two-tuple containing the default value (or None if it is not
obtainable for some reason) and a pair of numbers specifying
an acceptable range of values (or None if not obtainable).
"""
return self.get_set_meths
def get_get_methods(self):
"""Return a list of parsed Get<Value> methods.
All of these methods do NOT have a corresponding Set<Value>.
"""
return self.get_meths
def get_other_methods(self):
"""Return list of all other methods, that are not
categorizable.
"""
return self.other_meths
def get_method_signature(self, method):
"""Returns information on the Python method signature given
the VTK method.
The doc string of the given method object is parsed to get the
method signature. The method returns a list of tuples, each of
which contains 2 items: the first is a list representing the
return value and the second represents the arguments to be passed to the
function. If the method supports different return values and
arguments, this function returns all of their signatures.
Parameters
----------
- method : `method`
A VTK method object.
"""
# Remove all the C++ function signatures.
doc = method.__doc__
if not doc:
print "Ignoring method %r, no __doc__" % method
return []
doc = doc[:doc.find('\n\n')]
sig = []
c_sig = [] # The C++ signature
in_sig = False
in_c_sig = False
counter = 0
for line in doc.split('\n'):
if line.startswith('V.'):
in_sig = True
in_c_sig = False
sig.append(line.strip())
elif line.startswith('C++:'):
in_sig = False
in_c_sig = True
c_sig.append(line.strip())
counter += 1
elif in_sig:
sig[counter] = sig[counter] + line.strip()
elif in_c_sig:
c_sig[counter-1] = c_sig[counter-1] + line.strip()
# Remove the V.<method_name>
sig = [x.replace('V.' + method.__name__, '') for x in sig]
c_sig = [x[x.find('('):] for x in c_sig]
pat = re.compile(r'\b')
# Split into [return_value, arguments] after processing them.
tmp = list(sig)
sig = []
for sig_idx, i in enumerate(tmp):
# Split to get return values.
x = i.split('->')
# Strip each part.
x = [y.strip() for y in x]
if len(x) == 1: # No return value
x = [None, x[0]]
else:
x.reverse()
ret, arg = x
# Remove leading and trailing parens for arguments.
arg = arg[1:-1]
if not arg:
arg = None
if arg and arg[-1] in [')', ']']:
arg = arg + ','
# Check if we are able to parse all the arguments -- some
# unstable versions of VTK have problems generating the
# docstring and in this case we will try to use the C++
# docstring signature.
n_arg = 0
arg_map = {'unsigned int': 'int', 'unsigned char': 'int',
'unsigned long': 'long', 'unsigned short': 'int'}
if arg is not None and c_sig:
n_arg = arg.count(',') + 1
# The carguments have parenthesis like: (int, int)
carg = c_sig[sig_idx][1:-1].split(',')
if n_arg > 0:
args = []
if len(carg) == n_arg:
for idx, x in enumerate(arg.split(',')):
if len(x.strip()) == 0:
carg_val = carg[idx].strip()
if 'unsigned' in carg_val and \
carg_val in arg_map:
args.append(arg_map[carg_val])
elif 'void' in carg_val:
args.append("string")
else:
args.append(x)
else:
args.append(x)
arg = ', '.join(args)
if ret is not None and ret.startswith('(') and '...' in ret:
# A tuple (new in VTK-5.7)
ret = "tuple"
if arg is not None:
if '[float, ...]' in arg:
arg = arg.replace('[float, ...]', 'tuple')
elif '(float, ...)' in arg:
arg = arg.replace('(float, ...)', 'tuple')
if ret == '(, )':
ret = None
# Now quote the args and eval them. Easy!
try:
if ret:
ret = eval(pat.sub('\"', ret))
if arg:
arg = eval(pat.sub('\"', arg))
if type(arg) == type('str'):
arg = [arg]
except SyntaxError:
pass
else:
sig.append(([ret], arg))
return sig
def get_tree(self):
"""Return the ClassTree instance used by this class."""
return self._tree
#################################################################
# Non-public interface.
#################################################################
def _initialize(self):
"""Initializes the method categories."""
# Collects the <Value>On/Off methods.
self.toggle_meths = {}
# Collects the Set<Prop>To<Value> methods.
self.state_meths = {}
# Collects the Set/Get<Value> pairs.
self.get_set_meths = {}
# Collects the Get<Value> methods.
self.get_meths = []
# Collects all the remaining methods.
self.other_meths = []
def _organize_methods(self, klass, methods):
"""Organizes the given methods of a VTK class into different
categories.
Parameters
----------
- klass : A VTK class
- methods : `list` of `str`
A list of the methods to be categorized.
"""
self._initialize()
meths = methods[:]
meths = self._find_toggle_methods(klass, meths)
meths = self._find_state_methods(klass, meths)
meths = self._find_get_set_methods(klass, meths)
meths = self._find_get_methods(klass, meths)
self.other_meths = [x for x in meths \
if callable(getattr(klass, x)) and
'__' not in x and
not isinstance(getattr(klass, x), type)]
def _remove_method(self, meths, method):
try:
meths.remove(method)
except ValueError:
pass
def _find_toggle_methods(self, klass, methods):
"""Find/store methods of the form <Value>{On,Off} in the given
`methods`. Returns the remaining list of methods.
"""
meths = methods[:]
tm = self.toggle_meths
klass_name = klass.__name__
problem_methods = ['CopyVectors', 'CopyTensors',
'CopyTCoords', 'CopyScalars',
'CopyNormals', 'CopyGlobalIds',
'CopyPedigreeIds']
for method in meths[:]:
if klass_name == 'vtkDataSetAttributes' and \
method[:-2] in problem_methods:
continue
elif method[:-2] == 'AlphaBitPlanes':
continue
if method[-2:] == 'On':
key = method[:-2]
if (key + 'Off') in meths and ('Get' + key) in meths:
tm[key] = None
meths.remove(method)
meths.remove(key + 'Off')
self._remove_method(meths, 'Set' + key)
self._remove_method(meths, 'Get' + key)
# get defaults
if tm:
obj = self._get_instance(klass)
if obj:
for key in tm:
try:
tm[key] = getattr(obj, 'Get%s'%key)()
except (TypeError, AttributeError):
print klass.__name__, key
pass
return meths
def _find_state_methods(self, klass, methods):
"""Find/store methods of the form Set<Prop>To<Value> in the
given `methods`. Returns the remaining list of methods. The
method also computes the mapped value of the different
<Values>.
"""
# These ignored ones are really not state methods.
ignore = ['SetUpdateExtentToWholeExtent',
'SetDataExtentToWholeExtent',
'SetOutputSpacingToDefault', # In vtkImageReslice.
'SetOutputOriginToDefault', # In vtkImageReslice
'SetOutputExtentToDefault' # In vtkImageReslice
]
meths = methods[:]
sm = self.state_meths
for method in meths[:]:
if method not in ignore and method[:3] == 'Set':
# Methods of form Set<Prop>To<Value>
match = self._state_patn.search(method)
# Second cond. ensures that this is not an accident.
if match and (('Get'+method[3:]) not in meths):
key = method[3:match.start()] # The <Prop> part.
if (('Get' + key) in methods):
val = method[match.start()+2:] # <Value> part.
meths.remove(method)
if sm.has_key(key):
sm[key].append([val, None])
else:
sm[key] = [[val, None]]
meths.remove('Get'+ key)
self._remove_method(meths, 'Set'+ key)
if ('Get' + key + 'MaxValue') in meths:
meths.remove('Get' + key + 'MaxValue')
meths.remove('Get' + key + 'MinValue')
try:
meths.remove('Get' + key + 'AsString')
except ValueError:
pass
# Find the values for each of the states, i.e. find that
# vtkProperty.SetRepresentationToWireframe() corresponds to
# vtkProperty.SetRepresentation(1).
if sm:
obj = self._get_instance(klass)
klass_name = klass.__name__
if obj and not klass_name.endswith('Viewer'):
# We do not try to inspect viewers, because they'll
# trigger segfaults during the inspection
for key, values in sm.items():
default = getattr(obj, 'Get%s'%key)()
for x in values[:]:
try:
getattr(obj, 'Set%sTo%s'%(key, x[0]))()
except TypeError:
# vtkRenderedGraphRepresentation has some of
# its SetIvarToState methods that have
# non-standard arguments, this throws off
# the parser and we ignore these.
#print klass.__name__, key
pass
else:
val = getattr(obj, 'Get%s'%key)()
x[1] = val
if val == default:
values.insert(0, [x[0], val])
return meths
def _find_get_set_methods(self, klass, methods):
"""Find/store methods of the form {Get,Set}Prop in the given
`methods` and returns the remaining list of methods.
Note that it makes sense to call this *after*
`_find_state_methods` is called in order to avoid incorrect
duplication. This method also computes the default value and
the ranges of the arguments (when possible) by using the
Get<Prop>{MaxValue,MinValue} methods.
"""
meths = methods[:]
gsm = self.get_set_meths
klass_name = klass.__name__
for method in meths[:]:
# Methods of the Set/Get form.
if method in ['Get', 'Set']:
# This occurs with the vtkInformation class.
continue
elif klass_name == 'vtkProp' and method[3:] == 'AllocatedRenderTime':
# vtkProp.Get/SetAllocatedRenderTime is private and
# SetAllocatedRenderTime takes two args, don't wrap it.
continue
elif klass_name == 'vtkGenericAttributeCollection' and \
method[3:] == 'AttributesToInterpolate':
continue
elif klass_name == 'vtkOverlappingAMR' and method[3:] == 'Origin':
continue
elif (klass_name == 'vtkOrientationMarkerWidget'
and method[3:] in ['OutlineColor', 'Viewport']):
continue
elif (klass_name == 'vtkImageDataGeometryFilter'
and method[3:] == 'Extent'):
continue
elif (klass_name == 'vtkVolumeMapper'
and method[3:] == 'CroppingRegionPlanes'):
continue
elif (method[:3] == 'Set') and ('Get' + method[3:]) in methods:
key = method[3:]
meths.remove('Set' + key)
meths.remove('Get' + key)
if ('Get' + key + 'MaxValue') in meths:
meths.remove('Get' + key + 'MaxValue')
meths.remove('Get' + key + 'MinValue')
gsm[key] = 1
else:
gsm[key] = None
# Find the default and range of the values.
if gsm:
obj = self._get_instance(klass)
if obj:
for key, value in gsm.items():
if klass_name in ['vtkPolyData', 'vtkContext2D']:
# Evil hack, these classes segfault!
default = None
elif klass_name == 'vtkHyperOctree' and \
key == 'Dimension':
# This class breaks standard VTK conventions.
gsm[key] = (3, (1, 3))
continue
else:
try:
default = getattr(obj, 'Get%s'%key)()
except TypeError:
default = None
if value:
low = getattr(obj, 'Get%sMinValue'%key)()
high = getattr(obj, 'Get%sMaxValue'%key)()
gsm[key] = (default, (low, high))
else:
gsm[key] = (default, None)
else:
# We still might have methods that have a default range.
for key, value in gsm.items():
if value == 1:
gsm[key] = None
return meths
def _find_get_methods(self, klass, methods):
"""Find/store methods of the form Get<Value> in the given
`methods` and returns the remaining list of methods.
"""
meths = methods[:]
gm = self.get_meths
for method in meths[:]:
if method == 'Get':
# Occurs with vtkInformation
continue
elif method[:3] == 'Get':
gm.append(method)
meths.remove(method)
return meths
def _get_instance(self, klass):
"""Given a VTK class, `klass`, returns an instance of the
class.
If the class is abstract, it uses the class tree to return an
instantiable subclass. This is necessary to get the values of
the 'state' methods and the ranges for the Get/Set methods.
"""
obj = None
try:
obj = klass()
except (TypeError, NotImplementedError):
if self._tree:
t = self._tree
n = t.get_node(klass.__name__)
for c in n.children:
obj = self._get_instance(t.get_class(c.name))
if obj:
break
return obj
|
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
#
# Copyright 2017, 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import os
import pipes
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: docker_creds
short_description: Creates/updates a 'docker login' file in place of using 'docker login'
version_added: "2.4"
description:
- This module creates a docker config.json file in the directory provided by 'path'
on hosts that do not support 'docker login' but need the file present for
registry authentication purposes of various other services.
options:
path:
description:
- Path to the directory in which to create or update the docker config.json file.
required: true
registry:
description:
- This is the registry the credentials are for.
required: true
username:
description:
- This is the username to authenticate to the registry with.
required: true
password:
description:
- This is the password to authenticate to the registry with.
required: true
test_login:
description:
- Attempt to connect to registry with username + password provided.
default: true
required: false
test_timeout:
description:
- Timeout in seconds for each attempt to connect to registry.
default: 20
required: false
author:
- "Michael Gugino <[email protected]>"
'''
EXAMPLES = '''
# Write registry credentials into /root/.docker/config.json
- name: Place credentials in file
docker_creds:
path: /root/.docker
registry: registry.example.com:443
username: myuser
password: mypassword
test_login: True
test_timeout: 30
'''
def check_dest_dir_exists(module, dest):
'''Check if dest dir is present and is a directory'''
dir_exists = os.path.exists(dest)
if dir_exists:
if not os.path.isdir(dest):
msg = "{} exists but is not a directory".format(dest)
result = {'failed': True,
'changed': False,
'msg': msg,
'state': 'unknown'}
module.fail_json(**result)
else:
return 1
else:
return 0
def create_dest_dir(module, dest):
try:
os.makedirs(dest, mode=0o700)
except OSError as oserror:
result = {'failed': True,
'changed': False,
'msg': str(oserror),
'state': 'unknown'}
module.fail_json(**result)
def load_config_file(module, dest):
'''load the config.json in directory dest'''
conf_file_path = os.path.join(dest, 'config.json')
if os.path.exists(conf_file_path):
# Try to open the file and load json data
try:
with open(conf_file_path) as conf_file:
data = conf_file.read()
jdata = json.loads(data)
except IOError as ioerror:
result = {'failed': True,
'changed': False,
'msg': str(ioerror),
'state': 'unknown'}
module.fail_json(**result)
except ValueError as jsonerror:
result = {'failed': True,
'changed': False,
'msg': str(jsonerror),
'state': 'unknown'}
module.fail_json(**result)
return jdata
else:
# File doesn't exist, we just return an empty dictionary.
return {}
# pylint: disable=too-many-arguments
def gen_skopeo_cmd(registry, username, password, proxy_vars, test_timeout, test_image, tls_verify):
'''Generate skopeo command to run'''
skopeo_temp = ("{proxy_vars} timeout {test_timeout} skopeo inspect"
" {creds} docker://{registry}/{test_image}")
# this will quote the entire creds argument to account for special chars.
creds = pipes.quote('--creds={}:{}'.format(username, password))
skopeo_args = {'proxy_vars': proxy_vars, 'test_timeout': test_timeout, 'creds': creds,
'registry': registry, 'test_image': test_image,
'tls_verify': tls_verify}
return skopeo_temp.format(**skopeo_args).strip()
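# Illustrative result (all values are placeholders; with `proxy_vars` empty
# the leading space is stripped):
#   timeout 20 skopeo inspect --creds=myuser:mypassword docker://registry.example.com:443/some/test-image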
def validate_registry_login(module, skopeo_command):
'''Attempt to use credentials to log into registry'''
# skopeo doesn't honor docker config file proxy settings; need to specify
# proxy vars on the cli.
rtnc, _, err = module.run_command(skopeo_command, use_unsafe_shell=True)
if rtnc:
result = {'failed': True,
'changed': False,
'msg': str(err),
'state': 'unknown'}
module.fail_json(**result)
def update_config(docker_config, registry, encoded_auth):
'''Add our registry auth credentials into docker_config dict'''
# Add anything that might be missing in our dictionary
if 'auths' not in docker_config:
docker_config['auths'] = {}
if registry not in docker_config['auths']:
docker_config['auths'][registry] = {}
# check if the same value is already present for idempotency.
if 'auth' in docker_config['auths'][registry]:
if docker_config['auths'][registry]['auth'] == encoded_auth:
# No need to go further, everything is already set in file.
return False
docker_config['auths'][registry]['auth'] = encoded_auth
return True
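# After update_config the dict written to config.json has the shape below
# (illustrative registry name; the auth value is base64 of "username:password"):
# {
#     "auths": {
#         "registry.example.com:443": {
#             "auth": "<base64 of username:password>"
#         }
#     }
# }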
def write_config(module, docker_config, dest):
'''Write updated credentials into dest/config.json'''
if not isinstance(docker_config, dict):
docker_config = docker_config.decode()
conf_file_path = os.path.join(dest, 'config.json')
try:
with open(conf_file_path, 'w') as conf_file:
json.dump(docker_config, conf_file, indent=8)
except IOError as ioerror:
result = {'failed': True,
'changed': False,
'msg': str(ioerror),
'state': 'unknown'}
module.fail_json(**result)
def run_module():
'''Run this module'''
module_args = dict(
path=dict(aliases=['dest', 'name'], required=True, type='path'),
registry=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
test_login=dict(type='bool', required=False, default=True),
proxy_vars=dict(type='str', required=False, default=''),
test_timeout=dict(type='int', required=False, default=20),
test_image=dict(type='str', required=True),
tls_verify=dict(type='bool', required=False, default=True)
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False
)
# First, create our dest dir if necessary
dest = module.params['path']
registry = module.params['registry']
username = module.params['username']
password = module.params['password']
test_login = module.params['test_login']
proxy_vars = module.params['proxy_vars']
test_timeout = module.params['test_timeout']
test_image = module.params['test_image']
tls_verify = module.params['tls_verify']
if not check_dest_dir_exists(module, dest):
create_dest_dir(module, dest)
docker_config = {}
else:
# We want to scrape the contents of dest/config.json
# in case there are other registries/settings already present.
docker_config = load_config_file(module, dest)
# Test the credentials
if test_login:
skopeo_command = gen_skopeo_cmd(registry, username, password,
proxy_vars, test_timeout, test_image, tls_verify)
validate_registry_login(module, skopeo_command)
    # base64 encode our username:password string and decode back to str so it
    # compares correctly with any existing entry and serializes to JSON.
    encoded_auth = base64.b64encode('{}:{}'.format(username, password).encode()).decode()
# Put the registry auth info into the config dict.
changed = update_config(docker_config, registry, encoded_auth)
if changed:
write_config(module, docker_config, dest)
result = {'changed': changed, 'rc': 0}
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class _CustomMapping(collections.Mapping):
def __init__(self, *args, **kwargs):
self._wrapped = dict(*args, **kwargs)
def __getitem__(self, key):
return self._wrapped[key]
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
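# Note: nest treats any collections.Mapping (such as _CustomMapping above) like
# a dict, flattening and packing its values in sorted-key order; several tests
# below (e.g. testFlattenDictOrder) rely on that behaviour.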
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack_withDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
NestTest.Abc(3, 4), {
"d": _CustomMapping({
41: 4
}),
"c": [
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
],
"b": 5
}, 17
]
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
structure_of_mess = [
14,
NestTest.Abc("a", True),
{
"d": _CustomMapping({
41: 42
}),
"c": [
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
],
"b": 3
},
"hi everybody",
]
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
unflattened_custom_mapping = unflattened[2]["d"]
self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertEqual(len(flattened), 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertEqual(len(flattened), 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
def testPackSequenceAs_notIterableError(self):
with self.assertRaisesRegexp(TypeError,
"flat_sequence must be a sequence"):
nest.pack_sequence_as("hi", "bye")
def testPackSequenceAs_wrongLengthsError(self):
with self.assertRaisesRegexp(
ValueError,
"Structure had 2 elements, but flat_sequence had 3 elements."):
nest.pack_sequence_as(["hello", "world"],
["and", "goodbye", "again"])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertTrue(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertTrue(nest.is_sequence([]))
self.assertTrue(nest.is_sequence({"a": 1, "b": 2}))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
@parameterized.parameters({"mapping_type": _CustomMapping},
{"mapping_type": dict})
def testFlattenDictItems(self, mapping_type):
dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
another_bad_dictionary = mapping_type({
(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
})
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
# pylint does not correctly recognize these as class names and
# suggests to use variable style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
class SameNamedType1(SameNameab):
pass
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
"More specifically: Substructure "
r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
'substructure "type=str str=spam" is not')):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
"is not")):
nest.assert_same_structure([0, 1], np.array([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
'is a sequence, while substructure "type=int str=0" '
"is not")):
nest.assert_same_structure(0, [0, 1])
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(structure1, structure_different_nesting)
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
NestTest.Named0ab("a", "b"))
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab([3], 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure([[3], 4], [3, [4]])
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError,
"don't have the same set of keys"):
nest.assert_same_structure({"a": 1}, {"b": 1})
nest.assert_same_structure(NestTest.SameNameab(0, 1),
NestTest.SameNameab2(2, 3))
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
nest.assert_same_structure(
NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
expected_message = "The two structures don't have the same.*"
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_same_structure(
NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
# Empty structures
self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
NestTest.EmptyNT()))
# This is checking actual equality of types, empty list != empty tuple
self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "at least one structure"):
nest.map_structure(lambda x: x)
with self.assertRaisesRegexp(ValueError, "same number of elements"):
nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, structure1, structure1_list)
nest.map_structure(lambda x, y: None, structure1, structure1_list,
check_types=False)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructureWithStrings(self):
inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
inp_b = NestTest.ABTuple(a=2, b=(1, 3))
out = nest.map_structure(lambda string, repeats: string * repeats,
inp_a,
inp_b)
self.assertEqual("foofoo", out.a)
self.assertEqual("bar", out.b[0])
self.assertEqual("bazbazbaz", out.b[1])
nt = NestTest.ABTuple(a=("something", "something_else"),
b="yet another thing")
rev_nt = nest.map_structure(lambda x: x[::-1], nt)
# Check the output is the correct structure, and all strings are reversed.
nest.assert_same_structure(nt, rev_nt)
self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
self.assertEqual(nt.b[::-1], rev_nt.b)
def testMapStructureOverPlaceholders(self):
inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
nest.assert_same_structure(output, inp_a)
self.assertShapeEqual(np.zeros((3, 4)), output[0])
self.assertShapeEqual(np.zeros((3, 7)), output[1])
feed_dict = {
inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
}
with self.cached_session() as sess:
output_np = sess.run(output, feed_dict=feed_dict)
self.assertAllClose(output_np[0],
feed_dict[inp_a][0] + feed_dict[inp_b][0])
self.assertAllClose(output_np[1],
feed_dict[inp_a][1] + feed_dict[inp_b][1])
def testAssertShallowStructure(self):
inp_ab = ["a", "b"]
inp_abc = ["a", "b", "c"]
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = [(1, 1), (2, 2)]
inp_ab2 = [[1, 1], [2, 2]]
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'list'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
expected_message = (
r"The two structures don't have the same keys. Input "
r"structure has keys \['c'\], while shallow structure has "
r"keys \['d'\].")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
inp_shallow = NestTest.SameNameab(1, 2)
inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
def testFlattenUpTo(self):
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = NestTest.ABTuple
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
# Named tuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
# Lists.
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ["evens", ["odds", "primes"]]
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
def testGetTraverseShallowStructure(self):
scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
scalar_traverse_r = nest.get_traverse_shallow_structure(
lambda s: not isinstance(s, tuple),
scalar_traverse_input)
self.assertEqual(scalar_traverse_r,
[True, True, False, [True, True], {"a": False}, []])
nest.assert_shallow_structure(scalar_traverse_r,
scalar_traverse_input)
structure_traverse_input = [(1, [2]), ([1], 2)]
structure_traverse_r = nest.get_traverse_shallow_structure(
lambda s: (True, False) if isinstance(s, tuple) else True,
structure_traverse_input)
self.assertEqual(structure_traverse_r,
[(True, False), ([True], False)])
nest.assert_shallow_structure(structure_traverse_r,
structure_traverse_input)
with self.assertRaisesRegexp(TypeError, "returned structure"):
nest.get_traverse_shallow_structure(lambda _: [True], 0)
with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
nest.get_traverse_shallow_structure(lambda _: 1, [1])
with self.assertRaisesRegexp(
TypeError, "didn't return a depth=1 structure of bools"):
nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
def testFlattenWithStringPaths(self):
for inputs_expected in (
{"inputs": [], "expected": []},
{"inputs": [23, "42"], "expected": [("0", 23), ("1", "42")]},
{"inputs": [[[[108]]]], "expected": [("0/0/0/0", 108)]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(
nest.flatten_with_joined_string_paths(inputs, separator="/"),
expected)
# Need a separate test for namedtuple as we can't declare tuple definitions
# in the @parameterized arguments.
def testFlattenNamedTuple(self):
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
test_cases = [
(Foo(a=3, b=Bar(c=23, d=42)),
[("a", 3), ("b/c", 23), ("b/d", 42)]),
(Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="something")),
[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "something")]),
(Bar(c=42, d=43),
[("c", 42), ("d", 43)]),
(Bar(c=[42], d=43),
[("c/0", 42), ("d", 43)]),
]
for inputs, expected in test_cases:
self.assertEqual(
list(nest.flatten_with_joined_string_paths(inputs)), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3], "b": [1, 3]},
{"b": [5, 6, 7], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
class NestBenchmark(test.Benchmark):
def run_and_report(self, s1, s2, name):
burn_iter, test_iter = 100, 30000
for _ in xrange(burn_iter):
nest.assert_same_structure(s1, s2)
t0 = time.time()
for _ in xrange(test_iter):
nest.assert_same_structure(s1, s2)
t1 = time.time()
self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter,
name=name)
def benchmark_assert_structure(self):
s1 = (((1, 2), 3), 4, (5, 6))
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
self.run_and_report(s1, s2, "assert_same_structure_6_elem")
s1 = (((1, 2), 3), 4, (5, 6)) * 10
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10
self.run_and_report(s1, s2, "assert_same_structure_60_elem")
if __name__ == "__main__":
test.main()
|
|
"""
Strategy Transformers -- class decorators that transform the behavior of any
strategy.
See the various Meta strategies for another type of transformation.
"""
import inspect
import random
import collections
from numpy.random import choice
from .actions import Actions, flip_action
from .random_ import random_choice
C, D = Actions.C, Actions.D
# Note: After a transformation is applied,
# the player's history is overwritten with the modified history
# just like in the noisy tournament case
# This can lead to unexpected behavior, such as when
# FlipTransformer is applied to Alternator
def StrategyTransformerFactory(strategy_wrapper, name_prefix=None):
"""Modify an existing strategy dynamically by wrapping the strategy
method with the argument `strategy_wrapper`.
Parameters
----------
strategy_wrapper: function
A function of the form `strategy_wrapper(player, opponent, proposed_action, *args, **kwargs)`
Can also use a class that implements
def __call__(self, player, opponent, action)
wrapper_args: tuple
Any arguments to pass to the wrapper
wrapper_kwargs: dict
Any keyword arguments to pass to the wrapper
    name_prefix: string, default None
      A string to prepend to the strategy and class name
"""
# Create a class that applies a wrapper function to the strategy method
# of a given class. We use a class here instead of a function so that the
# decorator can have arguments.
class Decorator(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
if "name_prefix" in kwargs:
self.name_prefix = kwargs["name_prefix"]
else:
self.name_prefix = name_prefix
def __call__(self, PlayerClass):
"""
Parameters
----------
PlayerClass: A subclass of axelrod.Player, e.g. Cooperator
The Player Class to modify
name_prefix: str
A string to prepend to the Player and Class name
Returns
-------
new_class, class object
A class object that can create instances of the modified PlayerClass
"""
args = self.args
kwargs = self.kwargs
# Define the new strategy method, wrapping the existing method
# with `strategy_wrapper`
def strategy(self, opponent):
# Is the original strategy method a static method?
argspec = inspect.getargspec(getattr(PlayerClass, "strategy"))
if 'self' in argspec.args:
# it's not a static method
proposed_action = PlayerClass.strategy(self, opponent)
else:
proposed_action = PlayerClass.strategy(opponent)
# Apply the wrapper
return strategy_wrapper(self, opponent, proposed_action,
*args, **kwargs)
# Define a new class and wrap the strategy method
# Modify the PlayerClass name
new_class_name = PlayerClass.__name__
name = PlayerClass.name
name_prefix = self.name_prefix
if name_prefix:
# Modify the Player name (class variable inherited from Player)
new_class_name = name_prefix + PlayerClass.__name__
# Modify the Player name (class variable inherited from Player)
name = name_prefix + ' ' + PlayerClass.name
# Dynamically create the new class
new_class = type(
new_class_name, (PlayerClass,),
{
"name": name,
"strategy": strategy,
"__module__": PlayerClass.__module__
})
return new_class
return Decorator
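# Minimal usage sketch (assumes an axelrod Player subclass such as
# axelrod.Cooperator is available; flip_wrapper and FlipTransformer are
# defined further down in this module):
#
#     FlipTransformer = StrategyTransformerFactory(flip_wrapper,
#                                                  name_prefix="Flipped")()
#     FlippedCooperator = FlipTransformer(axelrod.Cooperator)
#     player = FlippedCooperator()  # plays D wherever Cooperator would play C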
def compose_transformers(t1, t2):
"""Compose transformers without having to invoke the first on
a PlayerClass."""
class Composition(object):
def __init__(self):
self.t1 = t1
self.t2 = t2
def __call__(self, PlayerClass):
return t1(t2(PlayerClass))
return Composition()
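# For example, composing the transformers defined below (illustrative noise
# value, assumes axelrod.Cooperator is available):
#
#     NoisyFlip = compose_transformers(NoisyTransformer(noise=0.1),
#                                      FlipTransformer)
#     NoisyFlippedCooperator = NoisyFlip(axelrod.Cooperator)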
def generic_strategy_wrapper(player, opponent, proposed_action, *args, **kwargs):
"""
Strategy wrapper functions should be of the following form.
Parameters
----------
player: Player object or subclass (self)
opponent: Player object or subclass
proposed_action: an axelrod.Action, C or D
The proposed action by the wrapped strategy
proposed_action = Player.strategy(...)
args, kwargs:
Any additional arguments that you need.
Returns
-------
action: an axelrod.Action, C or D
"""
# This example just passes through the proposed_action
return proposed_action
IdentityTransformer = StrategyTransformerFactory(generic_strategy_wrapper)()
def flip_wrapper(player, opponent, action):
"""Applies flip_action at the class level."""
return flip_action(action)
FlipTransformer = StrategyTransformerFactory(
flip_wrapper, name_prefix="Flipped")()
def noisy_wrapper(player, opponent, action, noise=0.05):
"""Applies flip_action at the class level."""
r = random.random()
if r < noise:
return flip_action(action)
return action
NoisyTransformer = StrategyTransformerFactory(
noisy_wrapper, name_prefix="Noisy")
def forgiver_wrapper(player, opponent, action, p):
"""If a strategy wants to defect, flip to cooperate with the given
probability."""
if action == D:
return random_choice(p)
return C
ForgiverTransformer = StrategyTransformerFactory(
forgiver_wrapper, name_prefix="Forgiving")
def initial_sequence(player, opponent, action, initial_seq):
"""Play the moves in `seq` first (must be a list), ignoring the strategy's
moves until the list is exhausted."""
index = len(player.history)
if index < len(initial_seq):
return initial_seq[index]
return action
InitialTransformer = StrategyTransformerFactory(initial_sequence)
def final_sequence(player, opponent, action, seq):
"""Play the moves in `seq` first, ignoring the strategy's moves until the
list is exhausted."""
length = player.match_attributes["length"]
player.classifier["makes_use_of"].update(["length"])
if length < 0: # default is -1
return action
index = length - len(player.history)
# If for some reason we've overrun the expected game length, just pass
# the intended action through
if len(player.history) >= length:
return action
# Check if we're near the end and need to start passing the actions
# from seq for the final few rounds.
if index <= len(seq):
return seq[-index]
return action
FinalTransformer = StrategyTransformerFactory(final_sequence)
def history_track_wrapper(player, opponent, action):
"""Wrapper to track a player's history in a variable `._recorded_history`."""
try:
player._recorded_history.append(action)
except AttributeError:
player._recorded_history = [action]
return action
TrackHistoryTransformer = StrategyTransformerFactory(history_track_wrapper,
name_prefix="HistoryTracking")()
def deadlock_break_wrapper(player, opponent, action):
"""Detect and attempt to break deadlocks by cooperating."""
if len(player.history) < 2:
return action
last_round = (player.history[-1], opponent.history[-1])
penultimate_round = (player.history[-2], opponent.history[-2])
if (penultimate_round, last_round) == ((C, D), (D, C)) or \
(penultimate_round, last_round) == ((D, C), (C, D)):
# attempt to break deadlock by Cooperating
return C
return action
DeadlockBreakingTransformer = StrategyTransformerFactory(
deadlock_break_wrapper, name_prefix="DeadlockBreaking")()
def grudge_wrapper(player, opponent, action, grudges):
"""After `grudges` defections, defect forever."""
if opponent.defections > grudges:
return D
return action
GrudgeTransformer = StrategyTransformerFactory(
grudge_wrapper, name_prefix="Grudging")
def apology_wrapper(player, opponent, action, myseq, opseq):
length = len(myseq)
if len(player.history) < length:
return action
if (myseq == player.history[-length:]) and \
(opseq == opponent.history[-length:]):
return C
return action
ApologyTransformer = StrategyTransformerFactory(
apology_wrapper, name_prefix="Apologizing")
def mixed_wrapper(player, opponent, action, probability, m_player):
"""Randomly picks a strategy to play, either from a distribution on a list
of players or a single player.
In essence creating a mixed strategy.
Parameters
----------
    probability: a float (or integer: 0 or 1) OR an iterable representing an
    incomplete probability distribution (entries do not have to sum to 1).
    Eg: 0, 1, [.5, .5], (.5, .3)
    m_player: a single player class or an iterable of player classes to mix
    from.
    Eg: axelrod.TitForTat, [axelrod.Cooperator, axelrod.Defector]
"""
# If a single probability, player is passed
if isinstance(probability, float) or isinstance(probability, int):
m_player = [m_player]
probability = [probability]
# If a probability distribution, players is passed
if isinstance(probability, collections.Iterable) and \
isinstance(m_player, collections.Iterable):
mutate_prob = sum(probability) # Prob of mutation
if mutate_prob > 0:
# Distribution of choice of mutation:
normalised_prob = [prob / float(mutate_prob)
for prob in probability]
if random.random() < mutate_prob:
p = choice(list(m_player), p=normalised_prob)()
p.history = player.history
return p.strategy(opponent)
return action
MixedTransformer = StrategyTransformerFactory(
mixed_wrapper, name_prefix="Mutated")
# Strategy wrappers as classes
class RetaliationWrapper(object):
"""Retaliates `retaliations` times after a defection (cumulative)."""
def __call__(self, player, opponent, action, retaliations):
if len(player.history) == 0:
self.retaliation_count = 0
return action
if opponent.history[-1] == D:
self.retaliation_count += retaliations - 1
return D
if self.retaliation_count == 0:
return action
if self.retaliation_count > 0:
self.retaliation_count -= 1
return D
RetaliationTransformer = StrategyTransformerFactory(
RetaliationWrapper(), name_prefix="Retaliating")
class RetaliationUntilApologyWrapper(object):
"""Enforces the TFT rule that the opponent pay back a defection with a
cooperation for the player to stop defecting."""
def __call__(self, player, opponent, action):
if len(player.history) == 0:
self.is_retaliating = False
return action
if opponent.history[-1] == D:
self.is_retaliating = True
if self.is_retaliating:
if opponent.history[-1] == C:
self.is_retaliating = False
return C
return D
return action
RetaliateUntilApologyTransformer = StrategyTransformerFactory(
RetaliationUntilApologyWrapper(), name_prefix="RUA")()
|
|
#!/usr/bin/env python
# Fit proper motion and parallax using ra/dec/mjd data
# Most of this code was taken from here:
# https://github.com/ctheissen/WISE_Parallaxes/blob/master/WISE_Parallax.py
import os, sys
import numpy as np
from astropy.table import Table, vstack, join
#import matplotlib.pyplot as plt
from astropy import units as u
from scipy.optimize import curve_fit, minimize
from astropy.time import Time
import astropy.coordinates as coords
from dlnpyutils import utils as dln, coords as dcoords
from argparse import ArgumentParser
import time
from dl import queryClient as qc
import psycopg2 as pq
# Set some constants
d2a = 3600.
d2ma = 3600000.
d2y = 1/365.25
def astrometryfunc(x, Delta1, Delta2, PMra, PMdec, pi):
""" Compute proper motion and parallax model for a set of ra/dec/mjd values."""
# x: input list of central RA and DEC positions and array of MJDs
# Delta1: initial dRA position
# Delta2: initial dDEC position
# PMra: proper motion in RA (arcsec/yr)
# PMdec: proper motion in DEC (arcsec/yr)
# pi: parallax (arcsec)
ra0, dec0, mjds = x
n = len(mjds)
years = (mjds - mjds[0])*d2y
ras = np.zeros(n,np.float64)+ra0
decs = np.zeros(n,np.float64)+dec0
bary = coords.get_body_barycentric('earth', Time(mjds, format='mjd'))
# Parallax factors
Fac1 = (bary.x * np.sin(ras*np.pi/180.) - bary.y * np.cos(ras*np.pi/180.) )
Fac2 = bary.x * np.cos(ras*np.pi/180.) * np.sin(decs*np.pi/180.) + \
bary.y * np.sin(ras*np.pi/180.) * np.sin(decs*np.pi/180.) - \
bary.z * np.cos(decs*np.pi/180.)
RAsend = Delta1 + PMra * years + pi * Fac1.value
DECsend = Delta2 + PMdec * years + pi * Fac2.value
return np.concatenate( [RAsend, DECsend]).flatten()
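# The model above returns the concatenated [dRA, dDEC] offsets (arcsec):
#   dRA(t)  = Delta1 + PMra  * t + pi * F_ra(t)
#   dDEC(t) = Delta2 + PMdec * t + pi * F_dec(t)
# where t is the time in years since the first epoch and F_ra/F_dec are the
# parallax factors computed from the Earth's barycentric position.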
def fit(cat):
""" Fit proper motion and parallax to ra/dec/mjd data in a table."""
mjd = cat['mjd']
ra = cat['ra']
raerr = cat['raerr']
dec = cat['dec']
decerr = cat['decerr']
# Compute relative positions
cenra = np.mean(ra)
cendec = np.mean(dec)
lon,lat = dcoords.rotsphcen(ra,dec,cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Fit proper motion and parallax
pars, cov = curve_fit(astrometryfunc, [ra, dec, mjd] ,
np.concatenate( [lon,lat] ).flatten(),
sigma=np.concatenate( [ raerr, decerr ] ).flatten() )
return pars,cov
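# Minimal usage sketch (assumes `meas` is a table or structured array with the
# columns used above: 'ra', 'raerr', 'dec', 'decerr' and 'mjd'):
#
#     pars, cov = fit(meas)
#     perr = np.sqrt(np.diag(cov))
#     # pars = [dRA0, dDEC0, pmra, pmdec, parallax] in arcsec and arcsec/yr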
def plotfit(cat,pars,cov,savefig=None):
    """ Plot a figure of the data and the proper motion/parallax fit."""
    # Import matplotlib lazily so the batch fitting below can run on systems
    # without a display or matplotlib install.
    import matplotlib.pyplot as plt
    plt.rcParams.update({'font.size': 12})
# Compute relative positions
cenra = np.mean(cat['ra'])
cendec = np.mean(cat['dec'])
lon,lat = dcoords.rotsphcen(cat['ra'],cat['dec'],cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Array of MJDs for model curve
mjd = np.linspace(np.min(cat['mjd']),np.max(cat['mjd']),100)
out = astrometryfunc([cenra,cendec,mjd],pars[0],pars[1],pars[2],pars[3],pars[4])
ll = out[0:100]
bb = out[100:]
# Plot the model and data
plt.plot(ll,bb)
plt.errorbar(lon,lat,xerr=cat['raerr'],yerr=cat['decerr'],fmt='o',color='black',
markersize=5,ecolor='lightgray',elinewidth=2,linestyle='none',capsize=0)
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
xr = dln.minmax(np.concatenate((lon,ll)))
xr = [xr[0]-0.05*dln.valrange(xr),xr[1]+0.05*dln.valrange(xr)]
yr = dln.minmax(np.concatenate((lat,bb)))
yr = [yr[0]-0.05*dln.valrange(yr),yr[1]+0.05*dln.valrange(yr)]
plt.xlim(xr)
plt.ylim(yr)
perr = np.sqrt(np.diag(cov))
plt.annotate(r'$\mu_\alpha$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[2]*1e3,perr[2]*1e3) + '\n' +
r'$\mu_\delta$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[3]*1e3,perr[3]*1e3) + '\n' +
r'$\pi$ = %5.3f $\pm$ %5.3f mas' % (pars[4]*1e3,perr[4]*1e3),
xy=(xr[0]+0.05*dln.valrange(xr),yr[1]-0.20*dln.valrange(yr)),ha='left')
if savefig is not None:
plt.savefig(savefig)
# Main command-line program
if __name__ == "__main__":
    parser = ArgumentParser(description='Fit proper motion and parallax to NSC high-proper-motion objects')
parser.add_argument('healpix0', type=int, nargs=1, help='Starting healpix')
parser.add_argument('healpix1', type=int, nargs=1, help='Ending healpix')
args = parser.parse_args()
t0 = time.time()
pix0 = args.healpix0[0]
pix1 = args.healpix1[0]
connection = pq.connect(user="dlquery",host="db01.datalab.noao.edu",
password="",port = "5432",database = "tapdb")
cur = connection.cursor()
osql = '''select id,ra,dec,gmag,imag,ndet from nsc_dr2.hpm where
pix>=%d and pix<=%d and ndet>=20 and deltamjd > 1095 and
(POWER(pmra/pmraerr,2) + POWER(pmdec/pmdecerr,2)) > 27.63''' % (pix0,pix1)
cur.execute(osql)
data = cur.fetchall()
# Convert to numpy structured array
dtype = np.dtype([('id',np.str,50),('ra',np.float64),('dec',np.float64),
('gmag',float),('imag',float),('ndet',int)])
obj = np.zeros(len(data),dtype=dtype)
obj[...] = data
del(data)
nobj = len(obj)
if nobj==0:
print('No objects found')
sys.exit()
print(str(nobj)+' total objects found')
dt = np.dtype([('objectid',np.str,50),('nmeas',int),('chi2_motion',float),('deltamjd',float),
('class_star',float),('gmag',float),('rmag',float),('imag',float),('zmag',float),
('pars',np.float64,5),('perr',np.float64,5),('cov',np.float64,(5,5))])
cat = np.zeros(nobj,dtype=dt)
# HEALPix loop
pix = np.arange(pix0,pix1+1)
cnt = 0
for p in pix:
#print('Pix = '+str(p))
osql1 = '''select id,ra,dec,pmra,pmraerr,pmdec,pmdecerr,gmag,rmag,imag,zmag,ndet,class_star,deltamjd
from nsc_dr2.hpm where pix=%d and ndet>=20 and deltamjd > 1095 and
(POWER(pmra/pmraerr,2) + POWER(pmdec/pmdecerr,2)) > 27.63''' % p
cur.execute(osql1)
data = cur.fetchall()
# Convert to numpy structured array
dtype = np.dtype([('id',np.str,50),('ra',np.float64),('dec',np.float64),
('pmra',np.float64),('pmraerr',float),('pmdec',np.float64),('pmdecerr',float),
('gmag',float),('rmag',float),('imag',float),('zmag',float),
('ndet',int),('class_star',float),('deltamjd',np.float64)])
obj1 = np.zeros(len(data),dtype=dtype)
obj1[...] = data
del(data)
nobj1 = len(obj1)
#print(str(nobj1)+' objects')
if nobj1>0:
msql = '''select meas.objectid,meas.ra,meas.raerr,meas.dec,meas.decerr,meas.mjd,meas.class_star
from nsc_dr2.meas as meas join nsc_dr2.hpm as obj on meas.objectid=obj.id where
obj.pix=%d and obj.ndet>=20 and obj.deltamjd > 1095 and
(POWER(obj.pmra/obj.pmraerr,2) + POWER(obj.pmdec/obj.pmdecerr,2)) > 27.63''' % p
cur.execute(msql)
data = cur.fetchall()
# Convert to numpy structured array
dtype = np.dtype([('objectid',np.str,50),('ra',np.float64),('raerr',float),('dec',np.float64),
('decerr',float),('mjd',np.float64),('class_star',float)])
meas = np.zeros(len(data),dtype=dtype)
meas[...] = data
del(data)
nmeas = len(meas)
# Loop over objects
for i in range(nobj1):
ind, = np.where(meas['objectid']==obj1['id'][i])
nind = len(ind)
meas1 = meas[ind]
pars, cov = fit(meas1)
perr = np.sqrt(np.diag(cov))
print(str(cnt)+' '+obj1['id'][i]+' '+str(nind)+' '+str(pars))
cat['objectid'][cnt] = obj1['id'][i]
cat['nmeas'][cnt] = nind
cat['chi2_motion'][cnt] = (obj1['pmra'][i]/obj1['pmraerr'][i])**2 + (obj1['pmdec'][i]/obj1['pmdecerr'][i])**2
cat['deltamjd'][cnt] = obj1['deltamjd'][i]
cat['class_star'][cnt] = obj1['class_star'][i]
cat['gmag'][cnt] = obj1['gmag'][i]
cat['rmag'][cnt] = obj1['rmag'][i]
cat['imag'][cnt] = obj1['imag'][i]
cat['zmag'][cnt] = obj1['zmag'][i]
cat['pars'][cnt] = pars
cat['perr'][cnt] = perr
cat['cov'][cnt] = cov
cnt += 1
cur.close()
connection.close()
# Write the output file
outfile = '/net/dl2/dnidever/nsc/instcal/v3/parallax/plx_'+str(pix0)+'_'+str(pix1)+'.fits'
print('Writing to '+outfile)
Table(cat).write(outfile,overwrite=True)
dt = time.time()-t0
print('dt = '+str(dt)+' sec.')
|
|
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from ceilometer.openstack.common import gettextutils
from ceilometer.openstack.common import importutils
gettextutils.install('ceilometer')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
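# The emitted sample configuration looks roughly like this (illustrative
# module and option names):
#
#   [DEFAULT]
#
#   #
#   # Options defined in ceilometer.example.module
#   #
#
#   # Help text for the option (string value)
#   #option_name=<None>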
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'ceilometer'
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
|
|
"""
Notes on functionals in pyquante2.
1. I would like for all operations here to be array-scale operations.
- This means e.g. np.power rather than pow
2. I would like as few memory copies as possible.
3. Need to decide how to handle redundant information, i.e.
- rhoa and rhob when non-spin-polarized
This might make tracking total density and the zeta (spin polarization)
worthwhile; the latter could just be zero (or False or None or whatever)
- return values from non-spin-polarized calculations.
"""
import numpy as np
def zero_low_density(rho,cut=1e-10):
rho[rho<cut]=0
return rho
def xs(rho,alpha=2/3.):
"Xalpha X functional. alpha is the X-alpha scaling factor"
fac=-2.25*alpha*np.power(0.75/np.pi,1./3.)
rho3 = np.power(rho,1./3.)
fx = fac*rho*rho3
dfxdna = (4./3.)*fac*rho3
return fx,dfxdna
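# Minimal usage sketch (not part of the original module): evaluate the X-alpha
# exchange above on a whole grid at once, illustrating the "array-scale
# operations" note in the module docstring. The grid values are arbitrary
# sample densities chosen for this example.
def _example_xs():
    rho = np.array([1e-3, 0.1, 1.0])   # sample electron densities on a grid
    fx, dfxdna = xs(rho)               # elementwise energy density and derivative
    return fx, dfxdna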
def xb88_array(rho,gam,tol=1e-6):
# Still doesn't work
rho = zero_low_density(rho)
rho13 = np.power(rho,1./3.)
x = np.zeros(rho.shape,dtype=float)
g = np.zeros(rho.shape,dtype=float)
dg = np.zeros(rho.shape,dtype=float)
    # Mask out low-density points so the elementwise assignments broadcast
    # correctly (the unmasked version mixed full-size and masked arrays).
    mask = rho > tol
    x[mask] = np.sqrt(gam[mask])/rho13[mask]/rho[mask]
    g[mask] = b88_g(x[mask])
    dg[mask] = b88_dg(x[mask])
    dfxdrho = (4./3.)*rho13*(g-x*dg)
    dfxdgam = np.zeros(rho.shape,dtype=float)
    dfxdgam[mask] = 0.5*dg[mask]/np.sqrt(gam[mask])
    fx = rho*rho13*g
return fx,dfxdrho,dfxdgam
def xb88(rho,gam,tol=1e-10):
rho = zero_low_density(rho)
fxs = []
dfxdrhos = []
dfxdgams = []
for na,gama in zip(rho,gam):
fx = dfxdrho = dfxdgam = 0
if na > tol:
rho13 = np.power(na,1./3.)
x = np.sqrt(gama)/rho13/na
g = b88_g(x)
dg = b88_dg(x)
dfxdrho = (4./3.)*rho13*(g-x*dg)
dfxdgam = 0.5*dg/np.sqrt(gama)
fx = na*rho13*g
fxs.append(fx)
dfxdrhos.append(dfxdrho)
dfxdgams.append(dfxdgam)
return np.array(fxs),np.array(dfxdrhos),np.array(dfxdgams)
def xpbe(rho,gam,tol=1e-10):
rho = zero_low_density(rho)
fxs = []
dfxdrhos = []
dfxdgams = []
for na,gama in zip(rho,gam):
fx = dfxdrho = dfxdgam = 0
if na > tol:
kap = 0.804
mu = 0.449276922095889E-2
fx0,vx0 = xs(na)
rho13 = na**(1.0/3.0)
rho43 = rho13*na
den = 1+mu*gama/rho43/rho43
F = 1+kap-kap/den
fx = fx0*F
dFdr = -(8./3.)*kap*mu*gama/den/den*na**(-11./3.)
dfxdrho = vx0*F+fx0*dFdr
dFdg = -kap*mu/rho43/rho43/den/den
dfxdgam = fx0*dFdg
fxs.append(fx)
dfxdrhos.append(dfxdrho)
dfxdgams.append(dfxdgam)
return np.array(fxs),np.array(dfxdrhos),np.array(dfxdgams)
def cvwn5(rhoa,rhob,tol=1e-10):
rhoa = zero_low_density(rhoa)
rhob = zero_low_density(rhob)
ecs = []
vcrhoas = []
vcrhobs = []
for na,nb in zip(rhoa,rhob):
rho = na+nb
ec = vcrhoa = vcrhob = 0
if rho>tol:
zeta=(na-nb)/rho
x = pow(3./4./np.pi/rho,1/6.)
epsp = vwn_epsp(x)
epsf = vwn_epsf(x)
g = vwn_g(zeta)
eps = epsp + g*(epsf-epsp)
ec = eps*rho
depsp = vwn_depsp(x)
depsf = vwn_depsf(x)
dg = vwn_dg(zeta)
deps_dx = depsp + g*(depsf-depsp)
deps_dg = (epsf-epsp)*dg
vcrhoa = eps - (x/6.)*deps_dx + deps_dg*(1-zeta)
vcrhob = eps - (x/6.)*deps_dx - deps_dg*(1+zeta)
ecs.append(ec)
vcrhoas.append(vcrhoa)
vcrhobs.append(vcrhob)
return np.array(ecs),np.array(vcrhoas),np.array(vcrhobs)
def clyp(rhoas,rhobs,gaas,gabs,gbbs,tol=1e-10):
fcs = []
fcnas = []
fcnbs = []
fcgaas = []
fcgabs = []
fcgbbs = []
for na,nb,gaa,gab,gbb in zip(rhoas,rhobs,gaas,gabs,gbbs):
fc,fcna,fcnb,fcgaa,fcgab,fcgbb = clyp_point(na,nb,gaa,gab,gbb,tol)
fcs.append(fc)
        fcnas.append(fcna)
fcnbs.append(fcnb)
fcgaas.append(fcgaa)
fcgabs.append(fcgab)
fcgbbs.append(fcgbb)
return np.array(fcs),np.array(fcnas),np.array(fcnbs),np.array(fcgaas),np.array(fcgabs),np.array(fcgbbs)
def clyp_point(rhoa,rhob,gamaa,gamab,gambb,tol=1e-10):
# Modified and corrected by AEM in June 2006.
a = 0.04918 # Parameters from the LYP papers
b = 0.132
c = 0.2533
d = 0.349
rho = rhoa+rhob
fc=fcrhoa=fcrhob=fcgamaa=fcgamab=fcgambb=0
assert rhoa >= 0.0
assert rhob >= 0.0
if rho > tol:
rhom3 = np.power(rho,-1./3.)
w = np.exp(-c*rhom3)/(1+d*rhom3)*np.power(rho,-11./3.)
dl = c*rhom3+d*rhom3/(1+d*rhom3)
fcgamaa = -a*b*w*((1./9.)*rhoa*rhob*(1-3*dl-(dl-11)*rhoa/rho)-rhob*rhob)
fcgamab = -a*b*w*((1./9.)*rhoa*rhob*(47-7*dl)-(4./3.)*rho*rho)
fcgambb = -a*b*w*((1./9.)*rhoa*rhob*(1-3*dl-(dl-11)*rhob/rho)-rhoa*rhoa)
fc = -4*a/(1+d*rhom3)*rhoa*rhob/rho \
-np.power(2,11./3.)*0.3*np.power(3*np.pi*np.pi,2./3.)*a*b*w \
*rhoa*rhob*(np.power(rhoa,8./3.)+np.power(rhob,8./3.)) \
+ fcgamaa*gamaa + fcgamab*gamab + fcgambb*gambb
dw = -(1./3.)*np.power(rho,-4./3.)*w*(11*np.power(rho,1./3.)-c-d/(1+d*rhom3))
ddl = (1./3.)*(d*d*np.power(rho,-5./3.)/np.power(1+d*rhom3,2)-dl/rho)
d2f_dradgaa = dw/w*fcgamaa - a*b*w*(
(1./9.)*rhob*(1-3*dl-(dl-11)*rhoa/rho)
-(1./9.)*rhoa*rhob*((3+rhoa/rho)*ddl+(dl-11)*rhob/rho/rho))
d2f_dradgbb = dw/w*fcgambb - a*b*w*(
(1./9.)*rhob*(1-3*dl-(dl-11)*rhob/rho)
-(1./9.)*rhoa*rhob*((3+rhob/rho)*ddl-(dl-11)*rhob/rho/rho)
-2*rhoa)
d2f_dradgab = dw/w*fcgamab-a*b*w*(
(1./9)*rhob*(47-7*dl)-(7./9.)*rhoa*rhob*ddl-(8./3.)*rho)
d2f_drbdgaa = dw/w*fcgamaa - a*b*w*(
(1./9.)*rhoa*(1-3*dl-(dl-11)*rhoa/rho)
-(1./9.)*rhoa*rhob*((3+rhoa/rho)*ddl-(dl-11)*rhoa/rho/rho)
-2*rhob)
d2f_drbdgbb = dw/w*fcgambb - a*b*w*(
(1./9.)*rhoa*(1-3*dl-(dl-11)*rhob/rho)
-(1./9.)*rhoa*rhob*((3+rhob/rho)*ddl+(dl-11)*rhoa/rho/rho))
d2f_drbdgab = dw/w*fcgamab-a*b*w*(
(1./9)*rhoa*(47-7*dl)-(7./9.)*rhoa*rhob*ddl-(8./3.)*rho)
fcrhoa = fcrhob = 0
if rhoa > tol:
fcrhoa = -4*a/(1+d*rhom3)*rhoa*rhob/rho*(
(1./3.)*d*np.power(rho,-4./3.)/(1+d*rhom3)+1/rhoa-1/rho)\
-np.power(2,11./3.)*0.3*np.power(3*np.pi*np.pi,2./3.)*a*b*(
dw*rhoa*rhob*(np.power(rhoa,8./3.)+np.power(rhob,8./3.))
+w*rhob*((11./3.)*np.power(rhoa,8./3.)+np.power(rhob,8./3.))) \
+d2f_dradgaa*gamaa + d2f_dradgbb*gambb + d2f_dradgab*gamab
if rhob > tol:
fcrhob = -4*a/(1+d*rhom3)*rhoa*rhob/rho*(
(1./3.)*d*np.power(rho,-4./3.)/(1+d*rhom3)+1/rhob-1/rho)\
-np.power(2,11./3.)*0.3*np.power(3*np.pi*np.pi,2./3.)*a*b*(
dw*rhoa*rhob*(np.power(rhob,8./3.)+np.power(rhoa,8./3.))
+w*rhoa*((11./3.)*np.power(rhob,8./3.)+np.power(rhoa,8./3.))) \
+d2f_drbdgaa*gamaa + d2f_drbdgbb*gambb + d2f_drbdgab*gamab
return fc,fcrhoa,fcrhob,fcgamaa,fcgamab,fcgambb
def cpbe(na,nb,ga,gab,gb):
"PBE Correlation Functional"
npts = len(na)
ec = np.zeros(npts,'d')
vca = np.zeros(npts,'d')
vcb = np.zeros(npts,'d')
vcga = np.zeros(npts,'d')
vcgab = np.zeros(npts,'d')
vcgb = np.zeros(npts,'d')
for i in range(npts):
ec[i],vca[i],vcb[i],vcga[i],vcgab[i],vcgb[i] = \
            cpbe_point(na[i],nb[i],ga[i],gb[i],gab[i])
return ec,vca,vcb,vcga,vcgab,vcgb
def cpbe_point(rhoa,rhob,gama,gamb,gamab,tol=1e-10):
rho = rhoa+rhob
ec = vca = vcb = vcgama = vcgamb = vcgamab = 0
gam = 0.031091
ohm = 0.046644
bet = 0.066725
if rho > tol:
Rs = np.power(3./(4.*np.pi*rho),1./3.)
Zeta = (rhoa-rhob)/rho
Kf = np.power(3*np.pi*np.pi*rho,1./3.)
Ks = np.sqrt(4*Kf/np.pi)
Phi = 0.5*(np.power(1+Zeta,2./3.) + np.power(1-Zeta,2./3.))
Phi3 = Phi*Phi*Phi
gradrho = np.sqrt(gama+gamb+2.*gamab)
T = gradrho/(2*Phi*Ks*rho)
T2 = T*T
T4 = T2*T2
eps,vc0a,vc0b = cpbe_lsd(rhoa,rhob)
expo = (np.exp(-eps/(gam*Phi3))-1.)
A = bet/gam/expo
N = T2+A*T4
D = 1.+A*T2+A*A*T4
H = gam*Phi3*np.log(1.+(bet/gam)*N/D)
ec = rho*(eps+H)
# Derivative stuff
dZ_drhoa = (1.-Zeta)/rho
dZ_drhob = -(1.+Zeta)/rho
dPhi_dZ = np.power(1.+Zeta,-1./3.)/3.-np.power(1.-Zeta,-1./3.)/3.
dPhi_drhoa = dPhi_dZ*dZ_drhoa
dPhi_drhob = dPhi_dZ*dZ_drhob
dKs_drho = Ks/(6*rho)
dT_dPhi = -T/Phi
dT_dKs = -T/Ks
dT_drhoa = -T/rho + dT_dPhi*dPhi_drhoa + dT_dKs*dKs_drho
dT_drhob = -T/rho + dT_dPhi*dPhi_drhob + dT_dKs*dKs_drho
dA_dPhi = -A/expo*np.exp(-eps/(gam*Phi3))*(3*eps/(gam*Phi3*Phi))
dA_deps = -A/expo*np.exp(-eps/(gam*Phi3))*(-1/(gam*Phi3))
deps_drhoa = (vc0a-eps)/rho
deps_drhob = (vc0b-eps)/rho
dA_drhoa = dA_dPhi*dPhi_drhoa + dA_deps*deps_drhoa
dA_drhob = dA_dPhi*dPhi_drhob + dA_deps*deps_drhoa
dN_dT = 2*T+4*A*T2*T
dD_dT = 2*A*T + 4*A*A*T*T2
dN_dA = T4
dD_dA = T2+2*A*T4
dH_dPhi = 3*H/Phi
dH_dT = bet*Phi3/(1.+bet/gam*N/D)*(D*dN_dT-N*dD_dT)/D/D
dH_dA = bet*Phi3/(1.+bet/gam*N/D)*(D*dN_dA-N*dD_dA)/D/D
dH_drhoa = dH_dPhi*dPhi_drhoa + dH_dT*dT_drhoa + dH_dA*dA_drhoa
dH_drhob = dH_dPhi*dPhi_drhob + dH_dT*dT_drhob + dH_dA*dA_drhob
vca = vc0a + H + rho*dH_drhoa
vcb = vc0b + H + rho*dH_drhob
        # Haven't done the dE_dgamma derivatives yet
return ec,vca,vcb,vcgama,vcgamab,vcgamb
def vwn_xx(x,b,c): return x*x+b*x+c
def vwn_epsp(x): return vwn_eps(x,0.0310907,-0.10498,3.72744,12.9352)
#def vwn_epsf(x): return vwn_eps(x,0.01554535,-0.32500,7.06042,13.0045)
def vwn_epsf(x): return vwn_eps(x,0.01554535,-0.32500,7.06042,18.0578)
def vwn_eps(x,a,x0,b,c):
Q = np.sqrt(4*c-b*b)
eps = a*(np.log(x*x/vwn_xx(x,b,c))
- b*(x0/vwn_xx(x0,b,c))*np.log(np.power(x-x0,2)/vwn_xx(x,b,c))
+ (2*b/Q)*(1-(x0*(2*x0+b)/vwn_xx(x0,b,c))) * np.arctan(Q/(2*x+b)))
#eps = a*(np.log(x*x/vwn_xx(x,b,c)) + (2*b/Q)*np.arctan(Q/(2*x+b))
# - (b*x0/vwn_xx(x0,b,c))*np.log(np.power(x-x0,2)/vwn_xx(x,b,c))
# + (2*(b+2*x0)/Q)*np.arctan(Q/(2*x+b)))
return eps
def vwn_eps0(x,a,x0,b,c):
def X(x): return x*x+b*x+c
Q = np.sqrt(4*c-b*b)
eps = a*(np.log(x*x/X(x)) + (2*b/Q)*np.arctan(Q/(2*x+b))
- (b*x0/X(x0))*np.log(np.power(x-x0,2)/X(x))
+ (2*(b+2*x0)/Q)*np.arctan(Q/(2*x+b)))
return eps
def vwn_depsp(x): return vwn_deps(x,0.0310907,-0.10498,3.72744,12.9352)
#def vwn_depsf(x): return vwn_deps(x,0.01554535,-0.32500,7.06042,13.0045)
def vwn_depsf(x): return vwn_deps(x,0.01554535,-0.32500,7.06042,18.0578)
def vwn_deps(x,a,x0,b,c):
q = np.sqrt(4*c-b*b)
deps = a*(2/x - (2*x+b)/vwn_xx(x,b,c)
- 4*b/(np.power(2*x+b,2)+q*q) - (b*x0/vwn_xx(x0,b,c))
* (2/(x-x0)-(2*x+b)/vwn_xx(x,b,c)-4*(2*x0+b)/(np.power(2*x+b,2)+q*q)))
return deps
def vwn_g(z): return 1.125*(np.power(1+z,4./3.)+np.power(1-z,4./3.)-2)
def vwn_dg(z): return 1.5*(np.power(1+z,1./3.)-np.power(1-z,1./3.))
def b88_g(x,b=0.0042):
return -1.5*np.power(3./4./np.pi,1./3.)-b*x*x/(1.+6.*b*x*np.arcsinh(x))
def b88_dg(x,b=0.0042):
num = 6*b*b*x*x*(x/np.sqrt(x*x+1)-np.arcsinh(x))-2*b*x
denom = np.power(1+6*b*x*np.arcsinh(x),2)
return num/denom
def cpbe_lsd(rhoa,rhob):
# Not quite VWN. AEM: It's usually called PW correlation
# LSD terms
# Note that this routine gives out ec, not fc.
    # If you'd rather have fc, use pw instead
rho = rhoa+rhob
Rs = np.power(3./(4.*np.pi*rho),1./3.)
Zeta = (rhoa-rhob)/rho
thrd = 1./3. # thrd*=various multiples of 1/3
thrd4 = 4*thrd
ggam=0.5198420997897463295344212145565 # gam= 2^(4/3)-2
fzz=8./(9.*ggam) # fzz=f''(0)= 8/(9*gam)
rtrs = np.sqrt(Rs)
eu,eurs = pbe_gcor(0.0310907,0.21370,7.5957,
3.5876,1.6382,0.49294,rtrs)
ep,eprs = pbe_gcor(0.01554535,0.20548,14.1189,
6.1977,3.3662,0.62517,rtrs)
alfm,alfrsm = pbe_gcor(0.0168869,0.11125,10.357,
3.6231,0.88026,0.49671,rtrs)
alfc = -alfm
z4 = Zeta**4
f=(np.power(1.+Zeta,thrd4)+np.power(1.-Zeta,thrd4)-2.)/ggam
eps = eu*(1.-f*z4)+ep*f*z4-alfm*f*(1.-z4)/fzz
ecrs = eurs*(1.-f*z4)+eprs*f*z4-alfrsm*f*(1.-z4)/fzz
fz = thrd4*(np.power(1.+Zeta,thrd)-np.power(1.-Zeta,thrd))/ggam
eczet = 4.*(Zeta**3)*f*(ep-eu+alfm/fzz)+fz*(z4*ep-z4*eu-(1.-z4)*alfm/fzz)
comm = eps -Rs*ecrs/3.-Zeta*eczet
vca = comm + eczet
vcb = comm - eczet
return eps,vca,vcb
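# Illustrative note (based on how cpbe_point uses this routine above): the
# per-volume LSD correlation energy density is recovered as rho*eps, e.g.
#   eps, vca, vcb = cpbe_lsd(rhoa, rhob)
#   fc_lsd = (rhoa + rhob) * eps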
def pbe_gcor(a,a1,b1,b2,b3,b4,rtrs):
# subroutine gcor2(a,a1,b1,b2,b3,b4,rtrs,gg,ggrs)
# slimmed down version of gcor used in pw91 routines, to interpolate
# lsd correlation energy, as given by (10) of
# j. p. perdew and y. wang, phys. rev. b {\bf 45}, 13244 (1992).
# k. burke, may 11, 1996.
# implicit real*8 (a-h,o-z)
q0 = -2.*a*(1.+a1*rtrs*rtrs)
q1 = 2.*a*rtrs*(b1+rtrs*(b2+rtrs*(b3+b4*rtrs)))
q2 = np.log(1.+1./q1)
gg = q0*q2
q3 = a*(b1/rtrs+2.*b2+rtrs*(3.*b3+4.*b4*rtrs))
ggrs = -2.*a*a1*q2-q0*q3/(q1*(1.+q1))
return gg,ggrs
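# For reference, the closed form assembled above (PW92 Eq. 10, with rtrs = sqrt(rs)):
#   G(rs) = -2*a*(1 + a1*rs) * log(1 + 1/(2*a*(b1*sqrt(rs) + b2*rs + b3*rs**1.5 + b4*rs**2)))
# and ggrs is its derivative dG/drs, built from q0..q3 as returned.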
if __name__ == '__main__':
import pylab as pyl
x = np.linspace(1e-12,1)
pyl.plot(x,vwn_eps(x,0.0310907,-0.10498,3.72744,12.9352))
pyl.plot(x,vwn_eps0(x,0.0310907,-0.10498,3.72744,12.9352))
pyl.show()
|
|
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix or nt, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix' or 'nt'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
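# Example of the portable usage described in the docstring (illustrative only):
#   import os
#   os.path.join('pkg', 'data')        # delegated to posixpath or ntpath
#   os.sep, os.linesep, os.name        # platform-specific constants
#   os.unlink('scratch.tmp')           # same call on posix and nt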
import abc
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
    # anyway, as a stub that always returns ENOTSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not path.isdir(name):
raise
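# Example (illustrative): create a nested directory tree, tolerating the case
# where it already exists.
#   os.makedirs('build/output/logs', exist_ok=True)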
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
top = fspath(top)
dirs = []
nondirs = []
walk_dirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
if name == 'nt' and isinstance(top, bytes):
scandir_it = _dummy_scandir(top)
else:
# Note that scandir is global in this module due
# to earlier import-*.
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
with scandir_it:
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
                # a directory, same behaviour as os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
                        # entry is not a symbolic link, same behaviour as
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
walk_dirs.append(entry.path)
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
islink, join = path.islink, path.join
for dirname in dirs:
new_path = join(top, dirname)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
else:
# Recurse into sub-directories
for new_path in walk_dirs:
yield from walk(new_path, topdown, onerror, followlinks)
# Yield after recursion if going bottom up
yield top, dirs, nondirs
class _DummyDirEntry:
"""Dummy implementation of DirEntry
Only used internally by os.walk(bytes). Since os.walk() doesn't need the
follow_symlinks parameter: don't implement it, always follow symbolic
links.
"""
def __init__(self, dir, name):
self.name = name
self.path = path.join(dir, name)
        # Mimic FindFirstFile/FindNextFile: we should get file attributes
# while iterating on a directory
self._stat = None
self._lstat = None
try:
self.stat(follow_symlinks=False)
except OSError:
pass
def stat(self, *, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
self._stat = stat(self.path)
return self._stat
else:
if self._lstat is None:
self._lstat = stat(self.path, follow_symlinks=False)
return self._lstat
def is_dir(self):
if self._lstat is not None and not self.is_symlink():
# use the cache lstat
stat = self.stat(follow_symlinks=False)
return st.S_ISDIR(stat.st_mode)
stat = self.stat()
return st.S_ISDIR(stat.st_mode)
def is_symlink(self):
stat = self.stat(follow_symlinks=False)
return st.S_ISLNK(stat.st_mode)
class _dummy_scandir:
    # listdir-based implementation for bytes paths on Windows
def __init__(self, dir):
self.dir = dir
self.it = iter(listdir(dir))
def __iter__(self):
return self
def __next__(self):
return _DummyDirEntry(self.dir, next(self.it))
def __enter__(self):
return self
def __exit__(self, *args):
self.it = iter(())
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
if not isinstance(top, int) or not hasattr(top, '__index__'):
top = fspath(top)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except FileNotFoundError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except FileNotFoundError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
continue
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
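# Illustrative behaviour of the wrapper above (a sketch, not exhaustive):
#   environ['MY_VAR'] = 'value'   # stored in the mapping and pushed to putenv()
#   del environ['MY_VAR']         # removed from the mapping and pushed to unsetenv()
#   'MY_VAR' in environ           # plain mapping lookup, no putenv/unsetenv call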
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
errors = sys.getfilesystemencodeerrors()
def fsencode(filename):
"""Encode filename (an os.PathLike, bytes, or str) to the filesystem
encoding with 'surrogateescape' error handler, return bytes unchanged.
On Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, str):
return filename.encode(encoding, errors)
else:
return filename
def fsdecode(filename):
"""Decode filename (an os.PathLike, bytes, or str) from the filesystem
encoding with 'surrogateescape' error handler, return str unchanged. On
Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
return filename
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
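# Illustrative round trip (assuming a UTF-8 filesystem encoding):
#   fsencode('caf\u00e9')    -> b'caf\xc3\xa9'
#   fsdecode(b'caf\xc3\xa9') -> 'caf\u00e9'
#   fsencode(b'already-bytes') is returned unchanged, as the docstring states.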
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
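# Example usage of the popen() defined above (illustrative only):
#   with popen('echo hello') as pipe:       # _wrap_close supports the with-statement
#       output = pipe.read()                # attribute access is proxied to the stream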
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
# For testing purposes, make sure the function is available when the C
# implementation exists.
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
# If there is no C implementation, make the pure Python version the
# implementation as transparently as possible.
if not _exists('fspath'):
fspath = _fspath
fspath.__name__ = "fspath"
class PathLike(abc.ABC):
"""Abstract base class for implementing the file system path protocol."""
@abc.abstractmethod
def __fspath__(self):
"""Return the file system path representation of the object."""
raise NotImplementedError
@classmethod
def __subclasshook__(cls, subclass):
return hasattr(subclass, '__fspath__')
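# Minimal sketch of a class satisfying the protocol above (illustrative only;
# the class name is made up for this example):
#
#   class ExamplePath:
#       def __init__(self, raw):
#           self._raw = raw
#       def __fspath__(self):
#           return self._raw
#
#   issubclass(ExamplePath, PathLike)   # True, via __subclasshook__
#   fspath(ExamplePath('/tmp/demo'))    # '/tmp/demo'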
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import glob
import optparse
import re
import subprocess
import sys
import test_format
NCVAL_VERDICT = {
'*** <input> is safe ***': True,
'*** <input> IS UNSAFE ***': False}
ValidatorResult = collections.namedtuple('ValidatorResult', 'verdict offsets')
def ParseNval(nval_content):
"""Parse content of @nval section.
Args:
nval_content: Content of @nval section (as produced by 32-bit ncval
with options --max_errors=-1 --detailed=false --cpuid-all).
Returns:
ValidatorResult
"""
lines = nval_content.split('\n')
last_line = lines.pop()
assert last_line == ''
verdict = NCVAL_VERDICT[lines.pop()]
offsets = set()
for line in lines:
if re.match(r'.+ > .+ \(read overflow of .+ bytes\)', line):
# Add to offsets something that designedly can't occur in offsets
# produced by ParseRdfaOutput, so that difference will show up and
# corresponding test won't be missed.
offsets.add('read overflow')
continue
if line == 'ErrorSegmentation':
# Same here.
offsets.add('ErrorSegmentation')
continue
# Parse error message of the form
# VALIDATOR: 4: Bad prefix usage
m = re.match(r'VALIDATOR: ([0-9a-f]+): (.*)$', line, re.IGNORECASE)
assert m is not None, "can't parse %r" % line
offset = int(m.group(1), 16)
offsets.add(offset)
return ValidatorResult(verdict=verdict, offsets=offsets)
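# Illustrative example of the format parsed above (values are made up):
#   ParseNval('VALIDATOR: 4: Bad prefix usage\n*** <input> IS UNSAFE ***\n')
#   -> ValidatorResult(verdict=False, offsets=set([4]))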
def ParseRval(rval_content):
"""Parse content of @rval section.
Args:
    rval_content: Content of @rval section (as produced by 64-bit ncval
with options --max_errors=-1 --readwrite_sfi --detailed=false
--annotate=false --cpuid-all).
Returns:
ValidatorResult
"""
lines = rval_content.split('\n')
last_line = lines.pop()
assert last_line == ''
verdict = NCVAL_VERDICT[lines.pop()]
offsets = set()
for prev_line, line in zip([None] + lines, lines):
if line.startswith('VALIDATOR: Checking jump targets:'):
continue
if line.startswith('VALIDATOR: Checking that basic blocks are aligned'):
continue
# Skip disassembler output of the form
# VALIDATOR: 0000000000000003: 49 89 14 07 mov [%r15+%rax*1], %rdx
m = re.match(r'VALIDATOR: ([0-9a-f]+):', line, re.IGNORECASE)
if m is not None:
continue
# Parse error message of the form
# VALIDATOR: ERROR: 20: Bad basic block alignment.
m = re.match(r'VALIDATOR: ERROR: ([0-9a-f]+): (.*)', line, re.IGNORECASE)
if m is not None:
offset = int(m.group(1), 16)
offsets.add(offset)
continue
# Parse two-line error messages of the form
# VALIDATOR: 0000000000000003: 49 89 14 07 mov [%r15+%rax*1], %rdx
# VALIDATOR: ERROR: Invalid index register in memory offset
m = re.match(r'VALIDATOR: (ERROR|WARNING): .*$', line, re.IGNORECASE)
if m is not None:
message_type = m.group(1)
assert prev_line is not None, (
"can't deduce error offset because line %r "
"is not preceded with disassembly" % line)
m2 = re.match(r'VALIDATOR: ([0-9a-f]+):', prev_line, re.IGNORECASE)
assert m2 is not None, "can't parse line %r preceding line %r" % (
prev_line,
line)
offset = int(m2.group(1), 16)
if message_type != 'WARNING':
offsets.add(offset)
continue
raise AssertionError("can't parse line %r" % line)
return ValidatorResult(verdict=verdict, offsets=offsets)
RDFA_VERDICT = {
'return code: 0': True,
'return code: 1': False}
def ParseRdfaOutput(rdfa_content):
"""Parse content of @rdfa_output section.
Args:
rdfa_content: Content of @rdfa_output section in .test file.
Returns:
ValidatorResult
"""
lines = rdfa_content.split('\n')
assert lines[-1] == ''
verdict = RDFA_VERDICT[lines[-2]]
offsets = set()
for line in lines[:-2]:
# Parse error message of the form
# 4: [1] DFA error in validator
m = re.match(r'([0-9a-f]+): \[\d+\] (.*)$', line, re.IGNORECASE)
assert m is not None, "can't parse %r" % line
offset = int(m.group(1), 16)
offsets.add(offset)
return ValidatorResult(verdict=verdict, offsets=offsets)
def Compare(options, items_list, stats):
val_field = {32: 'nval', 64: 'rval'}[options.bits]
val_parser = {32: ParseNval, 64: ParseRval}[options.bits]
info = dict(items_list)
if 'rdfa_output' not in info:
if val_field in info:
print ' rdfa_output section is missing'
stats['rdfa missing'] +=1
else:
print ' both sections are missing'
stats['both missing'] += 1
return items_list
if val_field not in info:
print ' @%s section is missing' % val_field
stats['val missing'] += 1
return items_list
val = val_parser(info[val_field])
rdfa = ParseRdfaOutput(info['rdfa_output'])
if val == rdfa:
stats['agree'] += 1
if options.git:
# Stage the file for commit, so that files that pass can be
# committed with "git commit" (without -a or other arguments).
subprocess.check_call(['git', 'add', test_filename])
if 'validators_disagree' in info:
stats['spurious @validators_disagree'] += 1
print (' warning: validators agree, but the section '
'"@validators_disagree" is present')
else:
stats['disagree'] += 1
if 'validators_disagree' in info:
print ' validators disagree, see @validators_disagree section'
else:
print ' validators disagree!'
stats['unexplained disagreements'] += 1
diff = ['TODO: explain this\n']
if val.verdict != rdfa.verdict:
diff.append('old validator verdict: %s\n' % val.verdict)
diff.append('rdfa validator verdict: %s\n' % rdfa.verdict)
set_diff = val.offsets - rdfa.offsets
if len(set_diff) > 0:
diff.append('errors reported by old validator but not by rdfa one:\n')
for offset in sorted(set_diff):
if isinstance(offset, int):
offset = hex(offset)
diff.append(' %s\n' % offset)
set_diff = rdfa.offsets - val.offsets
if len(set_diff) > 0:
diff.append('errors reported by rdfa validator but not by old one:\n')
for offset in sorted(set_diff):
if isinstance(offset, int):
offset = hex(offset)
diff.append(' %s\n' % offset)
items_list = items_list + [('validators_disagree', ''.join(diff))]
return items_list
def main(args):
parser = optparse.OptionParser()
parser.add_option('--bits',
type=int,
help='The subarchitecture to run tests against: 32 or 64')
parser.add_option('--update',
default=False,
action='store_true',
help='When validators disagree, fill in '
'@validators_disagree section (if not present)')
# TODO(shcherbina): Get rid of this option once most tests are committed.
parser.add_option('--git',
default=False,
action='store_true',
help='Add tests with no discrepancies to git index')
options, args = parser.parse_args(args)
if options.bits not in [32, 64]:
parser.error('specify --bits 32 or --bits 64')
if len(args) == 0:
parser.error('No test files specified')
stats = collections.defaultdict(int)
for glob_expr in args:
test_files = sorted(glob.glob(glob_expr))
if len(test_files) == 0:
raise AssertionError(
'%r matched no files, which was probably not intended' % glob_expr)
for test_file in test_files:
print 'Comparing', test_file
tests = test_format.LoadTestFile(test_file)
tests = [Compare(options, test, stats) for test in tests]
if options.update:
test_format.SaveTestFile(tests, test_file)
print 'Stats:'
for key, value in stats.items():
print ' %s: %d' %(key, value)
if options.update:
if stats['unexplained disagreements'] > 0:
print '@validators_disagree sections were created'
else:
if stats['unexplained disagreements'] > 0:
sys.exit(1)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.config import config
from twistedcaldav.directory import calendaruserproxy
from twistedcaldav.directory.calendaruserproxy import ProxySqliteDB, \
ProxyPostgreSQLDB
from twistedcaldav.directory.calendaruserproxyloader import XMLCalendarUserProxyLoader
import twistedcaldav.test.util
import os
class ProxyPrincipalDBSqlite (twistedcaldav.test.util.TestCase):
"""
Directory service provisioned principals.
"""
class old_ProxyDB(ProxySqliteDB):
def _db_version(self):
"""
@return: the schema version assigned to this index.
"""
return "3"
def _db_init_data_tables(self):
"""
Initialise the underlying database tables.
@param q: a database cursor to use.
"""
#
# GROUPS table
#
return self._db_execute(
"""
create table GROUPS (
GROUPNAME text,
MEMBER text
)
"""
)
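    # NOTE: the nested *_ProxyDB classes override only the reported schema
    # version; the tests below exercise how ProxySqliteDB reacts when it finds
    # an older or newer version on disk (the GROUPNAMES/MEMBERS indexes asserted
    # in test_DBUpgrade are the expected post-upgrade state, as observed from
    # the assertions themselves).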
class new_ProxyDB(ProxySqliteDB):
def _db_version(self):
"""
@return: the schema version assigned to this index.
"""
return "11"
class newer_ProxyDB(ProxySqliteDB):
def _db_version(self):
"""
@return: the schema version assigned to this index.
"""
return "51"
@inlineCallbacks
def test_normalDB(self):
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = ProxySqliteDB(db_path)
yield db.setGroupMembers("A", ("B", "C", "D",))
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
@inlineCallbacks
def test_normalDBNonAscii(self):
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = ProxySqliteDB(db_path)
principalID = "Test \xe4\xbd\x90\xe8\x97\xa4"
yield db.setGroupMembers(principalID, ("B", "C", "D",))
membersA = yield db.getMembers(principalID)
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set((principalID,)))
@inlineCallbacks
def test_DBIndexed(self):
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = ProxySqliteDB(db_path)
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set(("GROUPNAMES", "MEMBERS")))
@inlineCallbacks
def test_OldDB(self):
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = self.old_ProxyDB(db_path)
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set())
@inlineCallbacks
def test_DBUpgrade(self):
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = self.old_ProxyDB(db_path)
yield db.setGroupMembers("A", ("B", "C", "D",))
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set())
db.close()
db = None
db = ProxySqliteDB(db_path)
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set(("GROUPNAMES", "MEMBERS")))
db.close()
db = None
@inlineCallbacks
def test_DBUpgradeNewer(self):
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = self.old_ProxyDB(db_path)
yield db.setGroupMembers("A", ("B", "C", "D",))
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set())
db.close()
db = None
db = self.new_ProxyDB(db_path)
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set(("GROUPNAMES", "MEMBERS")))
db.close()
db = None
@inlineCallbacks
def test_DBNoUpgradeNewer(self):
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = self.new_ProxyDB(db_path)
yield db.setGroupMembers("A", ("B", "C", "D",))
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set(("GROUPNAMES", "MEMBERS")))
db.close()
db = None
db = self.newer_ProxyDB(db_path)
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(set([row[1] for row in (yield db.query("PRAGMA index_list(GROUPS)"))]), set(("GROUPNAMES", "MEMBERS")))
db.close()
db = None
@inlineCallbacks
def test_cachingDBInsert(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = ProxySqliteDB(db_path)
# Do one insert and check the result
yield db.setGroupMembers("A", ("B", "C", "D",))
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
membershipsC = yield db.getMemberships("C")
membershipsD = yield db.getMemberships("D")
membershipsE = yield db.getMemberships("E")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(membershipsC, set(("A",)))
self.assertEqual(membershipsD, set(("A",)))
self.assertEqual(membershipsE, set(()))
# Change and check the result
yield db.setGroupMembers("A", ("B", "C", "E",))
membersA = yield db.getMembers("A")
membershipsB = yield db.getMemberships("B")
membershipsC = yield db.getMemberships("C")
membershipsD = yield db.getMemberships("D")
membershipsE = yield db.getMemberships("E")
self.assertEqual(membersA, set(("B", "C", "E",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(membershipsC, set(("A",)))
self.assertEqual(membershipsD, set())
self.assertEqual(membershipsE, set(("A",)))
yield db.clean()
@inlineCallbacks
def test_cachingDBRemove(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = ProxySqliteDB(db_path)
# Do one insert and check the result
yield db.setGroupMembers("A", ("B", "C", "D",))
yield db.setGroupMembers("X", ("B", "C",))
membersA = yield db.getMembers("A")
membersX = yield db.getMembers("X")
membershipsB = yield db.getMemberships("B")
membershipsC = yield db.getMemberships("C")
membershipsD = yield db.getMemberships("D")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membersX, set(("B", "C",)))
self.assertEqual(membershipsB, set(("A", "X",)))
self.assertEqual(membershipsC, set(("A", "X",)))
self.assertEqual(membershipsD, set(("A",)))
# Remove and check the result
yield db.removeGroup("A")
membersA = yield db.getMembers("A")
membersX = yield db.getMembers("X")
membershipsB = yield db.getMemberships("B")
membershipsC = yield db.getMemberships("C")
membershipsD = yield db.getMemberships("D")
self.assertEqual(membersA, set())
self.assertEqual(membersX, set(("B", "C",)))
            self.assertEqual(membershipsB, set(("X",)))
            self.assertEqual(membershipsC, set(("X",)))
self.assertEqual(membershipsD, set())
yield db.clean()
@inlineCallbacks
def test_cachingDBRemoveSpecial(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = ProxySqliteDB(db_path)
# Do one insert and check the result
yield db.setGroupMembers("A", ("B", "C", "D",))
yield db.setGroupMembers("X", ("B", "C",))
membershipsB = yield db.getMemberships("B")
membershipsC = yield db.getMemberships("C")
membershipsD = yield db.getMemberships("D")
# Remove and check the result
yield db.removeGroup("A")
membersA = yield db.getMembers("A")
membersX = yield db.getMembers("X")
membershipsB = yield db.getMemberships("B")
membershipsC = yield db.getMemberships("C")
membershipsD = yield db.getMemberships("D")
self.assertEqual(membersA, set())
self.assertEqual(membersX, set(("B", "C",)))
            self.assertEqual(membershipsB, set(("X",)))
            self.assertEqual(membershipsC, set(("X",)))
self.assertEqual(membershipsD, set())
yield db.clean()
@inlineCallbacks
def test_cachingDBInsertUncached(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
db_path = os.path.abspath(self.mktemp())
db = ProxySqliteDB(db_path)
# Do one insert and check the result for the one we will remove
yield db.setGroupMembers("AA", ("BB", "CC", "DD",))
yield db.getMemberships("DD")
# Change and check the result
yield db.setGroupMembers("AA", ("BB", "CC", "EE",))
membersAA = yield db.getMembers("AA")
membershipsBB = yield db.getMemberships("BB")
membershipsCC = yield db.getMemberships("CC")
membershipsDD = yield db.getMemberships("DD")
membershipsEE = yield db.getMemberships("EE")
self.assertEqual(membersAA, set(("BB", "CC", "EE",)))
self.assertEqual(membershipsBB, set(("AA",)))
self.assertEqual(membershipsCC, set(("AA",)))
self.assertEqual(membershipsDD, set())
self.assertEqual(membershipsEE, set(("AA",)))
yield db.clean()
class ProxyPrincipalDBPostgreSQL (twistedcaldav.test.util.TestCase):
"""
Directory service provisioned principals.
"""
@inlineCallbacks
def setUp(self):
super(ProxyPrincipalDBPostgreSQL, self).setUp()
self.db = ProxyPostgreSQLDB(host="localhost", database="proxies")
yield self.db.clean()
@inlineCallbacks
def tearDown(self):
yield self.db.close()
self.db = None
@inlineCallbacks
def test_normalDB(self):
# Get the DB
yield self.db.clean()
calendaruserproxy.ProxyDBService = self.db
loader = XMLCalendarUserProxyLoader("/Volumes/Data/Users/cyrusdaboo/Documents/Development/Apple/eclipse/CalendarServer-3/conf/auth/proxies-test.xml")
yield loader.updateProxyDB()
yield self.db.setGroupMembers("A", ("B", "C", "D",))
membersA = yield self.db.getMembers("A")
membershipsB = yield self.db.getMemberships("B")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
@inlineCallbacks
def test_DBIndexed(self):
# Get the DB
yield self.db.clean()
self.assertTrue((yield self.db.queryOne("select hasindexes from pg_tables where tablename = 'groups'")))
@inlineCallbacks
def test_cachingDBInsert(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
yield self.db.clean()
# Do one insert and check the result
yield self.db.setGroupMembers("A", ("B", "C", "D",))
membersA = yield self.db.getMembers("A")
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
membershipsE = yield self.db.getMemberships("E")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(membershipsC, set(("A",)))
self.assertEqual(membershipsD, set(("A",)))
self.assertEqual(membershipsE, set(()))
# Change and check the result
yield self.db.setGroupMembers("A", ("B", "C", "E",))
membersA = yield self.db.getMembers("A")
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
membershipsE = yield self.db.getMemberships("E")
self.assertEqual(membersA, set(("B", "C", "E",)))
self.assertEqual(membershipsB, set(("A",)))
self.assertEqual(membershipsC, set(("A",)))
self.assertEqual(membershipsD, set())
self.assertEqual(membershipsE, set(("A",)))
@inlineCallbacks
def test_cachingDBRemove(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
yield self.db.clean()
# Do one insert and check the result
yield self.db.setGroupMembers("A", ("B", "C", "D",))
yield self.db.setGroupMembers("X", ("B", "C",))
membersA = yield self.db.getMembers("A")
membersX = yield self.db.getMembers("X")
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membersX, set(("B", "C",)))
self.assertEqual(membershipsB, set(("A", "X",)))
self.assertEqual(membershipsC, set(("A", "X",)))
self.assertEqual(membershipsD, set(("A",)))
# Remove and check the result
yield self.db.removeGroup("A")
membersA = yield self.db.getMembers("A")
membersX = yield self.db.getMembers("X")
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
self.assertEqual(membersA, set())
self.assertEqual(membersX, set(("B", "C",)))
            self.assertEqual(membershipsB, set(("X",)))
            self.assertEqual(membershipsC, set(("X",)))
self.assertEqual(membershipsD, set())
@inlineCallbacks
def test_cachingDBRemoveSpecial(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
yield self.db.clean()
# Do one insert and check the result
yield self.db.setGroupMembers("A", ("B", "C", "D",))
yield self.db.setGroupMembers("X", ("B", "C",))
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
# Remove and check the result
yield self.db.removeGroup("A")
membersA = yield self.db.getMembers("A")
membersX = yield self.db.getMembers("X")
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
self.assertEqual(membersA, set())
self.assertEqual(membersX, set(("B", "C",)))
            self.assertEqual(membershipsB, set(("X",)))
            self.assertEqual(membershipsC, set(("X",)))
self.assertEqual(membershipsD, set())
@inlineCallbacks
def test_cachingDBRemovePrincipal(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
yield self.db.clean()
# Do one insert and check the result
yield self.db.setGroupMembers("A", ("B", "C", "D",))
yield self.db.setGroupMembers("X", ("B", "C",))
membersA = yield self.db.getMembers("A")
membersX = yield self.db.getMembers("X")
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
self.assertEqual(membersA, set(("B", "C", "D",)))
self.assertEqual(membersX, set(("B", "C",)))
self.assertEqual(membershipsB, set(("A", "X",)))
self.assertEqual(membershipsC, set(("A", "X",)))
self.assertEqual(membershipsD, set(("A",)))
# Remove and check the result
yield self.db.removePrincipal("B")
membersA = yield self.db.getMembers("A")
membersX = yield self.db.getMembers("X")
membershipsB = yield self.db.getMemberships("B")
membershipsC = yield self.db.getMemberships("C")
membershipsD = yield self.db.getMemberships("D")
self.assertEqual(membersA, set(("C", "D",)))
self.assertEqual(membersX, set(("C",)))
self.assertEqual(membershipsB, set())
self.assertEqual(membershipsC, set(("A", "X",)))
            self.assertEqual(membershipsD, set(("A",)))
@inlineCallbacks
def test_cachingDBInsertUncached(self):
for processType in ("Single", "Combined",):
config.ProcessType = processType
# Get the DB
yield self.db.clean()
# Do one insert and check the result for the one we will remove
yield self.db.setGroupMembers("AA", ("BB", "CC", "DD",))
yield self.db.getMemberships("DD")
# Change and check the result
yield self.db.setGroupMembers("AA", ("BB", "CC", "EE",))
membersAA = yield self.db.getMembers("AA")
membershipsBB = yield self.db.getMemberships("BB")
membershipsCC = yield self.db.getMemberships("CC")
membershipsDD = yield self.db.getMemberships("DD")
membershipsEE = yield self.db.getMemberships("EE")
self.assertEqual(membersAA, set(("BB", "CC", "EE",)))
self.assertEqual(membershipsBB, set(("AA",)))
self.assertEqual(membershipsCC, set(("AA",)))
self.assertEqual(membershipsDD, set())
self.assertEqual(membershipsEE, set(("AA",)))
try:
from txdav.base.datastore.subpostgres import postgres
except ImportError:
ProxyPrincipalDBPostgreSQL.skip = True
else:
try:
db = postgres.connect(host="localhost", database="proxies")
except:
ProxyPrincipalDBPostgreSQL.skip = True
|
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pager
COLORS = {None :-1,
'normal' :-1,
'black' : 0,
'red' : 1,
'green' : 2,
'yellow' : 3,
'blue' : 4,
'magenta': 5,
'cyan' : 6,
'white' : 7}
ATTRS = {None :-1,
'bold' : 1,
'dim' : 2,
'ul' : 4,
'blink' : 5,
'reverse': 7}
RESET = "\033[m" # pylint: disable=W1401
# backslash is not anomalous
def is_color(s):
return s in COLORS
def is_attr(s):
return s in ATTRS
def _Color(fg = None, bg = None, attr = None):
fg = COLORS[fg]
bg = COLORS[bg]
attr = ATTRS[attr]
if attr >= 0 or fg >= 0 or bg >= 0:
need_sep = False
code = "\033[" #pylint: disable=W1401
if attr >= 0:
code += chr(ord('0') + attr)
need_sep = True
if fg >= 0:
if need_sep:
code += ';'
need_sep = True
if fg < 8:
code += '3%c' % (ord('0') + fg)
else:
code += '38;5;%d' % fg
if bg >= 0:
if need_sep:
code += ';'
need_sep = True
if bg < 8:
code += '4%c' % (ord('0') + bg)
else:
code += '48;5;%d' % bg
code += 'm'
else:
code = ''
return code
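# Illustrative examples (comments only, not executed): _Color(fg='red', attr='bold')
# builds the SGR sequence '\033[1;31m', _Color(bg='blue') builds '\033[44m', and
# _Color() with no arguments returns '' so callers can concatenate it safely.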
DEFAULT = None
def SetDefaultColoring(state):
"""Set coloring behavior to |state|.
This is useful for overriding config options via the command line.
"""
if state is None:
# Leave it alone -- return quick!
return
global DEFAULT
state = state.lower()
if state in ('auto',):
DEFAULT = state
elif state in ('always', 'yes', 'true', True):
DEFAULT = 'always'
elif state in ('never', 'no', 'false', False):
DEFAULT = 'never'
class Coloring(object):
def __init__(self, config, section_type):
self._section = 'color.%s' % section_type
self._config = config
self._out = sys.stdout
on = DEFAULT
if on is None:
on = self._config.GetString(self._section)
if on is None:
on = self._config.GetString('color.ui')
if on == 'auto':
if pager.active or os.isatty(1):
self._on = True
else:
self._on = False
elif on in ('true', 'always'):
self._on = True
else:
self._on = False
def redirect(self, out):
self._out = out
@property
def is_on(self):
return self._on
def write(self, fmt, *args):
self._out.write(fmt % args)
def flush(self):
self._out.flush()
def nl(self):
self._out.write('\n')
def printer(self, opt=None, fg=None, bg=None, attr=None):
s = self
c = self.colorer(opt, fg, bg, attr)
def f(fmt, *args):
s._out.write(c(fmt, *args))
return f
def nofmt_printer(self, opt=None, fg=None, bg=None, attr=None):
s = self
c = self.nofmt_colorer(opt, fg, bg, attr)
def f(fmt):
s._out.write(c(fmt))
return f
def colorer(self, opt=None, fg=None, bg=None, attr=None):
if self._on:
c = self._parse(opt, fg, bg, attr)
def f(fmt, *args):
output = fmt % args
return ''.join([c, output, RESET])
return f
else:
def f(fmt, *args):
return fmt % args
return f
def nofmt_colorer(self, opt=None, fg=None, bg=None, attr=None):
if self._on:
c = self._parse(opt, fg, bg, attr)
def f(fmt):
return ''.join([c, fmt, RESET])
return f
else:
def f(fmt):
return fmt
return f
def _parse(self, opt, fg, bg, attr):
if not opt:
return _Color(fg, bg, attr)
v = self._config.GetString('%s.%s' % (self._section, opt))
if v is None:
return _Color(fg, bg, attr)
v = v.strip().lower()
if v == "reset":
return RESET
elif v == '':
return _Color(fg, bg, attr)
have_fg = False
for a in v.split(' '):
if is_color(a):
if have_fg:
bg = a
else:
fg = a
elif is_attr(a):
attr = a
return _Color(fg, bg, attr)
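# A minimal usage sketch for the Coloring class above. The _StubConfig class and
# the 'status'/'added' names are illustrative assumptions, not part of the real
# repo configuration object; a real caller would pass the repo's GitConfig-style
# object instead.
if __name__ == '__main__':
    class _StubConfig(object):
        def GetString(self, name):
            # Behave as if 'color.ui = auto' were set and nothing else.
            return 'auto' if name == 'color.ui' else None

    out = Coloring(_StubConfig(), 'status')
    added = out.printer('added', fg='green', attr='bold')
    added('added:   %s', 'README.md')  # colored only when stdout is a tty
    out.nl()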
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute API that proxies via Cells Service."""
from oslo import messaging
from nova import availability_zones
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
check_instance_state = compute_api.check_instance_state
wrap_check_policy = compute_api.wrap_check_policy
check_policy = compute_api.check_policy
check_instance_lock = compute_api.check_instance_lock
check_instance_cell = compute_api.check_instance_cell
class ComputeRPCAPIRedirect(object):
    # NOTE(comstud): This is a list of methods where the cells_rpcapi
# and the compute_rpcapi methods have the same signatures. This
# is for transitioning to a common interface where we can just
# swap out the compute_rpcapi class with the cells_rpcapi class.
cells_compatible = ['start_instance', 'stop_instance',
'reboot_instance', 'suspend_instance',
'resume_instance', 'terminate_instance',
'soft_delete_instance', 'pause_instance',
'unpause_instance', 'revert_resize',
'confirm_resize', 'reset_network',
'inject_network_info',
'backup_instance', 'snapshot_instance']
def __init__(self, cells_rpcapi):
self.cells_rpcapi = cells_rpcapi
def __getattr__(self, key):
if key in self.cells_compatible:
return getattr(self.cells_rpcapi, key)
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
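# Illustrative behaviour of the redirect pattern above (a sketch, not executed
# here): given redirect = ComputeRPCAPIRedirect(cells_rpcapi), a call such as
# redirect.start_instance(ctxt, instance) is forwarded to
# cells_rpcapi.start_instance(ctxt, instance), while any attribute not listed in
# cells_compatible resolves to _noop_rpc_wrapper and simply returns None, so
# compute RPC methods not yet supported through cells become no-ops.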
class ConductorTaskRPCAPIRedirect(object):
    # NOTE(comstud): This is a list of methods where the cells_rpcapi
# and the compute_task_rpcapi methods have the same signatures. This
# is for transitioning to a common interface where we can just
# swap out the compute_task_rpcapi class with the cells_rpcapi class.
cells_compatible = ['build_instances', 'resize_instance',
'live_migrate_instance', 'rebuild_instance']
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
def __getattr__(self, key):
if key in self.cells_compatible:
return getattr(self.cells_rpcapi, key)
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
class RPCClientCellsProxy(object):
def __init__(self, target, version_cap):
super(RPCClientCellsProxy, self).__init__()
self.target = target
self.version_cap = version_cap
self._server = None
self._version = None
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def prepare(self, **kwargs):
ret = type(self)(self.target, self.version_cap)
ret.cells_rpcapi = self.cells_rpcapi
server = kwargs.pop('server', None)
version = kwargs.pop('version', None)
if kwargs:
raise ValueError("Unsupported kwargs: %s" % kwargs.keys())
if server:
ret._server = server
if version:
ret._version = version
return ret
def _check_version_cap(self, version):
client = rpc.get_client(self.target, version_cap=self.version_cap)
if not client.can_send_version(version):
raise messaging.RPCVersionCapError(version=version,
version_cap=self.version_cap)
def _make_msg(self, method, **kwargs):
version = self._version if self._version else self.target.version
self._check_version_cap(version)
return {
'method': method,
'namespace': None,
'version': version,
'args': kwargs
}
def _get_topic(self):
if self._server is not None:
return '%s.%s' % (self.target.topic, self._server)
else:
return self.target.topic
def can_send_version(self, version):
client = rpc.get_client(self.target, version_cap=self.version_cap)
return client.can_send_version(version)
def cast(self, ctxt, method, **kwargs):
msg = self._make_msg(method, **kwargs)
topic = self._get_topic()
self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic)
def call(self, ctxt, method, **kwargs):
msg = self._make_msg(method, **kwargs)
topic = self._get_topic()
return self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg,
topic, call=True)
class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
"""Class used to substitute Compute RPC API that will proxy
via the cells manager to a compute manager in a child cell.
"""
def get_client(self, target, version_cap, serializer):
return RPCClientCellsProxy(target, version_cap)
class ComputeCellsAPI(compute_api.API):
def __init__(self, *args, **kwargs):
super(ComputeCellsAPI, self).__init__(*args, **kwargs)
self.cells_rpcapi = cells_rpcapi.CellsAPI()
# Avoid casts/calls directly to compute
self.compute_rpcapi = ComputeRPCAPIRedirect(self.cells_rpcapi)
# Redirect conductor build_instances to cells
self._compute_task_api = ConductorTaskRPCAPIRedirect(self.cells_rpcapi)
self._cell_type = 'api'
def _cast_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
self.cells_rpcapi.cast_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _call_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
return self.cells_rpcapi.call_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Override compute API's checking of this. It'll happen in
child cell
"""
return max_count
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
for completeness.
"""
return super(ComputeCellsAPI, self).create(*args, **kwargs)
def update(self, context, instance, **kwargs):
"""Update an instance."""
cell_name = instance['cell_name']
if cell_name and self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method='update')
rv = super(ComputeCellsAPI, self).update(context,
instance, **kwargs)
kwargs_copy = kwargs.copy()
# We need to skip vm_state/task_state updates as the child
# cell is authoritative for these. The admin API does
# support resetting state, but it has been converted to use
# Instance.save() with an appropriate kwarg.
kwargs_copy.pop('vm_state', None)
kwargs_copy.pop('task_state', None)
if kwargs_copy:
try:
self._cast_to_cells(context, instance, 'update',
**kwargs_copy)
except exception.InstanceUnknownCell:
pass
return rv
def soft_delete(self, context, instance):
self._handle_cell_delete(context, instance, 'soft_delete')
def delete(self, context, instance):
self._handle_cell_delete(context, instance, 'delete')
def _handle_cell_delete(self, context, instance, method_name):
if not instance['cell_name']:
            delete_type = 'soft' if method_name == 'soft_delete' else 'hard'
self.cells_rpcapi.instance_delete_everywhere(context,
instance, delete_type)
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
# NOTE(danms): If we try to delete an instance with no cell,
# there isn't anything to salvage, so we can hard-delete here.
super(ComputeCellsAPI, self)._local_delete(context, instance, bdms,
method_name,
self._do_delete)
return
method = getattr(super(ComputeCellsAPI, self), method_name)
method(context, instance)
@check_instance_cell
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).restore(context, instance)
self._cast_to_cells(context, instance, 'restore')
@check_instance_cell
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).force_delete(context, instance)
self._cast_to_cells(context, instance, 'force_delete')
@check_instance_cell
def evacuate(self, context, instance, *args, **kwargs):
"""Evacuate the given instance with the provided attributes."""
super(ComputeCellsAPI, self).evacuate(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'evacuate', *args, **kwargs)
@check_instance_cell
def add_fixed_ip(self, context, instance, *args, **kwargs):
"""Add fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).add_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'add_fixed_ip',
*args, **kwargs)
@check_instance_cell
def remove_fixed_ip(self, context, instance, *args, **kwargs):
"""Remove fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).remove_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'remove_fixed_ip',
*args, **kwargs)
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
# FIXME(comstud): Cache this?
# Also: only calling super() to get state/policy checking
super(ComputeCellsAPI, self).get_diagnostics(context, instance)
return self._call_to_cells(context, instance, 'get_diagnostics')
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
# FIXME(comstud): Cache this?
# Also: only calling super() to get state/policy checking
super(ComputeCellsAPI, self).get_instance_diagnostics(context,
instance)
return self._call_to_cells(context, instance,
'get_instance_diagnostics')
@check_instance_cell
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None):
"""Rescue the given instance."""
super(ComputeCellsAPI, self).rescue(context, instance,
rescue_password=rescue_password,
rescue_image_ref=rescue_image_ref)
self._cast_to_cells(context, instance, 'rescue',
rescue_password=rescue_password,
rescue_image_ref=rescue_image_ref)
@check_instance_cell
def unrescue(self, context, instance):
"""Unrescue the given instance."""
super(ComputeCellsAPI, self).unrescue(context, instance)
self._cast_to_cells(context, instance, 'unrescue')
@wrap_check_policy
@check_instance_cell
def shelve(self, context, instance):
"""Shelve the given instance."""
self._cast_to_cells(context, instance, 'shelve')
@wrap_check_policy
@check_instance_cell
def shelve_offload(self, context, instance):
"""Offload the shelved instance."""
super(ComputeCellsAPI, self).shelve_offload(context, instance)
self._cast_to_cells(context, instance, 'shelve_offload')
@wrap_check_policy
@check_instance_cell
def unshelve(self, context, instance):
"""Unshelve the given instance."""
super(ComputeCellsAPI, self).unshelve(context, instance)
self._cast_to_cells(context, instance, 'unshelve')
@check_instance_cell
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
super(ComputeCellsAPI, self).set_admin_password(context, instance,
password=password)
self._cast_to_cells(context, instance, 'set_admin_password',
password=password)
@wrap_check_policy
@check_instance_cell
def get_vnc_console(self, context, instance, console_type):
"""Get a url to a VNC Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_vnc_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@check_instance_cell
def get_spice_console(self, context, instance, console_type):
"""Get a url to a SPICE Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_spice_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@check_instance_cell
def get_rdp_console(self, context, instance, console_type):
"""Get a url to a RDP Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_rdp_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@check_instance_cell
def get_serial_console(self, context, instance, console_type):
"""Get a url to a serial console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_serial_console_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_cell
def get_console_output(self, context, instance, *args, **kwargs):
"""Get console output for an instance."""
# NOTE(comstud): Calling super() just to get policy check
super(ComputeCellsAPI, self).get_console_output(context, instance,
*args, **kwargs)
return self._call_to_cells(context, instance, 'get_console_output',
*args, **kwargs)
@check_instance_cell
def _attach_volume(self, context, instance, volume_id, device,
disk_bus, device_type):
"""Attach an existing volume to an existing instance."""
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
return self._call_to_cells(context, instance, 'attach_volume',
volume_id, device, disk_bus, device_type)
@check_instance_cell
def _detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
self.volume_api.check_detach(context, volume)
self._cast_to_cells(context, instance, 'detach_volume',
volume)
@wrap_check_policy
@check_instance_cell
def associate_floating_ip(self, context, instance, address):
"""Makes calls to network_api to associate_floating_ip.
:param address: is a string floating ip address
"""
self._cast_to_cells(context, instance, 'associate_floating_ip',
address)
@check_instance_cell
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
super(ComputeCellsAPI, self).delete_instance_metadata(context,
instance, key)
self._cast_to_cells(context, instance, 'delete_instance_metadata',
key)
@wrap_check_policy
@check_instance_cell
def update_instance_metadata(self, context, instance,
metadata, delete=False):
rv = super(ComputeCellsAPI, self).update_instance_metadata(context,
instance, metadata, delete=delete)
try:
self._cast_to_cells(context, instance,
'update_instance_metadata',
metadata, delete=delete)
except exception.InstanceUnknownCell:
pass
return rv
def get_migrations(self, context, filters):
return self.cells_rpcapi.get_migrations(context, filters)
class ServiceProxy(object):
def __init__(self, obj, cell_path):
self._obj = obj
self._cell_path = cell_path
@property
def id(self):
return cells_utils.cell_with_item(self._cell_path, self._obj.id)
def __getitem__(self, key):
if key == 'id':
return self.id
return getattr(self._obj, key)
def __getattr__(self, key):
return getattr(self._obj, key)
class HostAPI(compute_api.HostAPI):
"""HostAPI() class for cells.
Implements host management related operations. Works by setting the
RPC API used by the base class to proxy via the cells manager to the
compute manager in the correct cell. Hosts specified with cells will
need to be of the format 'path!to!cell@host'.
DB methods in the base class are also overridden to proxy via the
cells manager.
"""
def __init__(self):
super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Cannot check this in API cell. This will be checked in the
target child cell.
"""
pass
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
return self.cells_rpcapi.get_host_uptime(context, host_name)
def service_get_all(self, context, filters=None, set_zones=False):
if filters is None:
filters = {}
if 'availability_zone' in filters:
zone_filter = filters.pop('availability_zone')
set_zones = True
else:
zone_filter = None
services = self.cells_rpcapi.service_get_all(context,
filters=filters)
if set_zones:
services = availability_zones.set_availability_zones(context,
services)
if zone_filter is not None:
services = [s for s in services
if s['availability_zone'] == zone_filter]
# NOTE(johannes): Cells adds the cell path as a prefix to the id
# to uniquely identify the service amongst all cells. Unfortunately
# the object model makes the id an integer. Use a proxy here to
# work around this particular problem.
# Split out the cell path first
cell_paths = []
for service in services:
cell_path, id = cells_utils.split_cell_and_item(service['id'])
service['id'] = id
cell_paths.append(cell_path)
# NOTE(danms): Currently cells does not support objects as
# return values, so just convert the db-formatted service objects
# to new-world objects here
services = obj_base.obj_make_list(context,
objects.ServiceList(),
objects.Service,
services)
# Now wrap it in the proxy with the original cell_path
services = [ServiceProxy(s, c) for s, c in zip(services, cell_paths)]
return services
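    # Illustrative example (a sketch, presuming the 'path!to!cell@item'
    # convention described in the HostAPI docstring above): a service row whose
    # cells-prefixed id is 'region!child@3' is split by split_cell_and_item()
    # into cell_path='region!child' and id=3, converted to a Service object,
    # and then wrapped so that ServiceProxy(service, 'region!child').id reads
    # back as 'region!child@3' while every other attribute passes through to
    # the underlying object.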
def service_get_by_compute_host(self, context, host_name):
db_service = self.cells_rpcapi.service_get_by_compute_host(context,
host_name)
# NOTE(danms): Currently cells does not support objects as
# return values, so just convert the db-formatted service objects
# to new-world objects here
if db_service:
return objects.Service._from_db_object(context,
objects.Service(),
db_service)
def service_update(self, context, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
        :param host_name: the name of the host machine that the service is
            running on
        :param binary: The name of the executable that the service runs as
        :param params_to_update: e.g. {'disabled': True}
"""
db_service = self.cells_rpcapi.service_update(
context, host_name, binary, params_to_update)
# NOTE(danms): Currently cells does not support objects as
# return values, so just convert the db-formatted service objects
# to new-world objects here
if db_service:
return objects.Service._from_db_object(context,
objects.Service(),
db_service)
def service_delete(self, context, service_id):
"""Deletes the specified service."""
self.cells_rpcapi.service_delete(context, service_id)
def instance_get_all_by_host(self, context, host_name):
"""Get all instances by host. Host might have a cell prepended
to it, so we'll need to strip it out. We don't need to proxy
this call to cells, as we have instance information here in
the API cell.
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
instances = super(HostAPI, self).instance_get_all_by_host(context,
host_name)
if cell_name:
instances = [i for i in instances
if i['cell_name'] == cell_name]
return instances
def task_log_get_all(self, context, task_name, beginning, ending,
host=None, state=None):
"""Return the task logs within a given range from cells,
optionally filtering by the host and/or state. For cells, the
host should be a path like 'path!to!cell@host'. If no @host
is given, only task logs from a particular cell will be returned.
"""
return self.cells_rpcapi.task_log_get_all(context,
task_name,
beginning,
ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Get a compute node from a particular cell by its integer ID.
compute_id should be in the format of 'path!to!cell@ID'.
"""
return self.cells_rpcapi.compute_node_get(context, compute_id)
def compute_node_get_all(self, context):
return self.cells_rpcapi.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.cells_rpcapi.compute_node_get_all(context,
hypervisor_match=hypervisor_match)
def compute_node_statistics(self, context):
return self.cells_rpcapi.compute_node_stats(context)
class InstanceActionAPI(compute_api.InstanceActionAPI):
"""InstanceActionAPI() class for cells."""
def __init__(self):
super(InstanceActionAPI, self).__init__()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def actions_get(self, context, instance):
return self.cells_rpcapi.actions_get(context, instance)
def action_get_by_request_id(self, context, instance, request_id):
return self.cells_rpcapi.action_get_by_request_id(context, instance,
request_id)
def action_events_get(self, context, instance, action_id):
return self.cells_rpcapi.action_events_get(context, instance,
action_id)
|
|
# exported win32 api
from ctypes import WINFUNCTYPE, GetLastError, windll, pythonapi, cast, c_buffer
from ctypes import create_string_buffer, c_ushort, c_ubyte, c_char, c_short,\
c_int, c_uint, c_ulong, c_long, c_void_p, byref, c_char_p,\
Structure, Union, py_object, POINTER, pointer, sizeof,\
string_at
from ctypes.wintypes import HANDLE, ULONG, DWORD, BOOL, LPCSTR,\
LPCWSTR, UINT
from ctypes import sizeof as c_sizeof
from struct import unpack, pack
import os
import socket
from errno import EINTR, EAGAIN
from msvcrt import open_osfhandle
try:
from ctypes import WinError
except ImportError:
from ctypes.wintypes import WinError
INVALID_HANDLE_VALUE = HANDLE(~0)
PIPE_NOWAIT = DWORD(0x00000001)
ERROR_NO_DATA = 232
NULL = c_ulong()
GROUP = SOCKET = UINT
LPDWORD = POINTER(DWORD)
PULONG_PTR = POINTER(c_ulong)
SOCKET = UINT
SERVICETYPE = UINT
GROUP = UINT
class _US(Structure):
_fields_ = [('Offset', DWORD), ('OffsetHigh', DWORD)]
class _U(Union):
_fields_ = [('s', _US), ('Pointer', c_void_p)]
_anonymous_ = ('s', )
class OVERLAPPED(Structure):
    _fields_ = [('Internal', POINTER(ULONG)), ('InternalHigh', POINTER(ULONG)),
('u', _U), ('hEvent', HANDLE), ('object', py_object)]
_anonymous_ = ('u', )
LPOVERLAPPED = POINTER(OVERLAPPED)
def _bool_error_throw(result, func, args):
if not result:
raise WinError()
return result
def _bool_error_check(result, func, args):
if not result:
return GetLastError()
return 0
def _invalid_handle_throw(result, func, args):
    # The functions using this errcheck declare restype SOCKET (a c_uint), so
    # compare against INVALID_SOCKET's integer value; a ctypes HANDLE instance
    # never compares equal to the plain int that ctypes passes in here.
    if result == SOCKET(-1).value:
        raise WinError()
    return result
def _zero_throw(result, func, args):
if result != 0:
raise WinError()
return result
lowCloseHandle = windll.kernel32.CloseHandle
lowCloseHandle.argtypes = [HANDLE]
lowCloseHandle.restype = BOOL
lowCloseHandle.errcheck = _bool_error_throw
def CloseHandle(hObject):
lowCloseHandle(hObject)
lowWSAGetLastError = windll.ws2_32.WSAGetLastError
lowWSAGetLastError.argtypes = []
lowWSAGetLastError.restype = c_int
def WSAGetLastError():
return lowWSAGetLastError()
lowWSASocket = windll.ws2_32.WSASocketA
lowWSASocket.argtypes = (c_int, c_int, c_int, c_void_p, GROUP, DWORD)
lowWSASocket.restype = SOCKET
lowWSASocket.errcheck = _invalid_handle_throw
def WSASocket(af, socket_type, protocol, lpProtocol=None, g=0, dwFlags=0):
s = lowWSASocket(af, socket_type, protocol, lpProtocol, g, dwFlags)
return s
class _UN_b(Structure):
_fields_ = [
('s_b1', c_ubyte),
('s_b2', c_ubyte),
('s_b3', c_ubyte),
('s_b4', c_ubyte),
]
class _UN_w(Structure):
_fields_ = [
('s_w1', c_ushort),
('s_w2', c_ushort),
]
class _UN(Structure):
_fields_ = [
('S_addr', c_ulong),
]
class in_addr(Union):
_fields_ = [
('S_un', _UN),
('S_un_b', _UN_b),
('S_un_w', _UN_w),
]
_anonymous_ = ('S_un', )
class sockaddr_in(Structure):
_fields_ = [
('sin_family', c_short),
('sin_port', c_ushort),
('sin_addr', in_addr),
('sz_pads', c_char * 8),
]
class WSABUF(Structure):
_fields_ = [('len', ULONG), ('buf', c_char_p)]
LPWSABUF = POINTER(WSABUF)
class FLOWSPEC(Structure):
_fields_ = [
('TokenRate', ULONG),
('TokenBucketSize', ULONG),
('PeakBandwidth', ULONG),
('Latency', ULONG),
('DelayVariation', ULONG),
('ServiceType', SERVICETYPE),
('MaxSduSize', ULONG),
('MinimumPolicedSize', ULONG),
]
LPQOS = POINTER(FLOWSPEC)
lowWSAConnect = windll.ws2_32.WSAConnect
lowWSAConnect.argtypes = (SOCKET, POINTER(sockaddr_in), c_int, LPWSABUF,
LPWSABUF, LPQOS, LPQOS)
lowWSAConnect.restype = c_int
lowWSAConnect.errcheck = _zero_throw
def WSAConnect(s, addr):
sa_addr = sockaddr_in()
host, port = addr
sa_addr.sin_family = socket.AF_INET
sa_addr.sin_port = socket.htons(port)
sa_addr.sin_addr.S_addr = unpack('<i', socket.inet_aton(host))[0]
lowWSAConnect(s, byref(sa_addr), c_sizeof(sa_addr), None, None, None, None)
lowWSAAccept = windll.ws2_32.WSAAccept
lowWSAAccept.argtypes = (SOCKET, POINTER(sockaddr_in), POINTER(c_int),
c_void_p, POINTER(DWORD))
lowWSAAccept.restype = SOCKET
lowWSAAccept.errcheck = _invalid_handle_throw
def WSAAccept(s):
sa_addr = sockaddr_in()
sa_addr_len = c_int(c_sizeof(sa_addr))
rc = lowWSAAccept(s, byref(sa_addr), byref(sa_addr_len), None, None)
port = socket.ntohs(sa_addr.sin_port)
host = socket.inet_ntoa(pack('<i', sa_addr.sin_addr.S_addr))
addr = (host, port)
return (rc, addr)
low_bind = windll.ws2_32.bind
low_bind.argtypes = (SOCKET, POINTER(sockaddr_in), c_int)
low_bind.restype = c_int
low_bind.errcheck = _zero_throw
def _bind(s, addr):
sa_addr = sockaddr_in()
host, port = addr
sa_addr.sin_family = socket.AF_INET
sa_addr.sin_port = socket.htons(port)
sa_addr.sin_addr.S_addr = unpack('<i', socket.inet_aton(host))[0]
low_bind(s, byref(sa_addr), c_sizeof(sa_addr))
low_listen = windll.ws2_32.listen
low_listen.argtypes = (SOCKET, c_int)
low_listen.restype = c_int
low_listen.errcheck = _zero_throw
def _listen(s, backlog):
low_listen(s, backlog)
low_getsockname = windll.ws2_32.getsockname
low_getsockname.argtypes = (SOCKET, POINTER(sockaddr_in), POINTER(c_int))
low_getsockname.restype = c_int
low_getsockname.errcheck = _zero_throw
def _getsockname(s):
sa_addr = sockaddr_in()
sa_addr_len = c_int(c_sizeof(sa_addr))
low_getsockname(s, byref(sa_addr), byref(sa_addr_len))
port = socket.ntohs(sa_addr.sin_port)
host = socket.inet_ntoa(pack('<i', sa_addr.sin_addr.S_addr))
addr = (host, port)
return addr
# from windows sdk
FIONREAD = 0x4004667f
FIONBIO = 0x8004667e
low_ioctlsocket = windll.ws2_32.ioctlsocket
low_ioctlsocket.argtypes = (SOCKET, c_long, POINTER(c_ulong))
low_ioctlsocket.restype = c_int
low_ioctlsocket.errcheck = _zero_throw
def _ioctlsocket(s, cmd, arg=0):
ul_arg = c_ulong(arg)
low_ioctlsocket(s, cmd, byref(ul_arg))
    return ul_arg.value  # ioctlsocket wrote the result back into ul_arg
class WinSocket(object):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __init__(self, handle=None):
if not handle:
handle = WSASocket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self._file_handle = handle
self._file_no = open_osfhandle(self._file_handle, 0)
def close(self):
CloseHandle(self._file_handle)
def fileno(self):
return self._file_no
def filehandle(self):
return self._file_handle
def bind(self, addr):
_bind(self._file_handle, addr)
def listen(self, backlog):
_listen(self._file_handle, backlog)
def accept(self):
s, addr = WSAAccept(self._file_handle)
return WinSocket(s), addr
def connect(self, addr):
WSAConnect(self._file_handle, addr)
def getsockname(self):
return _getsockname(self._file_handle)
# tcp-emulated socketpair, might fail
def Win32SocketPair():
localhost = '127.0.0.1'
with WinSocket() as listener:
listener.bind((localhost, 0))
listener.listen(1)
addr = listener.getsockname()
client = WinSocket()
client.connect(addr)
server, server_addr = listener.accept()
client_addr = client.getsockname()
if server_addr != client_addr:
client.close()
server.close()
raise OSError('win32 socketpair failure')
return server, client
def SetNonBlock(winsocket, enabled=True):
int_enabled = int(enabled)
_ioctlsocket(winsocket.filehandle(), FIONBIO, int_enabled)
def EstimateUnreadBytes(winsocket):
return int(_ioctlsocket(winsocket.filehandle(), FIONREAD))
def WriteUtf8(winsocket, data):
msg = data.encode('utf-8')
fd = winsocket.fileno()
written = 0
while len(msg):
try:
ret = os.write(fd, msg)
if ret == 0:
raise OSError('broken pipe')
written += ret
msg = msg[ret:]
except OSError as e:
if e.errno == EAGAIN:
break
if e.errno != EINTR:
raise
return written
def ReadUtf8(winsocket, length):
msg = bytes()
fd = winsocket.fileno()
while length:
try:
buf = os.read(fd, length)
if len(buf) == 0:
raise OSError('broken pipe')
length -= len(buf)
msg += buf
except OSError as e:
if e.errno == EAGAIN:
break
if e.errno != EINTR:
raise
return msg.decode('utf-8')
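# A minimal usage sketch for the helpers above (Windows only, and an
# illustrative assumption rather than part of the original module): build a
# TCP-emulated socket pair on the loopback interface and push a small UTF-8
# payload from one end to the other.
if __name__ == '__main__':
    server, client = Win32SocketPair()
    try:
        sent = WriteUtf8(client, u'ping')     # number of bytes written
        received = ReadUtf8(server, sent)     # blocks until those bytes arrive
        print('sent %d byte(s), received %r' % (sent, received))
        # SetNonBlock() and EstimateUnreadBytes() can be used for non-blocking
        # polling instead of the blocking read above.
    finally:
        client.close()
        server.close()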
|
|
# Shell python script to test a live SSO setup - Ed Crewe 26 Nov 2010
# It can be really fiddly testing out SSO proxy auth by typing in URLs etc.
# see Dave Spencer's guide at https://wiki.jasig.org/display/CAS/Proxy+CAS+Walkthrough
# This script does it for you against the deployed servers
# Run via python 2.4 or above ...
# python cas_tests.py [username]
# You will need to edit the constants below to match your setup ...
import unittest
import sys
import commands
import getpass
import urllib2
import urllib
from urlparse import urljoin
import cookielib
from xml.dom import minidom
# Optionally add a separate test_config file using the following format
try:
from test_config import *
except:
# Please edit these urls to match your cas server, proxy and app server urls
CAS_SERVER_URL = 'https://signin.k-state.edu/WebISO/login'
APP_URL = 'http://webdev.labs.ome.ksu.edu/'
APP_RESTRICTED = 'connect'
PROXY_URL = 'https://webdev.labs.ome.ksu.edu/accounts/login/casProxyCallback/'
# Depending on your cas login form you may need to adjust these field name keys
TOKEN = '_eventID' # CSRF token field name
CAS_SUCCESS = 'Login successful' # CAS server successful login flag (find string in html page)
AUTH = {'username' : 'garrett', # user field name
'password' : 'password', # password field name
'submit' : 'submit' # login submit button
}
SCRIPT = 'manage.py shell --plain < get_pgt.py' # A script to extract the PGT from your proxying server
class TestCAS(unittest.TestCase):
""" A class for testing a CAS setup both for standard and proxy authentication """
opener = None
auth = {}
urls = {}
def setUp(self):
self.cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
urllib2.install_opener(opener)
self.opener = opener
self.get_auth()
self.set_url('cas', CAS_SERVER_URL)
self.set_url('app', APP_URL)
self.set_url('proxy', PROXY_URL)
def set_url(self, name, url):
""" Make sure valid url with query string appended """
for end in ['/','.html','.htm']:
if url.endswith(end):
self.urls[name] = url
return
self.urls[name] = '%s/' % url
def test_cas(self):
""" Test ordinary and proxy CAS login
            NB: can't put these into separate tests since tickets
are required to be passed between tests
"""
print 'Testing with following URLs'
print '---------------------------'
print 'CAS server = %s' % self.urls['cas']
print 'Application server = %s' % self.urls['app']
print 'Proxy CAS server = %s' % self.urls['proxy']
print ''
print 'Test ordinary CAS login'
print '-----------------------'
self.ticket = self.login()
self.get_restricted(opener=self.opener)
self.logout()
print ''
print 'Test get proxy ticket'
print '---------------------'
self.ticket = self.login()
iou = self.proxy1_iou()
if iou.startswith('PGT'):
print 'PASS: Got IOU - %s for %s' % (iou, self.urls['proxy'])
else:
print iou
pgt = self.proxy2_pgt(iou)
if pgt.startswith('PGT'):
print 'PASS: Got PGT - %s' % pgt
else:
print pgt
pt = self.proxy3_pt(pgt)
if pt.startswith('PT'):
print 'PASS: Got PT - %s' % pt
else:
print pt
        # NB: Don't log out of the proxy app, but test proxy auth with new openers
# for the tests to be valid...
print ''
print 'Test SSO server login with proxy ticket'
print '---------------------------------------'
proxy = self.proxy4_login(pt)
if proxy:
print 'PASS: Got Success response for app %s using proxy %s' % (self.urls['app'], proxy)
else:
print 'FAIL: The proxy login to %s via %s has failed' % (self.urls['app'], self.urls['proxy'])
print ''
print 'Test direct proxy login'
print '-----------------------'
new_pt = self.proxy3_pt(pgt)
self.proxy5_login(new_pt)
return
def get_auth(self):
""" Get authentication by passing to this script on the command line """
if len(sys.argv) > 1:
self.auth['username'] = sys.argv[1]
else:
self.auth['username'] = getpass.getuser()
        self.auth['password'] = getpass.getpass('CAS Password for user %s:' % self.auth['username'])
return
def get_token(self, url=None, token=TOKEN, page=''):
""" Get CSRF token """
if url:
try:
r = self.opener.open(url)
except:
return 'FAIL: URL not found %s' % url
page = r.read()
if not page:
return 'FAIL: Page is empty'
starts = ['<input type="hidden" name="%s"' % token,
'value="']
return self.find_in_page(page, starts, '"')
def get_ticket(self, page, app_url):
""" Get CSRF token """
starts = [app_url,'?ticket=']
return self.find_in_page(page, starts, '"')
def find_in_dom(self, page, nesting=['body','form']):
""" Use dom to get values from XML or page """
dom = minidom.parseString(page)
for level in nesting:
try:
dom = dom.getElementsByTagName(level)[0]
except:
break
return dom.childNodes[0].nodeValue.strip()
def find_in_page(self, page, starts, stop):
""" Replace this with find_in_dom ?
Although without knowing the CAS login page this
is probably more generic.
Starts is a list to allow a series of marker points
in case a single start point marker is not unique
"""
pagepart = page
start = 0
for part in starts:
point = pagepart.find(part)
if point>-1:
start += point
else:
return "FAIL: Couldnt find '%s' in page" % part
pagepart = pagepart[start:]
start = start + len(part)
end = page[start:].find(stop)
if end == -1:
end = len(page[start:])
end = start + end
found = page[start:end]
return found.strip()
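    # Illustrative example (comments only, not executed): with a single marker,
    # a call such as self.find_in_page('... PGT-1234-example ...', ['PGT', ], ' ')
    # returns '-1234-example' (everything between the marker and the next space),
    # which is why proxy2_pgt() below re-prefixes the result with 'PGT'.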
def login(self):
""" Login to CAS server """
url = '%slogin?service=%s' % (self.urls['cas'], self.urls['app'])
ticket = ''
token = self.get_token(url)
if token:
if token.startswith('FAIL'):
print token
return ticket
else:
self.auth[TOKEN] = token
else:
print 'FAIL: CSRF Token could not be found on page'
return ticket
self.auth['service'] = self.urls['app']
data = urllib.urlencode(self.auth)
sso_resp = self.opener.open(url, data)
sso_page = sso_resp.read()
found = sso_page.find(CAS_SUCCESS) > -1
sso_resp.close()
if found:
ticket = self.get_ticket(sso_page, self.urls['app'])
print 'PASS: CAS logged in to %s' % url
else:
            print "FAIL: Couldn't log in to %s" % url
return ticket
def logout(self):
""" Logout inbetween tests """
url = '%slogout' % self.urls['cas']
app_resp = self.opener.open(url)
app_resp.close()
self.cj.clear()
print 'Logged out'
return
def get_restricted(self, ticket='', opener=None, print_page=False):
""" Access a restricted URL and see if its accessible
Use token to check if this page has redirected to SSO login
ie. success for get_token is a fail for get restricted
"""
url = '%s%s' % (self.urls['app'], APP_RESTRICTED)
if ticket:
url = '%s?ticket=%s' % (url, ticket)
try:
app_resp = opener.open(url)
ok = app_resp.code == 200
except:
            print "FAIL: couldn't log in to restricted app at %s" % url
return
page = app_resp.read()
if ok:
token = self.get_token(page=page)
if token and not token.startswith('FAIL'):
                print "FAIL: couldn't log in to restricted app at %s" % url
else:
print 'PASS: logged in to restricted app at %s' % url
else:
            print "FAIL: couldn't log in to restricted app at %s" % url
if print_page:
print page
app_resp.close()
def proxy1_iou(self):
""" Use login ticket to get proxy iou
NB: SSO server installation may require self.urls['proxy']/?pgtIou be called at the root
"""
url_args = (self.urls['cas'], self.ticket, self.urls['app'], self.urls['proxy'])
url = '%sserviceValidate?ticket=%s&service=%s&pgtUrl=%s' % url_args
try:
iou = self.opener.open(url)
except:
return 'FAIL: service validate url=%s not found' % url
page = iou.read()
if page.find('cas:authenticationSuccess') > -1:
iou_ticket = self.find_in_dom(page,['cas:serviceResponse',
'cas:authenticationSuccess',
'cas:proxyGrantingTicket'])
if iou_ticket:
return iou_ticket
else:
if page:
return "FAIL: NO PGIOU\n\n%s" % page
else:
return 'FAIL: PGIOU Empty response from %s' % url
else:
return 'FAIL: PGIOU Response failed authentication'
return None
def proxy2_pgt(self, iou):
""" Dig out the proxy granting ticket using shell script so this test class
is independent of CAS implementation - eg. can substitute this function
to get proxy ticket from Java CAS instead of django-cas for example
For a django-cas implementation this can be read from the ORM
by calling the django shell environment
"""
out = commands.getoutput(SCRIPT)
pgt = self.find_in_page(out, ['PGT',], ' ')
return 'PGT%s' % pgt
def proxy3_pt(self, pgt):
""" Use granting ticket to get proxy """
url_args = (self.urls['cas'], self.urls['app'], pgt)
url = '%sproxy?targetService=%s&pgt=%s' % url_args
try:
pt = self.opener.open(url)
except:
return 'FAIL: PTURL=%s not found' % url
page = pt.read()
if page.find('cas:serviceResponse') > -1:
try:
pt_ticket = self.find_in_dom(page,['cas:proxySuccess',
'cas:proxyTicket'])
return pt_ticket
except:
print url
print page
return ''
return None
def proxy4_login(self, pt):
""" Check proxy ticket for service
            Use a new opener so it does not have any cookies / auth already
"""
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
url_args = (self.urls['cas'], self.urls['app'], pt)
url = '%sproxyValidate?service=%s&ticket=%s' % url_args
try:
login = opener.open(url)
except:
return 'FAIL: PTURL=%s not found' % url
page = login.read()
print page
if page.find('cas:authenticationSuccess') > -1:
proxy = self.find_in_dom(page,['cas:proxies',
'cas:proxy'])
return proxy
return None
def proxy5_login(self, pt):
""" Use proxy ticket to login directly to app
            Use a new opener so it does not have any cookies / auth already
"""
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
return self.get_restricted(ticket=pt, opener=opener)
if __name__ == '__main__':
unittest.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, platform, subprocess
jcc_ver = '2.20'
machine = platform.machine()
if machine.startswith("iPod") or machine.startswith("iPhone"):
platform = 'ipod'
elif sys.platform == "win32" and "--compiler=mingw32" in sys.argv:
platform = 'mingw32'
else:
platform = sys.platform
# Add or edit the entry corresponding to your system in the JDK, INCLUDES,
# CFLAGS, DEBUG_CFLAGS, LFLAGS and JAVAC dictionaries below.
# These entries are used to build JCC _and_ by JCC to drive compiling and
# linking, via distutils or setuptools, of the extensions it generated code for.
#
# The key for your system is determined by the platform variable defined
# above.
#
# Instead of editing the entries below, you may also override these
# dictionaries with JCC_JDK, JCC_INCLUDES, JCC_CFLAGS, JCC_DEBUG_CFLAGS,
# JCC_LFLAGS and JCC_JAVAC environment variables using os.pathsep as value
# separator.
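# For example (an illustrative sketch, not output of this script; the paths are
# placeholders): on Linux one might run
#
#   JCC_JDK=/usr/lib/jvm/java-8-openjdk-amd64 \
#   JCC_INCLUDES=/opt/foo/include:/opt/bar/include \
#   python setup.py build
#
# where the JCC_INCLUDES value joins multiple directories with os.pathsep
# (':' on POSIX systems, ';' on Windows).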
if platform in ("win32", "mingw32"):
try:
JAVAFRAMEWORKS = None
from helpers.windows import JAVAHOME
except ImportError:
JAVAHOME = None
elif platform in ("darwin",):
try:
from helpers.darwin import JAVAHOME, JAVAFRAMEWORKS
except ImportError:
JAVAHOME = None
JAVAFRAMEWORKS = None
else:
JAVAHOME = None
JAVAFRAMEWORKS = None
JDK = {
'darwin': JAVAHOME or JAVAFRAMEWORKS,
'ipod': '/usr/include/gcc',
'linux2': '/usr/lib/jvm/java-7-openjdk-amd64',
'sunos5': '/usr/jdk/instances/jdk1.6.0',
'win32': JAVAHOME,
'mingw32': JAVAHOME,
'freebsd7': '/usr/local/diablo-jdk1.6.0'
}
if 'JCC_JDK' in os.environ:
JDK[platform] = os.environ['JCC_JDK']
if not JDK[platform]:
raise RuntimeError('''
Can't determine where the Java JDK has been installed on this machine.
Please set the environment variable JCC_JDK to that location before
running setup.py.
''')
elif not os.path.isdir(JDK[platform]):
raise RuntimeError('''
Java JDK directory '%s' does not exist.
Please set the environment variable JCC_JDK to the correct location before
running setup.py.
''' %(JDK[platform]))
INCLUDES = {
'darwin/frameworks': ['%(darwin)s/Headers' %(JDK)],
'darwin/home': ['%(darwin)s/include' %(JDK),
'%(darwin)s/include/darwin' %(JDK)],
'ipod': ['%(ipod)s/darwin/default' %(JDK)],
'linux2': ['%(linux2)s/include' %(JDK),
'%(linux2)s/include/linux' %(JDK)],
'sunos5': ['%(sunos5)s/include' %(JDK),
'%(sunos5)s/include/solaris' %(JDK)],
'win32': ['%(win32)s/include' %(JDK),
'%(win32)s/include/win32' %(JDK)],
'mingw32': ['%(mingw32)s/include' %(JDK),
'%(mingw32)s/include/win32' %(JDK)],
'freebsd7': ['%(freebsd7)s/include' %(JDK),
'%(freebsd7)s/include/freebsd' %(JDK)],
}
CFLAGS = {
'darwin': ['-fno-strict-aliasing', '-Wno-write-strings'],
'ipod': ['-Wno-write-strings'],
'linux2': ['-fno-strict-aliasing', '-Wno-write-strings'],
'sunos5': ['-features=iddollar',
'-erroff=badargtypel2w,wbadinitl,wvarhidemem'],
'win32': ["/EHsc", "/D_CRT_SECURE_NO_WARNINGS"], # MSVC 9 (2008)
'mingw32': ['-fno-strict-aliasing', '-Wno-write-strings'],
'freebsd7': ['-fno-strict-aliasing', '-Wno-write-strings'],
}
# added to CFLAGS when JCC is invoked with --debug
DEBUG_CFLAGS = {
'darwin': ['-O0', '-g', '-DDEBUG'],
'ipod': ['-O0', '-g', '-DDEBUG'],
'linux2': ['-O0', '-g', '-DDEBUG'],
'sunos5': ['-DDEBUG'],
'win32': ['/Od', '/DDEBUG'],
'mingw32': ['-O0', '-g', '-DDEBUG'],
'freebsd7': ['-O0', '-g', '-DDEBUG'],
}
LFLAGS = {
'darwin/frameworks': ['-framework', 'JavaVM'],
'darwin/home': ['-L%(darwin)s/jre/lib' %(JDK), '-ljava',
'-L%(darwin)s/jre/lib/server' %(JDK), '-ljvm',
'-Wl,-rpath', '-Wl,%(darwin)s/jre/lib' %(JDK),
'-Wl,-rpath', '-Wl,%(darwin)s/jre/lib/server' %(JDK)],
'ipod': ['-ljvm', '-lpython%s.%s' %(sys.version_info[0:2]),
'-L/usr/lib/gcc/arm-apple-darwin9/4.0.1'],
'linux2/i386': ['-L%(linux2)s/jre/lib/i386' %(JDK), '-ljava',
'-L%(linux2)s/jre/lib/i386/client' %(JDK), '-ljvm',
'-Wl,-rpath=%(linux2)s/jre/lib/i386:%(linux2)s/jre/lib/i386/client' %(JDK)],
'linux2/i686': ['-L%(linux2)s/jre/lib/i386' %(JDK), '-ljava',
'-L%(linux2)s/jre/lib/i386/client' %(JDK), '-ljvm',
'-Wl,-rpath=%(linux2)s/jre/lib/i386:%(linux2)s/jre/lib/i386/client' %(JDK)],
'linux2/x86_64': ['-L%(linux2)s/jre/lib/amd64' %(JDK), '-ljava',
'-L%(linux2)s/jre/lib/amd64/server' %(JDK), '-ljvm',
'-Wl,-rpath=%(linux2)s/jre/lib/amd64:%(linux2)s/jre/lib/amd64/server' %(JDK)],
'sunos5': ['-L%(sunos5)s/jre/lib/i386' %(JDK), '-ljava',
'-L%(sunos5)s/jre/lib/i386/client' %(JDK), '-ljvm',
'-R%(sunos5)s/jre/lib/i386:%(sunos5)s/jre/lib/i386/client' %(JDK)],
'win32': ['/LIBPATH:%(win32)s/lib' %(JDK), 'Ws2_32.lib', 'jvm.lib'],
'mingw32': ['-L%(mingw32)s/lib' %(JDK), '-ljvm'],
'freebsd7': ['-L%(freebsd7)s/jre/lib/i386' %(JDK), '-ljava', '-lverify',
'-L%(freebsd7)s/jre/lib/i386/client' %(JDK), '-ljvm',
'-Wl,-rpath=%(freebsd7)s/jre/lib/i386:%(freebsd7)s/jre/lib/i386/client' %(JDK)],
}
IMPLIB_LFLAGS = {
'win32': ["/IMPLIB:%s"],
'mingw32': ["-Wl,--out-implib,%s"]
}
if platform == 'linux2':
LFLAGS['linux2'] = LFLAGS['linux2/%s' %(machine)]
elif platform == 'darwin':
if JAVAHOME is not None:
INCLUDES['darwin'] = INCLUDES['darwin/home']
LFLAGS['darwin'] = LFLAGS['darwin/home']
elif JAVAFRAMEWORKS is not None:
INCLUDES['darwin'] = INCLUDES['darwin/frameworks']
LFLAGS['darwin'] = LFLAGS['darwin/frameworks']
JAVAC = {
'darwin': ['javac', '-source', '1.5', '-target', '1.5'],
'ipod': ['jikes', '-cp', '/usr/share/classpath/glibj.zip'],
'linux2': ['javac'],
'sunos5': ['javac'],
'win32': ['%(win32)s/bin/javac.exe' %(JDK)],
'mingw32': ['%(mingw32)s/bin/javac.exe' %(JDK)],
'freebsd7': ['javac'],
}
JAVADOC = {
'darwin': ['javadoc'],
'ipod': [],
'linux2': ['javadoc'],
'sunos5': ['javadoc'],
'win32': ['%(win32)s/bin/javadoc.exe' %(JDK)],
'mingw32': ['%(mingw32)s/bin/javadoc.exe' %(JDK)],
'freebsd7': ['javadoc'],
}
try:
if 'USE_DISTUTILS' in os.environ:
raise ImportError
from setuptools import setup, Extension
from pkg_resources import require
with_setuptools = require('setuptools')[0].parsed_version
enable_shared = False
with_setuptools_c7 = ('00000000', '00000006', '*c', '00000007', '*final')
with_setuptools_116 = ('00000001', '00000001', '00000006', '*final')
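    # NOTE: the two tuples above appear to be the pkg_resources
    # parsed_version encodings of setuptools 0.6c7 and 1.1.6 respectively
    # (an assumption based on the variable names and on how parsed_version
    # zero-padded version components at the time).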
if with_setuptools >= with_setuptools_c7 and 'NO_SHARED' not in os.environ:
if platform in ('ipod', 'win32'):
enable_shared = True
elif platform == 'darwin':
enable_shared = True
if with_setuptools >= with_setuptools_116:
                # fix Library building by monkey-patching the expected
                # _config_vars into build_ext; otherwise build_ext would
                # wrongly use sysconfig's values instead
from setuptools.command import build_ext
from distutils.sysconfig import get_config_var
get_config_var("LDSHARED") # ensure _config_vars is initialized
from distutils.sysconfig import _config_vars
build_ext._CONFIG_VARS = _config_vars
elif platform == 'linux2':
from helpers.linux import patch_setuptools
enable_shared = patch_setuptools(with_setuptools)
elif platform == 'mingw32':
enable_shared = True
# need to monkeypatch the CygwinCCompiler class to generate
# jcc.lib in the correct place
from helpers.mingw32 import JCCMinGW32CCompiler
import distutils.cygwinccompiler
distutils.cygwinccompiler.Mingw32CCompiler = JCCMinGW32CCompiler
if enable_shared:
if with_setuptools >= with_setuptools_116:
from setuptools.extension import Library
else:
from setuptools import Library
except ImportError:
if sys.version_info < (2, 4):
raise ImportError, 'setuptools is required when using Python 2.3'
else:
from distutils.core import setup, Extension
with_setuptools = None
enable_shared = False
def main(debug):
_jcc_argsep = os.environ.get('JCC_ARGSEP', os.pathsep)
if 'JCC_INCLUDES' in os.environ:
_includes = os.environ['JCC_INCLUDES'].split(_jcc_argsep)
else:
_includes = INCLUDES[platform]
if 'JCC_CFLAGS' in os.environ:
_cflags = os.environ['JCC_CFLAGS'].split(_jcc_argsep)
else:
_cflags = CFLAGS[platform]
if 'JCC_DEBUG_CFLAGS' in os.environ:
_debug_cflags = os.environ['JCC_DEBUG_CFLAGS'].split(_jcc_argsep)
else:
_debug_cflags = DEBUG_CFLAGS[platform]
if 'JCC_LFLAGS' in os.environ:
_lflags = os.environ['JCC_LFLAGS'].split(_jcc_argsep)
else:
_lflags = LFLAGS[platform]
if 'JCC_IMPLIB_LFLAGS' in os.environ:
_implib_lflags = os.environ['JCC_IMPLIB_LFLAGS'].split(_jcc_argsep)
else:
_implib_lflags = IMPLIB_LFLAGS.get(platform, [])
if 'JCC_JAVAC' in os.environ:
_javac = os.environ['JCC_JAVAC'].split(_jcc_argsep)
else:
_javac = JAVAC[platform]
if 'JCC_JAVADOC' in os.environ:
_javadoc = os.environ['JCC_JAVADOC'].split(_jcc_argsep)
else:
_javadoc = JAVADOC[platform]
from helpers.build import jcc_build_py
jcc_build_py.config_file = \
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'jcc', 'config.py')
jcc_build_py.config_text = \
'\n'.join(['',
'INCLUDES=%s' %(_includes),
'CFLAGS=%s' %(_cflags),
'DEBUG_CFLAGS=%s' %(_debug_cflags),
'LFLAGS=%s' %(_lflags),
'IMPLIB_LFLAGS=%s' %(_implib_lflags),
'SHARED=%s' %(enable_shared),
'VERSION="%s"' %(jcc_ver),
''])
extensions = []
boot = '_jcc'
cflags = ['-DPYTHON'] + _cflags
if debug:
cflags += _debug_cflags
includes = _includes + [boot, 'jcc/sources']
lflags = _lflags
if not debug:
if platform == 'win32':
pass
elif platform == 'sunos5':
lflags += ['-Wl,-s']
else:
lflags += ['-Wl,-S']
sources = ['jcc/sources/jcc.cpp',
'jcc/sources/JCCEnv.cpp',
'jcc/sources/JObject.cpp',
'jcc/sources/JArray.cpp',
'jcc/sources/functions.cpp',
'jcc/sources/types.cpp']
for path, dirs, names in os.walk(boot):
for name in names:
if name.endswith('.cpp'):
sources.append(os.path.join(path, name))
package_data = ['sources/*.cpp', 'sources/*.h', 'patches/patch.*']
if with_setuptools and enable_shared:
from subprocess import Popen, PIPE
kwds = { "extra_compile_args": cflags,
"include_dirs": includes,
"define_macros": [('_jcc_lib', None),
('JCC_VER', '"%s"' %(jcc_ver))],
"sources": sources[0:2] }
if platform in ('darwin', 'ipod'):
kwds["extra_link_args"] = \
lflags + ['-install_name', '@rpath/libjcc.dylib',
'-current_version', jcc_ver,
'-compatibility_version', jcc_ver]
elif platform == 'linux2':
kwds["extra_link_args"] = \
lflags + ['-lpython%s.%s' %(sys.version_info[0:2])]
kwds["force_shared"] = True # requires jcc/patches/patch.43
elif platform in IMPLIB_LFLAGS:
jcclib = 'jcc%s.lib' %(debug and '_d' or '')
implib_flags = ' '.join(IMPLIB_LFLAGS[platform])
kwds["extra_link_args"] = \
lflags + [implib_flags %(os.path.join('jcc', jcclib))]
package_data.append(jcclib)
else:
kwds["extra_link_args"] = lflags
extensions.append(Library('jcc', **kwds))
args = _javac[:]
args.extend(('-d', 'jcc/classes'))
args.append('java/org/apache/jcc/PythonVM.java')
args.append('java/org/apache/jcc/PythonException.java')
if not os.path.exists('jcc/classes'):
os.makedirs('jcc/classes')
try:
process = Popen(args, stderr=PIPE)
except Exception, e:
raise type(e), "%s: %s" %(e, args)
process.wait()
if process.returncode != 0:
raise OSError, process.stderr.read()
package_data.append('classes/org/apache/jcc/PythonVM.class')
package_data.append('classes/org/apache/jcc/PythonException.class')
args = _javadoc[:]
args.extend(('-d', 'javadoc', '-sourcepath', 'java', 'org.apache.jcc'))
try:
process = Popen(args, stderr=PIPE)
except Exception, e:
raise type(e), "%s: %s" %(e, args)
process.wait()
if process.returncode != 0:
raise OSError, process.stderr.read()
extensions.append(Extension('jcc._jcc',
extra_compile_args=cflags,
extra_link_args=lflags,
include_dirs=includes,
define_macros=[('_java_generics', None),
('JCC_VER', '"%s"' %(jcc_ver))],
sources=sources))
args = {
'name': 'JCC',
'version': jcc_ver,
'description': 'a C++ code generator for calling Java from C++/Python',
'long_description': open('DESCRIPTION').read(),
'author': 'Andi Vajda',
'author_email': '[email protected]',
'classifiers': ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: C++',
'Programming Language :: Java',
'Programming Language :: Python',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Libraries :: Java Libraries'],
'packages': ['jcc'],
'package_dir': {'jcc': 'jcc'},
'package_data': {'jcc': package_data},
'ext_modules': extensions,
"cmdclass": {"build_py": jcc_build_py},
}
if with_setuptools:
args['zip_safe'] = False
setup(**args)
if __name__ == "__main__":
main('--debug' in sys.argv)
|
|
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module implements a string formatter based on the standard PEP
292 string.Template class extended with function calls. Variables, as
with string.Template, are indicated with $ and functions are delimited
with %.
This module assumes that everything is Unicode: the template and the
substitution values. Bytestrings are not supported. Also, the templates
always behave like the ``safe_substitute`` method in the standard
library: unknown symbols are left intact.
This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache.
"""
from __future__ import print_function
import re
import ast
import dis
import types
SYMBOL_DELIM = u'$'
FUNC_DELIM = u'%'
GROUP_OPEN = u'{'
GROUP_CLOSE = u'}'
ARG_SEP = u','
ESCAPE_CHAR = u'$'
VARIABLE_PREFIX = '__var_'
FUNCTION_PREFIX = '__func_'
class Environment(object):
"""Contains the values and functions to be substituted into a
template.
"""
def __init__(self, values, functions):
self.values = values
self.functions = functions
# Code generation helpers.
def ex_lvalue(name):
    """A variable store (assignment target) expression."""
    return ast.Name(name, ast.Store())
def ex_rvalue(name):
    """A variable load expression."""
    return ast.Name(name, ast.Load())
def ex_literal(val):
"""An int, float, long, bool, string, or None literal with the given
value.
"""
if val is None:
return ast.Name('None', ast.Load())
elif isinstance(val, (int, float, long)):
return ast.Num(val)
elif isinstance(val, bool):
return ast.Name(str(val), ast.Load())
elif isinstance(val, basestring):
return ast.Str(val)
raise TypeError('no literal for {0}'.format(type(val)))
def ex_varassign(name, expr):
"""Assign an expression into a single variable. The expression may
either be an `ast.expr` object or a value to be used as a literal.
"""
if not isinstance(expr, ast.expr):
expr = ex_literal(expr)
return ast.Assign([ex_lvalue(name)], expr)
def ex_call(func, args):
"""A function-call expression with only positional parameters. The
function may be an expression or the name of a function. Each
argument may be an expression or a value to be used as a literal.
"""
if isinstance(func, basestring):
func = ex_rvalue(func)
args = list(args)
for i in range(len(args)):
if not isinstance(args[i], ast.expr):
args[i] = ex_literal(args[i])
return ast.Call(func, args, [], None, None)
def compile_func(arg_names, statements, name='_the_func', debug=False):
"""Compile a list of statements as the body of a function and return
the resulting Python function. If `debug`, then print out the
bytecode of the compiled function.
"""
func_def = ast.FunctionDef(
name,
ast.arguments(
[ast.Name(n, ast.Param()) for n in arg_names],
None, None,
[ex_literal(None) for _ in arg_names],
),
statements,
[],
)
mod = ast.Module([func_def])
ast.fix_missing_locations(mod)
prog = compile(mod, '<generated>', 'exec')
# Debug: show bytecode.
if debug:
dis.dis(prog)
for const in prog.co_consts:
if isinstance(const, types.CodeType):
dis.dis(const)
the_locals = {}
exec prog in {}, the_locals
return the_locals[name]
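# A hedged sketch of exercising compile_func on its own (the argument name
# 'x' is arbitrary and used only for illustration):
#
#   identity = compile_func(['x'], [ast.Return(ex_rvalue('x'))])
#   identity(42)  # -> 42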
# AST nodes for the template language.
class Symbol(object):
"""A variable-substitution symbol in a template."""
def __init__(self, ident, original):
self.ident = ident
self.original = original
def __repr__(self):
return u'Symbol(%s)' % repr(self.ident)
def evaluate(self, env):
"""Evaluate the symbol in the environment, returning a Unicode
string.
"""
if self.ident in env.values:
# Substitute for a value.
return env.values[self.ident]
else:
# Keep original text.
return self.original
def translate(self):
"""Compile the variable lookup."""
expr = ex_rvalue(VARIABLE_PREFIX + self.ident.encode('utf8'))
return [expr], set([self.ident.encode('utf8')]), set()
class Call(object):
"""A function call in a template."""
def __init__(self, ident, args, original):
self.ident = ident
self.args = args
self.original = original
def __repr__(self):
return u'Call(%s, %s, %s)' % (repr(self.ident), repr(self.args),
repr(self.original))
def evaluate(self, env):
"""Evaluate the function call in the environment, returning a
Unicode string.
"""
if self.ident in env.functions:
arg_vals = [expr.evaluate(env) for expr in self.args]
try:
out = env.functions[self.ident](*arg_vals)
except Exception as exc:
# Function raised exception! Maybe inlining the name of
# the exception will help debug.
return u'<%s>' % unicode(exc)
return unicode(out)
else:
return self.original
def translate(self):
"""Compile the function call."""
varnames = set()
funcnames = set([self.ident.encode('utf8')])
arg_exprs = []
for arg in self.args:
subexprs, subvars, subfuncs = arg.translate()
varnames.update(subvars)
funcnames.update(subfuncs)
# Create a subexpression that joins the result components of
# the arguments.
arg_exprs.append(ex_call(
ast.Attribute(ex_literal(u''), 'join', ast.Load()),
[ex_call(
'map',
[
ex_rvalue('unicode'),
ast.List(subexprs, ast.Load()),
]
)],
))
subexpr_call = ex_call(
FUNCTION_PREFIX + self.ident.encode('utf8'),
arg_exprs
)
return [subexpr_call], varnames, funcnames
class Expression(object):
"""Top-level template construct: contains a list of text blobs,
Symbols, and Calls.
"""
def __init__(self, parts):
self.parts = parts
def __repr__(self):
return u'Expression(%s)' % (repr(self.parts))
def evaluate(self, env):
"""Evaluate the entire expression in the environment, returning
a Unicode string.
"""
out = []
for part in self.parts:
if isinstance(part, basestring):
out.append(part)
else:
out.append(part.evaluate(env))
return u''.join(map(unicode, out))
def translate(self):
"""Compile the expression to a list of Python AST expressions, a
set of variable names used, and a set of function names.
"""
expressions = []
varnames = set()
funcnames = set()
for part in self.parts:
if isinstance(part, basestring):
expressions.append(ex_literal(part))
else:
e, v, f = part.translate()
expressions.extend(e)
varnames.update(v)
funcnames.update(f)
return expressions, varnames, funcnames
# Parser.
class ParseError(Exception):
pass
class Parser(object):
"""Parses a template expression string. Instantiate the class with
the template source and call ``parse_expression``. The ``pos`` field
    will indicate the first character after the end of the expression, and
``parts`` will contain a list of Unicode strings, Symbols, and Calls
reflecting the concatenated portions of the expression.
This is a terrible, ad-hoc parser implementation based on a
left-to-right scan with no lexing step to speak of; it's probably
both inefficient and incorrect. Maybe this should eventually be
replaced with a real, accepted parsing technique (PEG, parser
generator, etc.).
"""
def __init__(self, string):
self.string = string
self.pos = 0
self.parts = []
# Common parsing resources.
special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
ARG_SEP, ESCAPE_CHAR)
special_char_re = re.compile(ur'[%s]|$' %
u''.join(re.escape(c) for c in special_chars))
def parse_expression(self):
"""Parse a template expression starting at ``pos``. Resulting
components (Unicode strings, Symbols, and Calls) are added to
the ``parts`` field, a list. The ``pos`` field is updated to be
the next character after the expression.
"""
text_parts = []
while self.pos < len(self.string):
char = self.string[self.pos]
if char not in self.special_chars:
# A non-special character. Skip to the next special
# character, treating the interstice as literal text.
next_pos = (
self.special_char_re.search(self.string[self.pos:]).start()
+ self.pos
)
text_parts.append(self.string[self.pos:next_pos])
self.pos = next_pos
continue
if self.pos == len(self.string) - 1:
# The last character can never begin a structure, so we
# just interpret it as a literal character (unless it
# terminates the expression, as with , and }).
if char not in (GROUP_CLOSE, ARG_SEP):
text_parts.append(char)
self.pos += 1
break
next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in \
(SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP):
# An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just
# using { suffices in all cases).
text_parts.append(next_char)
self.pos += 2 # Skip the next character.
continue
# Shift all characters collected so far into a single string.
if text_parts:
self.parts.append(u''.join(text_parts))
text_parts = []
if char == SYMBOL_DELIM:
# Parse a symbol.
self.parse_symbol()
elif char == FUNC_DELIM:
# Parse a function call.
self.parse_call()
elif char in (GROUP_CLOSE, ARG_SEP):
# Template terminated.
break
elif char == GROUP_OPEN:
                # The start of a group has no meaning here; just pass
                # through the character.
text_parts.append(char)
self.pos += 1
else:
assert False
# If any parsed characters remain, shift them into a string.
if text_parts:
self.parts.append(u''.join(text_parts))
def parse_symbol(self):
"""Parse a variable reference (like ``$foo`` or ``${foo}``)
starting at ``pos``. Possibly appends a Symbol object (or,
failing that, text) to the ``parts`` field and updates ``pos``.
The character at ``pos`` must, as a precondition, be ``$``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == SYMBOL_DELIM
if self.pos == len(self.string) - 1:
# Last character.
self.parts.append(SYMBOL_DELIM)
self.pos += 1
return
next_char = self.string[self.pos + 1]
start_pos = self.pos
self.pos += 1
if next_char == GROUP_OPEN:
# A symbol like ${this}.
self.pos += 1 # Skip opening.
closer = self.string.find(GROUP_CLOSE, self.pos)
if closer == -1 or closer == self.pos:
# No closing brace found or identifier is empty.
self.parts.append(self.string[start_pos:self.pos])
else:
# Closer found.
ident = self.string[self.pos:closer]
self.pos = closer + 1
self.parts.append(Symbol(ident,
self.string[start_pos:self.pos]))
else:
# A bare-word symbol.
ident = self._parse_ident()
if ident:
# Found a real symbol.
self.parts.append(Symbol(ident,
self.string[start_pos:self.pos]))
else:
# A standalone $.
self.parts.append(SYMBOL_DELIM)
def parse_call(self):
"""Parse a function call (like ``%foo{bar,baz}``) starting at
        ``pos``. Possibly appends a Call object to ``parts`` and updates
        ``pos``. The character at ``pos`` must be ``%``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == FUNC_DELIM
start_pos = self.pos
self.pos += 1
ident = self._parse_ident()
if not ident:
# No function name.
self.parts.append(FUNC_DELIM)
return
if self.pos >= len(self.string):
# Identifier terminates string.
self.parts.append(self.string[start_pos:self.pos])
return
if self.string[self.pos] != GROUP_OPEN:
# Argument list not opened.
self.parts.append(self.string[start_pos:self.pos])
return
# Skip past opening brace and try to parse an argument list.
self.pos += 1
args = self.parse_argument_list()
if self.pos >= len(self.string) or \
self.string[self.pos] != GROUP_CLOSE:
# Arguments unclosed.
self.parts.append(self.string[start_pos:self.pos])
return
self.pos += 1 # Move past closing brace.
self.parts.append(Call(ident, args, self.string[start_pos:self.pos]))
def parse_argument_list(self):
"""Parse a list of arguments starting at ``pos``, returning a
list of Expression objects. Does not modify ``parts``. Should
leave ``pos`` pointing to a } character or the end of the
string.
"""
# Try to parse a subexpression in a subparser.
expressions = []
while self.pos < len(self.string):
subparser = Parser(self.string[self.pos:])
subparser.parse_expression()
# Extract and advance past the parsed expression.
expressions.append(Expression(subparser.parts))
self.pos += subparser.pos
if self.pos >= len(self.string) or \
self.string[self.pos] == GROUP_CLOSE:
# Argument list terminated by EOF or closing brace.
break
# Only other way to terminate an expression is with ,.
# Continue to the next argument.
assert self.string[self.pos] == ARG_SEP
self.pos += 1
return expressions
def _parse_ident(self):
"""Parse an identifier and return it (possibly an empty string).
Updates ``pos``.
"""
remainder = self.string[self.pos:]
ident = re.match(ur'\w*', remainder).group(0)
self.pos += len(ident)
return ident
def _parse(template):
"""Parse a top-level template string Expression. Any extraneous text
is considered literal text.
"""
parser = Parser(template)
parser.parse_expression()
parts = parser.parts
remainder = parser.string[parser.pos:]
if remainder:
parts.append(remainder)
return Expression(parts)
# External interface.
class Template(object):
"""A string template, including text, Symbols, and Calls.
"""
def __init__(self, template):
self.expr = _parse(template)
self.original = template
self.compiled = self.translate()
def __eq__(self, other):
return self.original == other.original
def interpret(self, values={}, functions={}):
"""Like `substitute`, but forces the interpreter (rather than
the compiled version) to be used. The interpreter includes
exception-handling code for missing variables and buggy template
functions but is much slower.
"""
return self.expr.evaluate(Environment(values, functions))
def substitute(self, values={}, functions={}):
"""Evaluate the template given the values and functions.
"""
try:
res = self.compiled(values, functions)
except: # Handle any exceptions thrown by compiled version.
res = self.interpret(values, functions)
return res
def translate(self):
"""Compile the template to a Python function."""
expressions, varnames, funcnames = self.expr.translate()
argnames = []
for varname in varnames:
argnames.append(VARIABLE_PREFIX.encode('utf8') + varname)
for funcname in funcnames:
argnames.append(FUNCTION_PREFIX.encode('utf8') + funcname)
func = compile_func(
argnames,
[ast.Return(ast.List(expressions, ast.Load()))],
)
def wrapper_func(values={}, functions={}):
args = {}
for varname in varnames:
args[VARIABLE_PREFIX + varname] = values[varname]
for funcname in funcnames:
args[FUNCTION_PREFIX + funcname] = functions[funcname]
parts = func(**args)
return u''.join(parts)
return wrapper_func
# Performance tests.
if __name__ == '__main__':
import timeit
_tmpl = Template(u'foo $bar %baz{foozle $bar barzle} $bar')
_vars = {'bar': 'qux'}
_funcs = {'baz': unicode.upper}
interp_time = timeit.timeit('_tmpl.interpret(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs',
number=10000)
print(interp_time)
comp_time = timeit.timeit('_tmpl.substitute(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs',
number=10000)
print(comp_time)
print('Speedup:', interp_time / comp_time)
|
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a SQLAlchemy storage backend."""
from __future__ import absolute_import
import contextlib
import copy
import logging
import time
import sqlalchemy as sa
from sqlalchemy import exceptions as sa_exc
from sqlalchemy import orm as sa_orm
from sqlalchemy import pool as sa_pool
from taskflow import exceptions as exc
from taskflow.persistence.backends import base
from taskflow.persistence.backends.sqlalchemy import migration
from taskflow.persistence.backends.sqlalchemy import models
from taskflow.persistence import logbook
from taskflow.utils import misc
LOG = logging.getLogger(__name__)
# NOTE(harlowja): This is all very similar to what oslo-incubator uses but is
# not based on using oslo.cfg and its global configuration (which should not be
# used in libraries such as taskflow).
#
# TODO(harlowja): once oslo.db appears we should be able to use that instead
# since it's not supposed to have any usage of oslo.cfg in it when it
# materializes as a library.
# See: http://dev.mysql.com/doc/refman/5.0/en/error-messages-client.html
MY_SQL_CONN_ERRORS = (
# Lost connection to MySQL server at '%s', system error: %d
'2006',
# Can't connect to MySQL server on '%s' (%d)
'2003',
# Can't connect to local MySQL server through socket '%s' (%d)
'2002',
)
MY_SQL_GONE_WAY_AWAY_ERRORS = (
# Lost connection to MySQL server at '%s', system error: %d
'2006',
# Lost connection to MySQL server during query
'2013',
# Commands out of sync; you can't run this command now
'2014',
# Can't open shared memory; no answer from server (%lu)
'2045',
# Lost connection to MySQL server at '%s', system error: %d
'2055',
)
# See: http://www.postgresql.org/docs/9.1/static/errcodes-appendix.html
POSTGRES_CONN_ERRORS = (
# connection_exception
'08000',
# connection_does_not_exist
'08003',
# connection_failure
'08006',
# sqlclient_unable_to_establish_sqlconnection
'08001',
# sqlserver_rejected_establishment_of_sqlconnection
'08004',
# Just couldn't connect (postgres errors are pretty weird)
'could not connect to server',
)
POSTGRES_GONE_WAY_AWAY_ERRORS = (
# Server terminated while in progress (postgres errors are pretty weird)
'server closed the connection unexpectedly',
'terminating connection due to administrator command',
)
# These connection urls mean sqlite is being used as an in-memory DB
SQLITE_IN_MEMORY = ("sqlite://", 'sqlite:///:memory:')
def _in_any(reason, err_haystack):
"""Checks if any elements of the haystack are in the given reason"""
for err in err_haystack:
if reason.find(str(err)) != -1:
return True
return False
def _is_db_connection_error(reason):
return _in_any(reason, list(MY_SQL_CONN_ERRORS + POSTGRES_CONN_ERRORS))
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
    Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL connections checked out of the pool are alive.
Modified + borrowed from: http://bit.ly/14BYaW6
"""
try:
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if _in_any(str(ex.args[0]), MY_SQL_GONE_WAY_AWAY_ERRORS):
LOG.warn('Got mysql server has gone away: %s', ex)
raise sa_exc.DisconnectionError("Database server went away")
elif _in_any(str(ex.args[0]), POSTGRES_GONE_WAY_AWAY_ERRORS):
LOG.warn('Got postgres server has gone away: %s', ex)
raise sa_exc.DisconnectionError("Database server went away")
else:
raise
class SQLAlchemyBackend(base.Backend):
def __init__(self, conf):
super(SQLAlchemyBackend, self).__init__(conf)
self._engine = None
self._session_maker = None
def _test_connected(self, engine, max_retries=0):
def test_connect(failures):
try:
# See if we can make a connection happen.
#
# NOTE(harlowja): note that even though we are connecting
# once it does not mean that we will be able to connect in
# the future, so this is more of a sanity test and is not
# complete connection insurance.
with contextlib.closing(engine.connect()):
pass
except sa_exc.OperationalError as ex:
if _is_db_connection_error(str(ex.args[0])):
failures.append(misc.Failure())
return False
return True
failures = []
if test_connect(failures):
return engine
# Sorry it didn't work out...
if max_retries <= 0:
failures[-1].reraise()
# Go through the exponential backoff loop and see if we can connect
# after a given number of backoffs (with a backoff sleeping period
# between each attempt)...
attempts_left = max_retries
for sleepy_secs in misc.ExponentialBackoff(attempts=max_retries):
LOG.warn("SQL connection failed due to '%s', %s attempts left.",
failures[-1].exc, attempts_left)
LOG.info("Attempting to test the connection again in %s seconds.",
sleepy_secs)
time.sleep(sleepy_secs)
if test_connect(failures):
return engine
attempts_left -= 1
# Sorry it didn't work out...
failures[-1].reraise()
def _create_engine(self):
# NOTE(harlowja): copy the internal one so that we don't modify it via
# all the popping that will happen below.
conf = copy.deepcopy(self._conf)
engine_args = {
'echo': misc.as_bool(conf.pop('echo', False)),
'convert_unicode': misc.as_bool(conf.pop('convert_unicode', True)),
'pool_recycle': 3600,
}
try:
idle_timeout = misc.as_int(conf.pop('idle_timeout', None))
engine_args['pool_recycle'] = idle_timeout
except TypeError:
pass
sql_connection = conf.pop('connection')
e_url = sa.engine.url.make_url(sql_connection)
if 'sqlite' in e_url.drivername:
engine_args["poolclass"] = sa_pool.NullPool
# Adjustments for in-memory sqlite usage
if sql_connection.lower().strip() in SQLITE_IN_MEMORY:
engine_args["poolclass"] = sa_pool.StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
for (k, lookup_key) in [('pool_size', 'max_pool_size'),
('max_overflow', 'max_overflow'),
('pool_timeout', 'pool_timeout')]:
try:
engine_args[k] = misc.as_int(conf.pop(lookup_key, None))
except TypeError:
pass
# If the configuration dict specifies any additional engine args
# or engine arg overrides make sure we merge them in.
engine_args.update(conf.pop('engine_args', {}))
engine = sa.create_engine(sql_connection, **engine_args)
if misc.as_bool(conf.pop('checkin_yield', True)):
sa.event.listen(engine, 'checkin', _thread_yield)
if 'mysql' in e_url.drivername:
if misc.as_bool(conf.pop('checkout_ping', True)):
sa.event.listen(engine, 'checkout', _ping_listener)
try:
max_retries = misc.as_int(conf.pop('max_retries', None))
except TypeError:
max_retries = 0
return self._test_connected(engine, max_retries=max_retries)
@property
def engine(self):
if self._engine is None:
self._engine = self._create_engine()
return self._engine
def _get_session_maker(self):
if self._session_maker is None:
self._session_maker = sa_orm.sessionmaker(bind=self.engine,
autocommit=True)
return self._session_maker
def get_connection(self):
return Connection(self, self._get_session_maker())
def close(self):
if self._session_maker is not None:
self._session_maker.close_all()
if self._engine is not None:
self._engine.dispose()
self._engine = None
self._session_maker = None
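# A hedged configuration sketch (the connection URL and values below are
# examples only): the keys consumed by _create_engine() above are taken from
# the conf dict handed to the backend constructor, e.g.
#
#   backend = SQLAlchemyBackend({
#       'connection': 'sqlite://',  # in-memory sqlite -> StaticPool
#       'max_retries': 3,           # connection test retries
#       'idle_timeout': 1800,       # becomes pool_recycle
#   })
#   with contextlib.closing(backend.get_connection()) as conn:
#       conn.upgrade()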
class Connection(base.Connection):
def __init__(self, backend, session_maker):
self._backend = backend
self._session_maker = session_maker
self._engine = backend.engine
@property
def backend(self):
return self._backend
def _run_in_session(self, functor, *args, **kwargs):
"""Runs a function in a session and makes sure that sqlalchemy
exceptions aren't emitted from that sessions actions (as that would
expose the underlying backends exception model).
"""
try:
session = self._make_session()
with session.begin():
return functor(session, *args, **kwargs)
except sa_exc.SQLAlchemyError as e:
raise exc.StorageError("Failed running database session: %s" % e,
e)
def _make_session(self):
try:
return self._session_maker()
except sa_exc.SQLAlchemyError as e:
raise exc.StorageError("Failed creating database session: %s"
% e, e)
def upgrade(self):
try:
with contextlib.closing(self._engine.connect()) as conn:
migration.db_sync(conn)
except sa_exc.SQLAlchemyError as e:
raise exc.StorageError("Failed upgrading database version: %s" % e,
e)
def _clear_all(self, session):
        # NOTE(harlowja): because of how our relationships are set up and
        # since cascading deletes are enabled, this will cause all associated
        # task details and flow details to automatically be purged.
try:
return session.query(models.LogBook).delete()
except sa_exc.DBAPIError as e:
raise exc.StorageError("Failed clearing all entries: %s" % e, e)
def clear_all(self):
return self._run_in_session(self._clear_all)
def _update_task_details(self, session, td):
        # Must already exist since a task detail has a strong connection to
        # a flow detail, and task details cannot be saved on their own since
        # they *must* have a connection to an existing flow detail.
td_m = _task_details_get_model(td.uuid, session=session)
td_m = _taskdetails_merge(td_m, td)
td_m = session.merge(td_m)
return _convert_td_to_external(td_m)
def update_task_details(self, task_detail):
return self._run_in_session(self._update_task_details, td=task_detail)
def _update_flow_details(self, session, fd):
        # Must already exist since a flow detail has a strong connection to
        # a logbook, and flow details cannot be saved on their own since they
        # *must* have a connection to an existing logbook.
fd_m = _flow_details_get_model(fd.uuid, session=session)
fd_m = _flowdetails_merge(fd_m, fd)
fd_m = session.merge(fd_m)
return _convert_fd_to_external(fd_m)
def update_flow_details(self, flow_detail):
return self._run_in_session(self._update_flow_details, fd=flow_detail)
def _destroy_logbook(self, session, lb_id):
try:
lb = _logbook_get_model(lb_id, session=session)
session.delete(lb)
except sa_exc.DBAPIError as e:
raise exc.StorageError("Failed destroying"
" logbook %s: %s" % (lb_id, e), e)
def destroy_logbook(self, book_uuid):
return self._run_in_session(self._destroy_logbook, lb_id=book_uuid)
def _save_logbook(self, session, lb):
try:
lb_m = _logbook_get_model(lb.uuid, session=session)
# NOTE(harlowja): Merge them (note that this doesn't provide
# 100% correct update semantics due to how databases have
# MVCC). This is where a stored procedure or a better backing
# store would handle this better by allowing this merge logic
# to exist in the database itself.
lb_m = _logbook_merge(lb_m, lb)
except exc.NotFound:
lb_m = _convert_lb_to_internal(lb)
try:
lb_m = session.merge(lb_m)
return _convert_lb_to_external(lb_m)
except sa_exc.DBAPIError as e:
raise exc.StorageError("Failed saving logbook %s: %s" %
(lb.uuid, e), e)
def save_logbook(self, book):
return self._run_in_session(self._save_logbook, lb=book)
def get_logbook(self, book_uuid):
session = self._make_session()
try:
lb = _logbook_get_model(book_uuid, session=session)
return _convert_lb_to_external(lb)
except sa_exc.DBAPIError as e:
raise exc.StorageError("Failed getting logbook %s: %s"
% (book_uuid, e), e)
def get_logbooks(self):
session = self._make_session()
try:
raw_books = session.query(models.LogBook).all()
books = [_convert_lb_to_external(lb) for lb in raw_books]
except sa_exc.DBAPIError as e:
raise exc.StorageError("Failed getting logbooks: %s" % e, e)
for lb in books:
yield lb
def close(self):
pass
###
# Internal <-> external model + merging + other helper functions.
###
def _convert_fd_to_external(fd):
fd_c = logbook.FlowDetail(fd.name, uuid=fd.uuid)
fd_c.meta = fd.meta
fd_c.state = fd.state
for td in fd.taskdetails:
fd_c.add(_convert_td_to_external(td))
return fd_c
def _convert_fd_to_internal(fd, parent_uuid):
fd_m = models.FlowDetail(name=fd.name, uuid=fd.uuid,
parent_uuid=parent_uuid, meta=fd.meta,
state=fd.state)
fd_m.taskdetails = []
for td in fd:
fd_m.taskdetails.append(_convert_td_to_internal(td, fd_m.uuid))
return fd_m
def _convert_td_to_internal(td, parent_uuid):
return models.TaskDetail(name=td.name, uuid=td.uuid,
state=td.state, results=td.results,
exception=td.exception, meta=td.meta,
stacktrace=td.stacktrace,
version=td.version, parent_uuid=parent_uuid)
def _convert_td_to_external(td):
# Convert from sqlalchemy model -> external model, this allows us
# to change the internal sqlalchemy model easily by forcing a defined
# interface (that isn't the sqlalchemy model itself).
td_c = logbook.TaskDetail(td.name, uuid=td.uuid)
td_c.state = td.state
td_c.results = td.results
td_c.exception = td.exception
td_c.stacktrace = td.stacktrace
td_c.meta = td.meta
td_c.version = td.version
return td_c
def _convert_lb_to_external(lb_m):
"""Don't expose the internal sqlalchemy ORM model to the external api."""
lb_c = logbook.LogBook(lb_m.name, lb_m.uuid,
updated_at=lb_m.updated_at,
created_at=lb_m.created_at)
lb_c.meta = lb_m.meta
for fd_m in lb_m.flowdetails:
lb_c.add(_convert_fd_to_external(fd_m))
return lb_c
def _convert_lb_to_internal(lb_c):
"""Don't expose the external model to the sqlalchemy ORM model."""
lb_m = models.LogBook(uuid=lb_c.uuid, meta=lb_c.meta, name=lb_c.name)
lb_m.flowdetails = []
for fd_c in lb_c:
lb_m.flowdetails.append(_convert_fd_to_internal(fd_c, lb_c.uuid))
return lb_m
def _logbook_get_model(lb_id, session):
entry = session.query(models.LogBook).filter_by(uuid=lb_id).first()
if entry is None:
raise exc.NotFound("No logbook found with id: %s" % lb_id)
return entry
def _flow_details_get_model(f_id, session):
entry = session.query(models.FlowDetail).filter_by(uuid=f_id).first()
if entry is None:
raise exc.NotFound("No flow details found with id: %s" % f_id)
return entry
def _task_details_get_model(t_id, session):
entry = session.query(models.TaskDetail).filter_by(uuid=t_id).first()
if entry is None:
raise exc.NotFound("No task details found with id: %s" % t_id)
return entry
def _logbook_merge(lb_m, lb):
if lb_m.meta != lb.meta:
lb_m.meta = lb.meta
for fd in lb:
existing_fd = False
for fd_m in lb_m.flowdetails:
if fd_m.uuid == fd.uuid:
existing_fd = True
fd_m = _flowdetails_merge(fd_m, fd)
if not existing_fd:
lb_m.flowdetails.append(_convert_fd_to_internal(fd, lb_m.uuid))
return lb_m
def _flowdetails_merge(fd_m, fd):
if fd_m.meta != fd.meta:
fd_m.meta = fd.meta
if fd_m.state != fd.state:
fd_m.state = fd.state
for td in fd:
existing_td = False
for td_m in fd_m.taskdetails:
if td_m.uuid == td.uuid:
existing_td = True
td_m = _taskdetails_merge(td_m, td)
break
if not existing_td:
td_m = _convert_td_to_internal(td, fd_m.uuid)
fd_m.taskdetails.append(td_m)
return fd_m
def _taskdetails_merge(td_m, td):
if td_m.state != td.state:
td_m.state = td.state
if td_m.results != td.results:
td_m.results = td.results
if td_m.exception != td.exception:
td_m.exception = td.exception
if td_m.stacktrace != td.stacktrace:
td_m.stacktrace = td.stacktrace
if td_m.meta != td.meta:
td_m.meta = td.meta
return td_m
|
|
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import mock
from oslo.config import cfg
import testtools
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import jsonutils
from neutron.openstack.common import uuidutils
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
from neutron.tests import tools
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
class TestBaseOVS(base.BaseTestCase):
def setUp(self):
super(TestBaseOVS, self).setUp()
self.root_helper = 'sudo'
self.ovs = ovs_lib.BaseOVS(self.root_helper)
self.br_name = 'bridge1'
def test_add_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.ovs.add_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--may-exist",
"add-br", self.br_name])
def test_delete_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.ovs.delete_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--if-exists", "del-br",
self.br_name])
def test_bridge_exists_returns_true(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.assertTrue(self.ovs.bridge_exists(self.br_name))
mock_vsctl.assert_called_with(['br-exists', self.br_name],
check_error=True)
def test_bridge_exists_returns_false_for_exit_code_2(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 2\n')):
self.assertFalse(self.ovs.bridge_exists('bridge1'))
def test_bridge_exists_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.bridge_exists('bridge1')
def test_get_bridge_name_for_port_name_returns_bridge_for_valid_port(self):
port_name = 'bar'
with mock.patch.object(self.ovs, 'run_vsctl',
return_value=self.br_name) as mock_vsctl:
bridge = self.ovs.get_bridge_name_for_port_name(port_name)
self.assertEqual(bridge, self.br_name)
mock_vsctl.assert_called_with(['port-to-br', port_name],
check_error=True)
def test_get_bridge_name_for_port_name_returns_none_for_exit_code_1(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 1\n')):
self.assertFalse(self.ovs.get_bridge_name_for_port_name('bridge1'))
def test_get_bridge_name_for_port_name_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.get_bridge_name_for_port_name('bridge1')
def _test_port_exists(self, br_name, result):
with mock.patch.object(self.ovs,
'get_bridge_name_for_port_name',
return_value=br_name):
self.assertEqual(self.ovs.port_exists('bar'), result)
def test_port_exists_returns_true_for_bridge_name(self):
self._test_port_exists(self.br_name, True)
def test_port_exists_returns_false_for_none(self):
self._test_port_exists(None, False)
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=10"
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def test_set_controller(self):
controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']
self.br.set_controller(controller_names)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-controller', self.BR_NAME,
'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'],
root_helper=self.root_helper)
def test_del_controller(self):
self.br.del_controller()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'del-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_get_controller(self):
self.execute.return_value = 'tcp:127.0.0.1:6633\ntcp:172.17.16.10:5555'
names = self.br.get_controller()
self.assertEqual(names,
['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'])
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'get-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_set_secure_mode(self):
self.br.set_secure_mode()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-fail-mode', self.BR_NAME,
'secure'], root_helper=self.root_helper)
def test_set_protocols(self):
protocols = 'OpenFlow13'
self.br.set_protocols(protocols)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set', 'bridge', self.BR_NAME,
"protocols=%s" % protocols],
root_helper=self.root_helper)
def test_create(self):
self.br.add_bridge(self.BR_NAME)
self.br.create()
def test_destroy(self):
self.br.delete_bridge(self.BR_NAME)
self.br.destroy()
def test_reset_bridge(self):
self.br.destroy()
self.br.create()
self.br.reset_bridge()
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def _test_delete_port(self, exp_timeout=None):
exp_timeout_str = self._build_timeout_opt(exp_timeout)
pname = "tap5"
self.br.delete_port(pname)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
def test_delete_port(self):
self._test_delete_port()
    def test_call_command_non_default_timeout(self):
# This test is only for verifying a non-default timeout
# is correctly applied. Does not need to be repeated for
# every ovs_lib method
new_timeout = 5
self.br.vsctl_timeout = new_timeout
self._test_delete_port(new_timeout)
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = OrderedDict([('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = OrderedDict([('priority', 1),
('actions', 'normal')])
flow_dict_3 = OrderedDict([('priority', 2),
('actions', 'drop')])
flow_dict_4 = OrderedDict([('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = OrderedDict([
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = OrderedDict([
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = OrderedDict([
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef"
",actions=strip_vlan,output:0"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,in_port=%s,actions=drop" % ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (vid, ofport, lsw_id)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=3,tun_id=%s,actions="
"mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,nw_src=%s,arp,actions=drop" % cidr],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_add_flow_timeout_set(self):
flow_dict = OrderedDict([('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=1000,idle_timeout=2000,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def test_add_flow_default_priority(self):
flow_dict = OrderedDict([('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def test_get_port_ofport(self):
pname = "tap99"
ofport = "6"
self.execute.return_value = ofport
self.assertEqual(self.br.get_port_ofport(pname), ofport)
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"],
root_helper=self.root_helper)
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
self.execute.return_value = datapath_id
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.execute.assert_called_once_with(
["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper,
process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"in_port=" + ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"tun_id=%s" % lsw_id],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"dl_vlan=%s" % vid],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_dump_flows(self):
table = 23
nxst_flow = "NXST_FLOW reply (xid=0x4):"
flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
"n_packets=6, n_bytes=468, "
"priority=2,in_port=1 actions=drop",
" cookie=0x0, duration=18027.562s, table=0, "
"n_packets=0, n_bytes=0, "
"priority=3,in_port=1,dl_vlan=100 "
"actions=mod_vlan_vid:1,NORMAL",
" cookie=0x0, duration=18044.351s, table=0, "
"n_packets=9, n_bytes=594, priority=1 "
"actions=NORMAL", " cookie=0x0, "
"duration=18044.211s, table=23, n_packets=0, "
"n_bytes=0, priority=0 actions=drop"])
flow_args = '\n'.join([nxst_flow, flows])
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = [flow_args]
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(flows, retflows)
def test_dump_flows_ovs_dead(self):
table = 23
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = ['']
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(None, retflows)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_defer_apply_flows(self):
flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
flow_expr.side_effect = ['added_flow_1', 'added_flow_2',
'deleted_flow_1']
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_1')
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_2')
self.br.delete_flows(flow='delete_flow_1')
self.br.defer_apply_off()
flow_expr.assert_has_calls([
mock.call({'flow': 'add_flow_1'}, 'add'),
mock.call({'flow': 'add_flow_2'}, 'add'),
mock.call({'flow': 'delete_flow_1'}, 'del')
])
run_ofctl.assert_has_calls([
mock.call('add-flows', ['-'], 'added_flow_1\nadded_flow_2\n'),
mock.call('del-flows', ['-'], 'deleted_flow_1\n')
])
def test_defer_apply_flows_concurrently(self):
flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
flow_expr.side_effect = ['added_flow_1', 'deleted_flow_1',
'modified_flow_1', 'added_flow_2',
'deleted_flow_2', 'modified_flow_2']
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
def run_ofctl_fake(cmd, args, process_input=None):
self.br.defer_apply_on()
if cmd == 'add-flows':
self.br.add_flow(flow='added_flow_2')
elif cmd == 'del-flows':
self.br.delete_flows(flow='deleted_flow_2')
elif cmd == 'mod-flows':
self.br.mod_flow(flow='modified_flow_2')
run_ofctl.side_effect = run_ofctl_fake
self.br.defer_apply_on()
self.br.add_flow(flow='added_flow_1')
self.br.delete_flows(flow='deleted_flow_1')
self.br.mod_flow(flow='modified_flow_1')
self.br.defer_apply_off()
run_ofctl.side_effect = None
self.br.defer_apply_off()
flow_expr.assert_has_calls([
mock.call({'flow': 'added_flow_1'}, 'add'),
mock.call({'flow': 'deleted_flow_1'}, 'del'),
mock.call({'flow': 'modified_flow_1'}, 'mod'),
mock.call({'flow': 'added_flow_2'}, 'add'),
mock.call({'flow': 'deleted_flow_2'}, 'del'),
mock.call({'flow': 'modified_flow_2'}, 'mod')
])
run_ofctl.assert_has_calls([
mock.call('add-flows', ['-'], 'added_flow_1\n'),
mock.call('del-flows', ['-'], 'deleted_flow_1\n'),
mock.call('mod-flows', ['-'], 'modified_flow_1\n'),
mock.call('add-flows', ['-'], 'added_flow_2\n'),
mock.call('del-flows', ['-'], 'deleted_flow_2\n'),
mock.call('mod-flows', ['-'], 'modified_flow_2\n')
])
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = "6"
command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper), None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
# Each element is a tuple of (expected mock call, return_value)
command = ["ovs-vsctl", self.TO, "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=patch", "options:peer=" + peer])
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper),
None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport)
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
"%s\n" % pname),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper),
external_ids),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
if is_xen:
expected_calls_and_values.append(
(mock.call(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper),
vif_id)
)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
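# Scalars pass through unchanged, dicts become ["map", [[key, value], ...]]
# and sets become ["set", [...]], mirroring ovs-vsctl --format=json output.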
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r is not an int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# A vif port on another bridge:
['tap88', {id_key: 'tap88id', 'attached-mac': 'tap88id'}, 1],
# Non-vif port on this bridge:
['tun22', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\ntun22'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\n'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'\n'.join((iface for iface, tag in data))),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,tag",
"list", "Port"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "clear", "Port", pname, "tag"],
root_helper=self.root_helper)
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
self.execute.return_value = 'br-int'
exp_timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "iface-to-br", iface],
root_helper=root_helper)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
root_helper = 'sudo'
self.execute.side_effect = Exception
self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_bridges(self, exp_timeout=None):
bridges = ['br-int', 'br-ex']
root_helper = 'sudo'
self.execute.return_value = 'br-int\nbr-ex\n'
timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
self.execute.assert_called_once_with(
["ovs-vsctl", timeout_str, "list-br"],
root_helper=root_helper)
def test_get_bridges(self):
self._test_get_bridges()
def test_get_bridges_not_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_get_bridges(new_timeout)
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
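# get_vif_port_by_id() first finds the Interface row matching the iface-id,
# then runs iface-to-br to confirm the port belongs to this bridge.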
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=external_ids,name,ofport",
"find", "Interface",
'external_ids:iface-id="%s"' % iface_id],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
expected_calls_and_values.append(
(mock.call(["ovs-vsctl", self.TO,
"iface-to-br", data[0][headings.index('name')]],
root_helper=self.root_helper),
br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
if mac:
external_ids.append(["attached-mac", mac])
data = [[["map", external_ids], "tap99",
ofport if ofport else '["set",[]]']]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port)
return
self.assertEqual(vif_port.vif_id, 'tap99id')
self.assertEqual(vif_port.vif_mac, 'aa:bb:cc:dd:ee:ff')
self.assertEqual(vif_port.port_name, 'tap99')
self.assertEqual(vif_port.ofport, ofport)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_mac(self):
self._test_get_vif_port_by_id_with_data(ofport=1)
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
def _check_ovs_vxlan_version(self, installed_usr_version,
installed_klm_version,
installed_kernel_version,
expecting_ok):
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_klm_version'
) as klm_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_usr_version'
) as usr_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_kernel_version'
) as kernel_cmd:
try:
klm_cmd.return_value = installed_klm_version
usr_cmd.return_value = installed_usr_version
kernel_cmd.return_value = installed_kernel_version
ovs_lib.check_ovs_vxlan_version(root_helper='sudo')
version_ok = True
except SystemError:
version_ok = False
self.assertEqual(version_ok, expecting_ok)
def test_check_minimum_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(min_vxlan_ver, min_vxlan_ver,
min_kernel_ver, expecting_ok=True)
def test_check_future_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) + 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=True)
def test_check_fail_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) - 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=False)
def test_check_fail_no_version(self):
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(None, None,
min_kernel_ver,
expecting_ok=False)
def test_check_fail_klm_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = OVS_LINUX_KERN_VERS_WITHOUT_VXLAN
install_ver = str(float(min_vxlan_ver) - 0.01)
self._check_ovs_vxlan_version(min_vxlan_ver,
install_ver,
min_kernel_ver,
expecting_ok=False)
def test_check_pass_kernel_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(min_vxlan_ver, min_vxlan_ver,
min_kernel_ver, expecting_ok=True)
|
|
import operator
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from dask.dataframe.utils import tm
from pytest import param
import ibis
from ibis.common.exceptions import IbisTypeError
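# The tests below exercise ibis array expressions (length, collect, slice,
# index, repeat and concat) compiled against the dask backend's dd.Series.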
def test_array_length(t, df):
expr = t.projection(
[
t.array_of_float64.length().name('array_of_float64_length'),
t.array_of_int64.length().name('array_of_int64_length'),
t.array_of_strings.length().name('array_of_strings_length'),
]
)
result = expr.compile()
expected = dd.from_pandas(
pd.DataFrame(
{
'array_of_float64_length': [2, 1, 0],
'array_of_int64_length': [2, 0, 1],
'array_of_strings_length': [2, 0, 1],
}
),
npartitions=1,
)
tm.assert_frame_equal(result.compute(), expected.compute())
def test_array_length_scalar(client):
raw_value = [1, 2, 4]
value = ibis.literal(raw_value)
expr = value.length()
result = client.execute(expr)
expected = len(raw_value)
assert result == expected
def test_array_collect(t, df):
expr = t.group_by(t.dup_strings).aggregate(
collected=t.float64_with_zeros.collect()
)
result = expr.compile()
expected = (
df.groupby('dup_strings')
.float64_with_zeros.apply(list)
.reset_index()
.rename(columns={'float64_with_zeros': 'collected'})
)
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.xfail(
raises=NotImplementedError, reason='TODO - windowing - #2553'
)
def test_array_collect_rolling_partitioned(t, df):
window = ibis.trailing_window(1, order_by=t.plain_int64)
colexpr = t.plain_float64.collect().over(window)
expr = t['dup_strings', 'plain_int64', colexpr.name('collected')]
result = expr.compile()
expected = dd.from_pandas(
pd.DataFrame(
{
'dup_strings': ['d', 'a', 'd'],
'plain_int64': [1, 2, 3],
'collected': [[4.0], [4.0, 5.0], [5.0, 6.0]],
}
),
npartitions=1,
)[expr.columns]
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.xfail(raises=IbisTypeError, reason='Not sure if this should work')
def test_array_collect_scalar(client):
raw_value = 'abcd'
value = ibis.literal(raw_value)
expr = value.collect()
result = client.execute(expr)
expected = [raw_value]
assert result == expected
@pytest.mark.xfail(
raises=NotImplementedError,
reason='TODO - arrays - #2553'
# Need an ops.ArraySlice execution func that dispatches on dd.Series
)
@pytest.mark.parametrize(
['start', 'stop'],
[
(1, 3),
(1, 1),
(2, 3),
(2, 5),
(None, 3),
(None, None),
(3, None),
# negative slices are not supported
# TODO: uncomment once test as a whole is not xfailed
# param(
# -3,
# None,
# marks=pytest.mark.xfail(
# raises=ValueError, reason='Negative slicing not supported'
# ),
# ),
# param(
# None,
# -3,
# marks=pytest.mark.xfail(
# raises=ValueError, reason='Negative slicing not supported'
# ),
# ),
# param(
# -3,
# -1,
# marks=pytest.mark.xfail(
# raises=ValueError, reason='Negative slicing not supported'
# ),
# ),
],
)
def test_array_slice(t, df, start, stop):
expr = t.array_of_strings[start:stop]
result = expr.compile()
slicer = operator.itemgetter(slice(start, stop))
expected = df.array_of_strings.apply(slicer)
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.parametrize(
['start', 'stop'],
[
(1, 3),
(1, 1),
(2, 3),
(2, 5),
(None, 3),
(None, None),
(3, None),
# negative slices are not supported
param(
-3,
None,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
None,
-3,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
param(
-3,
-1,
marks=pytest.mark.xfail(
raises=ValueError, reason='Negative slicing not supported'
),
),
],
)
def test_array_slice_scalar(client, start, stop):
raw_value = [-11, 42, 10]
value = ibis.literal(raw_value)
expr = value[start:stop]
result = client.execute(expr)
expected = raw_value[start:stop]
assert np.array_equal(result, expected)
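# Indexes outside the array bounds (such as 11 or -11 below) are expected to
# yield None rather than raise an IndexError.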
@pytest.mark.parametrize('index', [1, 3, 4, 11, -11])
def test_array_index(t, df, index):
expr = t[t.array_of_float64[index].name('indexed')]
result = expr.compile()
expected = dd.from_pandas(
pd.DataFrame(
{
'indexed': df.array_of_float64.apply(
lambda x: x[index] if -len(x) <= index < len(x) else None
)
}
),
npartitions=1,
)
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.parametrize('index', [1, 3, 4, 11])
def test_array_index_scalar(client, index):
raw_value = [-10, 1, 2, 42]
value = ibis.literal(raw_value)
expr = value[index]
result = client.execute(expr)
expected = raw_value[index] if index < len(raw_value) else None
assert result == expected
@pytest.mark.xfail(
raises=NotImplementedError,
reason='TODO - arrays - #2553'
# Need an ops.ArrayRepeat execution func that dispatches on dd.Series
)
@pytest.mark.parametrize('n', [1, 3, 4, 7, -2]) # negative returns empty list
@pytest.mark.parametrize('mul', [lambda x, n: x * n, lambda x, n: n * x])
def test_array_repeat(t, df, n, mul):
expr = t.projection([mul(t.array_of_strings, n).name('repeated')])
result = expr.compile()
expected = dd.from_pandas(
pd.DataFrame({'repeated': df.array_of_strings * n}),
npartitions=1,
)
tm.assert_frame_equal(result.compute(), expected.compute())
@pytest.mark.parametrize('n', [1, 3, 4, 7, -2]) # negative returns empty list
@pytest.mark.parametrize('mul', [lambda x, n: x * n, lambda x, n: n * x])
def test_array_repeat_scalar(client, n, mul):
raw_array = [1, 2]
array = ibis.literal(raw_array)
expr = mul(array, n)
result = client.execute(expr)
expected = mul(raw_array, n)
assert np.array_equal(result, expected)
@pytest.mark.xfail(
raises=ValueError,
reason='TODO - arrays - #2553'
# ValueError: Dask backend borrows Pandas backend's Cast execution
# function, which assumes array representation is np.array.
# NotImplementedError: Need an ops.ArrayConcat execution func that
# dispatches on dd.Series
)
@pytest.mark.parametrize('op', [lambda x, y: x + y, lambda x, y: y + x])
def test_array_concat(t, df, op):
x = t.array_of_float64.cast('array<string>')
y = t.array_of_strings
expr = op(x, y)
result = expr.compile()
expected = op(
df.array_of_float64.apply(lambda x: list(map(str, x))),
df.array_of_strings,
)
tm.assert_series_equal(result.compute(), expected.compute())
@pytest.mark.parametrize('op', [lambda x, y: x + y, lambda x, y: y + x])
def test_array_concat_scalar(client, op):
raw_left = [1, 2, 3]
raw_right = [3, 4]
left = ibis.literal(raw_left)
right = ibis.literal(raw_right)
expr = op(left, right)
result = client.execute(expr)
expected = op(raw_left, raw_right)
assert np.array_equal(result, expected)
|
|
import re
import salt.modules.jboss7_cli as jboss7_cli
from salt.exceptions import CommandExecutionError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase
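# CmdMock stands in for Salt's cmd.run_all: it records every shell command it
# receives, extracts the embedded --command argument, and returns either a
# canned success response or the result of command_response_func.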
class CmdMock:
commands = []
command_response_func = None # set this to test the complete response object (retcode, stdout and stderr)
cli_commands = []
default_response = {
"retcode": 0,
"stdout": """ {
"outcome" => "success"
}""",
"stderr": "",
}
def __init__(self, command_response_func=None):
self.command_response_func = command_response_func
def run_all(self, command):
self.commands.append(command)
if self.command_response_func is not None:
return self.command_response_func(command)
cli_command = self.__get_cli_command(command)
self.cli_commands.append(cli_command)
return self.default_response
@staticmethod
def __get_cli_command(command):
command_re = re.compile(r"--command=\"\s*(.+?)\s*\"$", re.DOTALL)
m = command_re.search(command) # --command has to be the last argument
if m:
cli_command = m.group(1)
return cli_command
return None
def get_last_command(self):
if len(self.commands) > 0:
return self.commands[-1]
else:
return None
def get_last_cli_command(self):
if len(self.cli_commands) > 0:
return self.cli_commands[-1]
else:
return None
def clear(self):
self.commands = []
self.command_response_func = None
self.cli_commands = []
class JBoss7CliTestCase(TestCase, LoaderModuleMockMixin):
cmd = CmdMock()
jboss_config = {
"cli_path": "/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh",
"controller": "123.234.345.456:9999",
"instance_name": "Instance1",
"cli_user": "jbossadm",
"cli_password": "jbossadm",
"status_url": "http://sampleapp.example.com:8080/",
}
def setup_loader_modules(self):
self.cmd = CmdMock()
self.addCleanup(delattr, self, "cmd")
return {jboss7_cli: {"__salt__": {"cmd.run_all": self.cmd.run_all}}}
def test_controller_authentication(self):
jboss7_cli.run_operation(self.jboss_config, "some cli operation")
self.assertEqual(
self.cmd.get_last_command(),
"/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh --connect"
' --controller="123.234.345.456:9999" --user="jbossadm"'
' --password="jbossadm" --command="some cli operation"',
)
def test_controller_without_authentication(self):
jboss_config = {
"cli_path": "/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh",
"controller": "123.234.345.456:9999",
}
jboss7_cli.run_operation(jboss_config, "some cli operation")
self.assertEqual(
self.cmd.get_last_command(),
"/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh --connect"
' --controller="123.234.345.456:9999" --command="some cli operation"',
)
def test_operation_execution(self):
operation = r"sample_operation"
jboss7_cli.run_operation(self.jboss_config, operation)
self.assertEqual(
self.cmd.get_last_command(),
r"/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh --connect"
r' --controller="123.234.345.456:9999" --user="jbossadm"'
r' --password="jbossadm" --command="sample_operation"',
)
def test_handling_jboss_error(self):
def command_response(command):
return {
"retcode": 1,
"stdout": r"""{
"outcome" => "failed",
"failure-description" => "JBAS014807: Management resource '[
(\"subsystem\" => \"datasources\"),
(\"data-source\" => \"non-existing\")
]' not found",
"rolled-back" => true,
"response-headers" => {"process-state" => "reload-required"}
}
""",
"stderr": "some err",
}
self.cmd.command_response_func = command_response
result = jboss7_cli.run_operation(self.jboss_config, "some cli command")
self.assertFalse(result["success"])
self.assertEqual(result["err_code"], "JBAS014807")
def test_handling_cmd_not_exists(self):
def command_response(command):
return {
"retcode": 127,
"stdout": """Command not exists""",
"stderr": "some err",
}
self.cmd.command_response_func = command_response
try:
jboss7_cli.run_operation(self.jboss_config, "some cli command")
# should throw an exception
self.fail("An exception should be thrown")
except CommandExecutionError as err:
self.assertTrue(
str(err).startswith("Could not execute jboss-cli.sh script")
)
def test_handling_other_cmd_error(self):
def command_response(command):
return {
"retcode": 1,
"stdout": """Command not exists""",
"stderr": "some err",
}
self.cmd.command_response_func = command_response
try:
jboss7_cli.run_command(self.jboss_config, "some cli command")
# should throw an exception
self.fail("An exception should be thrown")
except CommandExecutionError as err:
self.assertTrue(str(err).startswith("Command execution failed"))
def test_matches_cli_output(self):
text = """{
"key1" => "value1"
"key2" => "value2"
}
"""
self.assertTrue(jboss7_cli._is_cli_output(text))
def test_not_matches_cli_output(self):
text = """Some error """
self.assertFalse(jboss7_cli._is_cli_output(text))
def test_parse_flat_dictionary(self):
text = """{
"key1" => "value1"
"key2" => "value2"
}"""
result = jboss7_cli._parse(text)
self.assertEqual(len(result), 2)
self.assertEqual(result["key1"], "value1")
self.assertEqual(result["key2"], "value2")
def test_parse_nested_dictionary(self):
text = """{
"key1" => "value1",
"key2" => {
"nested_key1" => "nested_value1"
}
}"""
result = jboss7_cli._parse(text)
self.assertEqual(len(result), 2)
self.assertEqual(result["key1"], "value1")
self.assertEqual(len(result["key2"]), 1)
self.assertEqual(result["key2"]["nested_key1"], "nested_value1")
def test_parse_string_after_dict(self):
text = """{
"result" => {
"jta" => true
},
"response-headers" => {"process-state" => "reload-required"}
}"""
result = jboss7_cli._parse(text)
self.assertTrue(result["result"]["jta"])
self.assertEqual(result["response-headers"]["process-state"], "reload-required")
def test_parse_all_datatypes(self):
text = """{
"outcome" => "success",
"result" => {
"allocation-retry" => undefined,
"connection-url" => "jdbc:mysql://localhost:3306/appdb",
"driver-name" => "mysql",
"enabled" => false,
"jta" => true
},
"response-headers" => {"process-state" => "reload-required"}
}"""
result = jboss7_cli._parse(text)
self.assertEqual(result["outcome"], "success")
self.assertIsNone(result["result"]["allocation-retry"])
self.assertEqual(
result["result"]["connection-url"], "jdbc:mysql://localhost:3306/appdb"
)
self.assertEqual(result["result"]["driver-name"], "mysql")
self.assertEqual(result["result"]["enabled"], False)
self.assertTrue(result["result"]["jta"])
self.assertEqual(result["response-headers"]["process-state"], "reload-required")
def test_multiline_strings_with_escaped_quotes(self):
text = r"""{
"outcome" => "failed",
"failure-description" => "JBAS014807: Management resource '[
(\"subsystem\" => \"datasources\"),
(\"data-source\" => \"asc\")
]' not found",
"rolled-back" => true,
"response-headers" => {"process-state" => "reload-required"}
}"""
result = jboss7_cli._parse(text)
self.assertEqual(result["outcome"], "failed")
self.assertTrue(result["rolled-back"])
self.assertEqual(result["response-headers"]["process-state"], "reload-required")
self.assertEqual(
result["failure-description"],
r"""JBAS014807: Management resource '[
(\"subsystem\" => \"datasources\"),
(\"data-source\" => \"asc\")
]' not found""",
)
def test_handling_double_backslash_in_return_values(self):
text = r"""{
"outcome" => "success",
"result" => {
"binding-type" => "simple",
"value" => "DOMAIN\\foo"
}
}"""
result = jboss7_cli._parse(text)
self.assertEqual(result["outcome"], "success")
self.assertEqual(result["result"]["binding-type"], "simple")
self.assertEqual(result["result"]["value"], r"DOMAIN\foo")
def test_numbers_without_quotes(self):
text = r"""{
"outcome" => "success",
"result" => {
"min-pool-size" => 1233,
"new-connection-sql" => undefined
}
}"""
result = jboss7_cli._parse(text)
self.assertEqual(result["outcome"], "success")
self.assertEqual(result["result"]["min-pool-size"], 1233)
self.assertIsNone(result["result"]["new-connection-sql"])
def test_all_datasource_properties(self):
text = r"""{
"outcome" => "success",
"result" => {
"allocation-retry" => undefined,
"allocation-retry-wait-millis" => undefined,
"allow-multiple-users" => undefined,
"background-validation" => undefined,
"background-validation-millis" => undefined,
"blocking-timeout-wait-millis" => undefined,
"check-valid-connection-sql" => undefined,
"connection-properties" => undefined,
"connection-url" => "jdbc:mysql:thin:@db.example.com",
"datasource-class" => undefined,
"driver-class" => undefined,
"driver-name" => "mysql",
"enabled" => true,
"exception-sorter-class-name" => undefined,
"exception-sorter-properties" => undefined,
"flush-strategy" => "FailingConnectionOnly",
"idle-timeout-minutes" => undefined,
"jndi-name" => "java:/appDS",
"jta" => true,
"max-pool-size" => 20,
"min-pool-size" => 3,
"new-connection-sql" => undefined,
"password" => "Password4321",
"pool-prefill" => undefined,
"pool-use-strict-min" => undefined,
"prepared-statements-cache-size" => undefined,
"query-timeout" => undefined,
"reauth-plugin-class-name" => undefined,
"reauth-plugin-properties" => undefined,
"security-domain" => undefined,
"set-tx-query-timeout" => false,
"share-prepared-statements" => false,
"spy" => false,
"stale-connection-checker-class-name" => undefined,
"stale-connection-checker-properties" => undefined,
"track-statements" => "NOWARN",
"transaction-isolation" => undefined,
"url-delimiter" => undefined,
"url-selector-strategy-class-name" => undefined,
"use-ccm" => "true",
"use-fast-fail" => false,
"use-java-context" => "false",
"use-try-lock" => undefined,
"user-name" => "user1",
"valid-connection-checker-class-name" => undefined,
"valid-connection-checker-properties" => undefined,
"validate-on-match" => false,
"statistics" => {
"jdbc" => undefined,
"pool" => undefined
}
},
"response-headers" => {"process-state" => "reload-required"}
}"""
result = jboss7_cli._parse(text)
self.assertEqual(result["outcome"], "success")
self.assertEqual(result["result"]["max-pool-size"], 20)
self.assertIsNone(result["result"]["new-connection-sql"])
self.assertIsNone(result["result"]["url-delimiter"])
self.assertFalse(result["result"]["validate-on-match"])
def test_datasource_resource_one_attribute_description(self):
cli_output = """{
"outcome" => "success",
"result" => {
"description" => "A JDBC data-source configuration",
"head-comment-allowed" => true,
"tail-comment-allowed" => true,
"attributes" => {
"connection-url" => {
"type" => STRING,
"description" => "The JDBC driver connection URL",
"expressions-allowed" => true,
"nillable" => false,
"min-length" => 1L,
"max-length" => 2147483647L,
"access-type" => "read-write",
"storage" => "configuration",
"restart-required" => "no-services"
}
},
"children" => {"connection-properties" => {"description" => "The connection-properties element allows you to pass in arbitrary connection properties to the Driver.connect(url, props) method"}}
}
}
"""
result = jboss7_cli._parse(cli_output)
self.assertEqual(result["outcome"], "success")
conn_url_attributes = result["result"]["attributes"]["connection-url"]
self.assertEqual(conn_url_attributes["type"], "STRING")
self.assertEqual(
conn_url_attributes["description"], "The JDBC driver connection URL"
)
self.assertTrue(conn_url_attributes["expressions-allowed"])
self.assertFalse(conn_url_attributes["nillable"])
self.assertEqual(conn_url_attributes["min-length"], 1)
self.assertEqual(conn_url_attributes["max-length"], 2147483647)
self.assertEqual(conn_url_attributes["access-type"], "read-write")
self.assertEqual(conn_url_attributes["storage"], "configuration")
self.assertEqual(conn_url_attributes["restart-required"], "no-services")
def test_datasource_complete_resource_description(self):
cli_output = """{
"outcome" => "success",
"result" => {
"description" => "A JDBC data-source configuration",
"head-comment-allowed" => true,
"tail-comment-allowed" => true,
"attributes" => {
"connection-url" => {
"type" => STRING,
"description" => "The JDBC driver connection URL",
"expressions-allowed" => true,
"nillable" => false,
"min-length" => 1L,
"max-length" => 2147483647L,
"access-type" => "read-write",
"storage" => "configuration",
"restart-required" => "no-services"
}
},
"children" => {"connection-properties" => {"description" => "The connection-properties element allows you to pass in arbitrary connection properties to the Driver.connect(url, props) method"}}
}
}
"""
result = jboss7_cli._parse(cli_output)
self.assertEqual(result["outcome"], "success")
conn_url_attributes = result["result"]["attributes"]["connection-url"]
self.assertEqual(conn_url_attributes["type"], "STRING")
self.assertEqual(
conn_url_attributes["description"], "The JDBC driver connection URL"
)
self.assertTrue(conn_url_attributes["expressions-allowed"])
self.assertFalse(conn_url_attributes["nillable"])
self.assertEqual(conn_url_attributes["min-length"], 1)
self.assertEqual(conn_url_attributes["max-length"], 2147483647)
self.assertEqual(conn_url_attributes["access-type"], "read-write")
self.assertEqual(conn_url_attributes["storage"], "configuration")
self.assertEqual(conn_url_attributes["restart-required"], "no-services")
def test_escaping_operation_with_backslashes_and_quotes(self):
operation = (
r'/subsystem=naming/binding="java:/sampleapp/web-module/ldap/username":add(binding-type=simple,'
r' value="DOMAIN\\\\user")'
)
jboss7_cli.run_operation(self.jboss_config, operation)
self.assertEqual(
self.cmd.get_last_command(),
r"/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh --connect"
r' --controller="123.234.345.456:9999" --user="jbossadm"'
r' --password="jbossadm"'
r' --command="/subsystem=naming/binding=\"java:/sampleapp/web-module/ldap/username\":add(binding-type=simple,'
r' value=\"DOMAIN\\\\\\\\user\")"',
)
def test_run_operation_wflyctl_error(self):
call_cli_ret = {
"retcode": 1,
"stdout": '{"failure-description" => "WFLYCTL0234523: ops"}',
}
with patch(
"salt.modules.jboss7_cli._call_cli", return_value=call_cli_ret
) as _call_cli:
ret = jboss7_cli.run_operation(None, "ls", False)
self.assertEqual(ret["err_code"], "WFLYCTL0234523")
def test_run_operation_no_code_error(self):
call_cli_ret = {
"retcode": 1,
"stdout": '{"failure-description" => "ERROR234523: ops"}',
}
with patch(
"salt.modules.jboss7_cli._call_cli", return_value=call_cli_ret
) as _call_cli:
ret = jboss7_cli.run_operation(None, "ls", False)
self.assertEqual(ret["err_code"], "-1")
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Linux preprocess plug-ins."""
from __future__ import unicode_literals
import unittest
from dfvfs.helpers import fake_file_system_builder
from dfvfs.path import fake_path_spec
from plaso.preprocessors import linux
from tests.preprocessors import test_lib
class LinuxHostnamePluginTest(test_lib.ArtifactPreprocessorPluginTestCase):
"""Tests for the Linux hostname plugin."""
_FILE_DATA = b'plaso.kiddaland.net\n'
def testParseFileData(self):
"""Tests the _ParseFileData function."""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFile('/etc/hostname', self._FILE_DATA)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxHostnamePlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
self.assertEqual(knowledge_base.hostname, 'plaso.kiddaland.net')
class LinuxDistributionPluginTest(test_lib.ArtifactPreprocessorPluginTestCase):
"""Tests for the Linux distribution plugin."""
_FILE_DATA = b'Fedora release 26 (Twenty Six)\n'
def testParseFileData(self):
"""Tests the _ParseFileData function."""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFile('/etc/system-release', self._FILE_DATA)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxDistributionPlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
system_product = knowledge_base.GetValue('operating_system_product')
self.assertEqual(system_product, 'Fedora release 26 (Twenty Six)')
class LinuxIssueFilePluginTest(test_lib.ArtifactPreprocessorPluginTestCase):
"""Tests for the Linux issue file plugin."""
_FILE_DATA = b"""\
Debian GNU/Linux 5.0 \\n \\l
"""
def testParseFileData(self):
"""Tests the _ParseFileData function."""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFile('/etc/issue', self._FILE_DATA)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxIssueFilePlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
system_product = knowledge_base.GetValue('operating_system_product')
self.assertEqual(system_product, 'Debian GNU/Linux 5.0')
class LinuxStandardBaseReleasePluginTest(
test_lib.ArtifactPreprocessorPluginTestCase):
"""Tests for the Linux standard base (LSB) release plugin."""
_FILE_DATA = b"""\
DISTRIB_CODENAME=trusty
DISTRIB_DESCRIPTION="Ubuntu 14.04 LTS"
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=14.04"""
def testParseFileData(self):
"""Tests the _ParseFileData function."""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFile('/etc/lsb-release', self._FILE_DATA)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxStandardBaseReleasePlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
system_product = knowledge_base.GetValue('operating_system_product')
self.assertEqual(system_product, 'Ubuntu 14.04 LTS')
class LinuxSystemdOperatingSystemPluginTest(
test_lib.ArtifactPreprocessorPluginTestCase):
"""Tests for the Linux operating system release plugin."""
_FILE_DATA = b"""\
NAME=Fedora
VERSION="26 (Workstation Edition)"
ID=fedora
VERSION_ID=26
PRETTY_NAME="Fedora 26 (Workstation Edition)"
ANSI_COLOR="0;34"
CPE_NAME="cpe:/o:fedoraproject:fedora:26"
HOME_URL="https://fedoraproject.org/"
BUG_REPORT_URL="https://bugzilla.redhat.com/"
REDHAT_BUGZILLA_PRODUCT="Fedora"
REDHAT_BUGZILLA_PRODUCT_VERSION=26
REDHAT_SUPPORT_PRODUCT="Fedora"
REDHAT_SUPPORT_PRODUCT_VERSION=26
PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy
VARIANT="Workstation Edition"
VARIANT_ID=workstation"""
def testParseFileData(self):
"""Tests the _ParseFileData function."""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFile('/etc/os-release', self._FILE_DATA)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxSystemdOperatingSystemPlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
system_product = knowledge_base.GetValue('operating_system_product')
self.assertEqual(system_product, 'Fedora 26 (Workstation Edition)')
class LinuxTimeZonePluginTest(test_lib.ArtifactPreprocessorPluginTestCase):
"""Tests for the Linux time zone plugin."""
def testParseFileEntryWithLink(self):
"""Tests the _ParseFileEntry function on a symbolic link."""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddSymbolicLink(
'/etc/localtime', '/usr/share/zoneinfo/Europe/Zurich')
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxTimeZonePlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
self.assertEqual(knowledge_base.timezone.zone, 'Europe/Zurich')
def testParseFileEntryWithTZif(self):
"""Tests the _ParseFileEntry function on a timezone information file."""
test_file_path = self._GetTestFilePath(['localtime.tzif'])
self._SkipIfPathNotExists(test_file_path)
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFileReadData('/etc/localtime', test_file_path)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxTimeZonePlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
self.assertEqual(knowledge_base.timezone.zone, 'CET')
def testParseFileEntryWithBogusTZif(self):
"""Tests the _ParseFileEntry function on a bogus TZif file."""
test_file_path = self._GetTestFilePath(['syslog'])
self._SkipIfPathNotExists(test_file_path)
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFileReadData('/etc/localtime', test_file_path)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxTimeZonePlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
self.assertEqual(knowledge_base.timezone.zone, 'UTC')
class LinuxUserAccountsPluginTest(test_lib.ArtifactPreprocessorPluginTestCase):
"""Tests for the Linux user accounts plugin."""
_FILE_DATA = (
b'root:x:0:0:root:/root:/bin/bash\n'
b'bin:x:1:1:bin:/bin:/sbin/nologin\n'
b'daemon:x:2:2:daemon:/sbin:/sbin/nologin\n'
b'adm:x:3:4:adm:/var/adm:/sbin/nologin\n'
b'lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin\n'
b'sync:x:5:0:sync:/sbin:/bin/sync\n'
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n'
b'halt:x:7:0:halt:/sbin:/sbin/halt\n'
b'mail:x:8:12:mail:/var/spool/mail:/sbin/nologin\n'
b'operator:x:11:0:operator:/root:/sbin/nologin\n'
b'games:x:12:100:games:/usr/games:/sbin/nologin\n'
b'ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin\n'
b'nobody:x:99:99:Nobody:/:/sbin/nologin\n')
def testParseFileData(self):
"""Tests the _ParseFileData function."""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
file_system_builder.AddFile('/etc/passwd', self._FILE_DATA)
mount_point = fake_path_spec.FakePathSpec(location='/')
plugin = linux.LinuxUserAccountsPlugin()
knowledge_base = self._RunPreprocessorPluginOnFileSystem(
file_system_builder.file_system, mount_point, plugin)
users = sorted(
knowledge_base.user_accounts,
key=lambda user_account: user_account.identifier)
self.assertEqual(len(users), 13)
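# Identifiers are compared as strings, so after sorting index 4 is the 'ftp'
# account (identifier '14').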
user_account = users[4]
self.assertEqual(user_account.identifier, '14')
self.assertEqual(user_account.group_identifier, '50')
self.assertEqual(user_account.user_directory, '/var/ftp')
self.assertEqual(user_account.username, 'ftp')
self.assertEqual(user_account.shell, '/sbin/nologin')
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock']
import collections
from tornado import gen, ioloop
from tornado.concurrent import Future
class _TimeoutGarbageCollector(object):
"""Base class for objects that periodically clean up timed-out waiters.
Avoids memory leak in a common pattern like:
while True:
yield condition.wait(short_timeout)
print('looping....')
"""
def __init__(self):
self._waiters = collections.deque() # Futures.
self._timeouts = 0
def _garbage_collect(self):
# Occasionally clear timed-out waiters.
self._timeouts += 1
if self._timeouts > 100:
self._timeouts = 0
self._waiters = collections.deque(
w for w in self._waiters if not w.done())
class Condition(_TimeoutGarbageCollector):
"""A condition allows one or more coroutines to wait until notified.
Like a standard `threading.Condition`, but does not need an underlying lock
that is acquired and released.
With a `Condition`, coroutines can wait to be notified by other coroutines:
.. testcode::
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Condition
condition = Condition()
@gen.coroutine
def waiter():
print("I'll wait right here")
yield condition.wait() # Yield a Future.
print("I'm done waiting")
@gen.coroutine
def notifier():
print("About to notify")
condition.notify()
print("Done notifying")
@gen.coroutine
def runner():
# Yield two Futures; wait for waiter() and notifier() to finish.
yield [waiter(), notifier()]
IOLoop.current().run_sync(runner)
.. testoutput::
I'll wait right here
About to notify
Done notifying
I'm done waiting
`wait` takes an optional ``timeout`` argument, which is either an absolute
timestamp::
io_loop = IOLoop.current()
# Wait up to 1 second for a notification.
yield condition.wait(timeout=io_loop.time() + 1)
...or a `datetime.timedelta` for a timeout relative to the current time::
# Wait up to 1 second.
yield condition.wait(timeout=datetime.timedelta(seconds=1))
The method returns ``False`` if there's no notification
before the deadline.
"""
def __init__(self):
super(Condition, self).__init__()
self.io_loop = ioloop.IOLoop.current()
def __repr__(self):
result = '<%s' % (self.__class__.__name__, )
if self._waiters:
result += ' waiters[%s]' % len(self._waiters)
return result + '>'
def wait(self, timeout=None):
"""Wait for `.notify`.
Returns a `.Future` that resolves ``True`` if the condition is notified,
or ``False`` after a timeout.
"""
waiter = Future()
self._waiters.append(waiter)
if timeout:
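# On timeout, resolve the waiter with False and occasionally prune stale
# waiters; the timeout is cancelled once the waiter completes.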
def on_timeout():
waiter.set_result(False)
self._garbage_collect()
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
waiter.add_done_callback(
lambda _: io_loop.remove_timeout(timeout_handle))
return waiter
def notify(self, n=1):
"""Wake ``n`` waiters."""
waiters = [] # Waiters we plan to run right now.
while n and self._waiters:
waiter = self._waiters.popleft()
if not waiter.done(): # Might have timed out.
n -= 1
waiters.append(waiter)
for waiter in waiters:
waiter.set_result(True)
def notify_all(self):
"""Wake all waiters."""
self.notify(len(self._waiters))
class Event(object):
"""An event blocks coroutines until its internal flag is set to True.
Similar to `threading.Event`.
A coroutine can wait for an event to be set. Once it is set, calls to
``yield event.wait()`` will not block unless the event has been cleared:
.. testcode::
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Event
event = Event()
@gen.coroutine
def waiter():
print("Waiting for event")
yield event.wait()
print("Not waiting this time")
yield event.wait()
print("Done")
@gen.coroutine
def setter():
print("About to set the event")
event.set()
@gen.coroutine
def runner():
yield [waiter(), setter()]
IOLoop.current().run_sync(runner)
.. testoutput::
Waiting for event
About to set the event
Not waiting this time
Done
"""
def __init__(self):
self._future = Future()
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__, 'set' if self.is_set() else 'clear')
def is_set(self):
"""Return ``True`` if the internal flag is true."""
return self._future.done()
def set(self):
"""Set the internal flag to ``True``. All waiters are awakened.
Calling `.wait` once the flag is set will not block.
"""
if not self._future.done():
self._future.set_result(None)
def clear(self):
"""Reset the internal flag to ``False``.
Calls to `.wait` will block until `.set` is called.
"""
if self._future.done():
self._future = Future()
def wait(self, timeout=None):
"""Block until the internal flag is true.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
if timeout is None:
return self._future
else:
return gen.with_timeout(timeout, self._future)
class _ReleasingContextManager(object):
"""Releases a Lock or Semaphore at the end of a "with" statement.
with (yield semaphore.acquire()):
pass
# Now semaphore.release() has been called.
"""
def __init__(self, obj):
self._obj = obj
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self._obj.release()
class Semaphore(_TimeoutGarbageCollector):
"""A lock that can be acquired a fixed number of times before blocking.
A Semaphore manages a counter representing the number of `.release` calls
minus the number of `.acquire` calls, plus an initial value. The `.acquire`
method blocks if necessary until it can return without making the counter
negative.
Semaphores limit access to a shared resource. To allow access for two
workers at a time:
.. testsetup:: semaphore
from collections import deque
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.concurrent import Future
# Ensure reliable doctest output: resolve Futures one at a time.
futures_q = deque([Future() for _ in range(3)])
@gen.coroutine
def simulator(futures):
for f in futures:
yield gen.moment
f.set_result(None)
IOLoop.current().add_callback(simulator, list(futures_q))
def use_some_resource():
return futures_q.popleft()
.. testcode:: semaphore
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Semaphore
sem = Semaphore(2)
@gen.coroutine
def worker(worker_id):
yield sem.acquire()
try:
print("Worker %d is working" % worker_id)
yield use_some_resource()
finally:
print("Worker %d is done" % worker_id)
sem.release()
@gen.coroutine
def runner():
# Join all workers.
yield [worker(i) for i in range(3)]
IOLoop.current().run_sync(runner)
.. testoutput:: semaphore
Worker 0 is working
Worker 1 is working
Worker 0 is done
Worker 2 is working
Worker 1 is done
Worker 2 is done
Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until
the semaphore has been released once, by worker 0.
`.acquire` is a context manager, so ``worker`` could be written as::
@gen.coroutine
def worker(worker_id):
with (yield sem.acquire()):
print("Worker %d is working" % worker_id)
yield use_some_resource()
# Now the semaphore has been released.
print("Worker %d is done" % worker_id)
In Python 3.5, the semaphore itself can be used as an async context
manager::
async def worker(worker_id):
async with sem:
print("Worker %d is working" % worker_id)
await use_some_resource()
# Now the semaphore has been released.
print("Worker %d is done" % worker_id)
.. versionchanged:: 4.3
Added ``async with`` support in Python 3.5.
"""
def __init__(self, value=1):
super(Semaphore, self).__init__()
if value < 0:
raise ValueError('semaphore initial value must be >= 0')
self._value = value
def __repr__(self):
res = super(Semaphore, self).__repr__()
extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format(
self._value)
if self._waiters:
extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
return '<{0} [{1}]>'.format(res[1:-1], extra)
def release(self):
"""Increment the counter and wake one waiter."""
self._value += 1
while self._waiters:
waiter = self._waiters.popleft()
if not waiter.done():
self._value -= 1
# If the waiter is a coroutine paused at
#
# with (yield semaphore.acquire()):
#
# then the context manager's __exit__ calls release() at the end
# of the "with" block.
waiter.set_result(_ReleasingContextManager(self))
break
def acquire(self, timeout=None):
"""Decrement the counter. Returns a Future.
Block if the counter is zero and wait for a `.release`. The Future
raises `.TimeoutError` after the deadline.
"""
waiter = Future()
if self._value > 0:
self._value -= 1
waiter.set_result(_ReleasingContextManager(self))
else:
self._waiters.append(waiter)
if timeout:
def on_timeout():
waiter.set_exception(gen.TimeoutError())
self._garbage_collect()
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
waiter.add_done_callback(
lambda _: io_loop.remove_timeout(timeout_handle))
return waiter
def __enter__(self):
raise RuntimeError(
"Use Semaphore like 'with (yield semaphore.acquire())', not like"
" 'with semaphore'")
__exit__ = __enter__
@gen.coroutine
def __aenter__(self):
yield self.acquire()
@gen.coroutine
def __aexit__(self, typ, value, tb):
self.release()
class BoundedSemaphore(Semaphore):
"""A semaphore that prevents release() being called too many times.
If `.release` would increment the semaphore's value past the initial
value, it raises `ValueError`. Semaphores are mostly used to guard
resources with limited capacity, so a semaphore released too many times
is a sign of a bug.
"""
def __init__(self, value=1):
super(BoundedSemaphore, self).__init__(value=value)
self._initial_value = value
def release(self):
"""Increment the counter and wake one waiter."""
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
super(BoundedSemaphore, self).release()
class Lock(object):
"""A lock for coroutines.
A Lock begins unlocked, and `acquire` locks it immediately. While it is
locked, a coroutine that yields `acquire` waits until another coroutine
calls `release`.
Releasing an unlocked lock raises `RuntimeError`.
`acquire` supports the context manager protocol in all Python versions:
>>> from tornado import gen, locks
>>> lock = locks.Lock()
>>>
>>> @gen.coroutine
... def f():
... with (yield lock.acquire()):
... # Do something holding the lock.
... pass
...
... # Now the lock is released.
In Python 3.5, `Lock` also supports the async context manager
protocol. Note that in this case there is no `acquire`, because
``async with`` includes both the ``yield`` and the ``acquire``
(just as it does with `threading.Lock`):
>>> async def f(): # doctest: +SKIP
... async with lock:
... # Do something holding the lock.
... pass
...
... # Now the lock is released.
.. versionchanged:: 4.3
Added ``async with`` support in Python 3.5.
"""
def __init__(self):
self._block = BoundedSemaphore(value=1)
def __repr__(self):
return "<%s _block=%s>" % (
self.__class__.__name__,
self._block)
def acquire(self, timeout=None):
"""Attempt to lock. Returns a Future.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
return self._block.acquire(timeout)
def release(self):
"""Unlock.
The first coroutine in line waiting for `acquire` gets the lock.
If not locked, raise a `RuntimeError`.
"""
try:
self._block.release()
except ValueError:
raise RuntimeError('release unlocked lock')
def __enter__(self):
raise RuntimeError(
"Use Lock like 'with (yield lock)', not like 'with lock'")
__exit__ = __enter__
@gen.coroutine
def __aenter__(self):
yield self.acquire()
@gen.coroutine
def __aexit__(self, typ, value, tb):
self.release()
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Runs QMF tests using the qmf.client API.
import unittest, os, socket, time
from qmf.client import SyncRequestResponse, BrokerAgent, ReconnectDelays
from proton.handlers import MessagingHandler
from proton.reactor import Container
from proton.utils import BlockingConnection, ConnectionException
from proton import Message, Event
from threading import Thread
from Queue import Queue, Empty, Full
class TestPort(object):
"""Get an unused port using bind(0) and SO_REUSEADDR and hold it till close()
Can be used as `with TestPort() as tp:` Provides tp.host, tp.port and tp.addr
(a "host:port" string)
"""
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('127.0.0.1', 0)) # Testing examples is local only
self.host, self.port = socket.getnameinfo(self.sock.getsockname(), 0)
self.addr = "%s:%s" % (self.host, self.port)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
self.sock.close()
class QmfClientTest(unittest.TestCase):
"""
Test QMFv2 support using the qmf.console library.
"""
def configure(self, config):
"""Called by the qpid-python-test framework with broker config"""
self.broker = config.broker
def setUp(self):
self.agent = BrokerAgent.connect(self.broker)
def test_broker(self):
self.assertEqual(self.agent.getBroker().name, "amqp-broker")
def test_connections(self):
connections = self.agent.getAllConnections()
self.assertTrue(len(connections) > 0)
def test_queues(self):
connections = self.agent.getAllConnections()
qnames = [ "qq%s"%i for i in xrange(10)]
for q in qnames:
self.agent.addQueue(q)
self.assertEqual(q, self.agent.getQueue(q).name)
queues = self.agent.getAllQueues()
self.assertLess(set(qnames), set([q.name for q in queues]))
self.agent.delQueue("qq0")
self.assertIs(None, self.agent.getQueue("qq0"))
try:
self.agent.delQueue("nosuch")
except:
pass
def test_exchanges(self):
connections = self.agent.getAllConnections()
enames = [ "ee%s"%i for i in xrange(10)]
for e in enames:
self.agent.addExchange('fanout', e)
self.assertEqual(e, self.agent.getExchange(e).name)
exchanges = self.agent.getAllExchanges()
self.assertLess(set(enames), set([e.name for e in exchanges]))
self.agent.delExchange("ee0")
self.assertIs(None, self.agent.getExchange("ee0"))
try:
self.agent.delExchange("nosuch")
except:
pass
def test_bind(self):
self.agent.addQueue('qq')
self.agent.addExchange('direct', 'ex')
self.agent.bind('ex', 'qq', 'kk')
self.assertTrue([b for b in self.agent.getAllBindings() if b.bindingKey == 'kk'])
self.agent.unbind('ex', 'qq', 'kk')
self.assertFalse([b for b in self.agent.getAllBindings() if b.bindingKey == 'kk'])
def test_fork(self):
"""Ensure that the client is fork-safe."""
self.agent.addQueue('parent')
pid = os.fork()
if pid: # parent
self.assertEqual((pid,0), os.waitpid(pid, 0))
self.assertIs(None, self.agent.addQueue('parent'))
self.assertEqual('child', self.agent.getQueue('child').name)
else: # child
# Can't use the parent's connection.
agent = BrokerAgent.connect(self.broker)
agent.delQueue('parent')
agent.addQueue('child')
os._exit(0) # Force exit, test framework will catch SystemExit
class DisconnectServer(MessagingHandler, Thread):
"""
Server that disconnects its clients to test automatic re-connect
"""
def __init__(self, addr):
Thread.__init__(self)
MessagingHandler.__init__(self)
self.addr = addr
self.response = None # Response message
self.senders = {}
self.listening = False
self.disconnect = Queue(0) # Disconnect requests
self.disconnected = Queue(0) # Disconnects executed
# Start listener and server thread
self.container = Container(self)
self.container.start()
while not self.listening and self.container.process():
pass
self.start()
def run(self):
while self.container.process():
pass
self.container.stop()
self.container.process()
def stop(self):
self.container.stop()
self.join()
def on_start(self, event):
self.acceptor = event.container.listen(self.addr)
self.listening = True
def on_connection_bound(self, event):
# Turn off security
event.transport.require_auth(False);
event.transport.sasl().allowed_mechs("ANONYMOUS");
self.transport = event.transport
def check_disconnect(self, event):
try:
self.disconnect.get_nowait()
event.transport.close_head()
event.transport.close_tail()
self.disconnected.put(event.type)
return True
except Empty:
return False
def on_link_opening(self, event):
if event.link.is_sender:
if event.link.remote_source == "STOP":
self.reactor.stop()
if event.link.remote_source and event.link.remote_source.dynamic:
event.link.source.address = str(id(event.link))
self.senders[event.link.source.address] = event.link
else:
event.link.source.address = event.link.remote_source.address
else:
event.link.target.address = event.link.remote_target.address
event.link.flow(1)
self.check_disconnect(event)
def on_message(self, event):
if self.check_disconnect(event):
return
em = event.message
m = self.response or em
m.address = em.reply_to
m.correlation_id = em.correlation_id
self.senders[m.address].send(m)
event.link.flow(1)
class ReconnectTests(unittest.TestCase):
def setUp(self):
with TestPort() as tp:
self.server = DisconnectServer(tp.addr)
def tearDown(self):
self.server.stop()
def test_reconnect_delays(self):
self.assertEquals([0, 1, 2, 4, 7, 7, 7, 7], list(ReconnectDelays(1, 7, 3)))
self.assertEquals([0, .2, .4, .8, 1.0], list(ReconnectDelays(.2, 1, 0)))
self.assertRaises(ValueError, ReconnectDelays, 0, 1)
self.assertRaises(ValueError, ReconnectDelays, 1, -1)
d = iter(ReconnectDelays(5, 5)) # 5's forever
self.assertEquals(0, d.next())
for x in xrange(100):
self.assertEquals(5, d.next())
query_response = Message(body=[],
properties={"method":"response", "qmf.agent":"broker",
"qmf.content":"_data", "qmf.opcode":"_query_response"})
method_response = Message(body={"_arguments":[]},
properties={"method":"response", "qmf.agent":"broker",
"qmf.content":"_data", "qmf.opcode":"_method_response"})
def test_reconnect_agent(self):
# Dummy response message
self.server.response = self.query_response
# Failure during initial connection should raise an exception, no reconnect
self.server.disconnect.put(True)
self.assertRaises(ConnectionException, BrokerAgent.connect, self.server.addr, reconnect_delays=[0, 0, 0])
self.assertEquals(Event.LINK_REMOTE_OPEN, self.server.disconnected.get())
agent = BrokerAgent.connect(self.server.addr, reconnect_delays=[0, 0, 0])
agent.getBroker() # Should work OK
self.server.disconnect.put(True) # Disconnect on message delivery
self.server.disconnect.put(True) # Disconnect first reconnect on link open
self.server.disconnect.put(True) # Disconnect second reconnect on link open
agent.getBroker()
self.assertEquals(Event.DELIVERY, self.server.disconnected.get())
self.assertEquals(Event.LINK_REMOTE_OPEN, self.server.disconnected.get())
self.assertEquals(Event.LINK_REMOTE_OPEN, self.server.disconnected.get())
# Try a healthy get
agent.getBroker()
self.server.disconnect.put(True)
agent.list("foo")
self.assertEquals(Event.DELIVERY, self.server.disconnected.get())
self.server.disconnect.put(True)
agent.getConnection("foo")
self.assertEquals(Event.DELIVERY, self.server.disconnected.get())
# Try a method call
self.server.response = self.method_response
self.server.disconnect.put(True)
agent.echo()
self.assertEquals(Event.DELIVERY, self.server.disconnected.get())
# We should give up after 4 disconnects
self.server.disconnect.put(True)
self.server.disconnect.put(True)
self.server.disconnect.put(True)
self.server.disconnect.put(True)
self.assertRaises(ConnectionException, agent.echo)
def test_reconnect_agent_delay(self):
self.server.response = self.query_response
agent = BrokerAgent.connect(self.server.addr, reconnect_delays=[0.1, 0.2])
def elapsed(f, *args, **kwargs):
t = time.time()
f(*args, **kwargs)
return time.time() - t
self.server.disconnect.put(True)
self.assertLess(0.1, elapsed(agent.getBroker))
self.server.disconnect.put(True)
self.server.disconnect.put(True)
self.assertLess(0.3, elapsed(agent.getBroker))
if __name__ == "__main__":
shutil.rmtree("brokertest.tmp", True)
os.execvp("qpid-python-test", ["qpid-python-test", "-m", "qmf_client_tests"])
|
|
#-------------------------------------------------------------------------------
# Name: Raster information from vector relations
# Purpose: Classify features of interest based on a raster whose pixels carry classification values.
# Given a catalog in a vector layer with the addresses of the images related to each polygon, count
# the pixels with given values that fall inside any given polygon. The raster files hold a
# land-usage classification that was generated automatically; this classification covers the
# whole country. We have rural property boundaries and other polygons for which we want to measure
# how much area was classified as each of 13 distinct classes. This approach takes the intersection of
# each image boundary polygon with each feature of interest and builds a raster mask.
# The mask has the same resolution as the original image (RapidEye, 5 meters) with binary values,
# being 1 if the pixel is part of the intersection and 0 if it is not. This mask is then multiplied,
# as a matrix, by the matrix of pixel values from the image (in this case 14 possible values).
# Finally a histogram is built with bins that separate the intended classes, and the count of
# each bin is added to the vector layer with the features of interest.
#
# Author: leandro.biondo
#
# Created: 05/10/2016
# Copyright: (c) leandro.biondo 2016
# Licence: GNU GPL
#-------------------------------------------------------------------------------
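# Minimal sketch (illustrative only) of the mask-multiply-histogram idea described
# above: a binary polygon mask zeroes out pixels outside the feature, and a
# histogram over the masked class codes gives per-class pixel counts. In the real
# script the mask comes from gdal.RasterizeLayer and the counts are multiplied by
# 25, the area in m2 of a 5 m pixel.
def _mask_histogram_sketch():
    import numpy as np
    classes = np.array([[1, 2, 2], [0, 3, 1]])  # classified pixels (class codes)
    mask = np.array([[1, 1, 0], [0, 1, 1]])     # 1 inside the polygon, 0 outside
    masked = classes * mask                     # pixels outside the polygon become 0
    counts, _ = np.histogram(masked, bins=[0, 1, 2, 3, 4])
    return counts                               # pixel count per class inside the polygon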
#!/usr/bin/env python
# import modules
import gdal
import numpy as np
from osgeo import ogr, osr
import glob
import os
gdal.UseExceptions()
#
#shapefilebr = "C:/biondo/buff_nasc.shp"
#driver = ogr.GetDriverByName("ESRI Shapefile")
#dataSourcebr = driver.Open(shapefilebr, True)
#layerbr = dataSourcebr.GetLayer()
#Here the vector layer with the catalog should be given. This catalog can be built with the QGIS plugin
#"Image Footprint"; it is necessary to select the image boundary option. The path (caminho) field will be used to open
#the images with classified pixels; you can use a * as a mask if there is more than one catalog
for infile in glob.glob(r'/home/gecad/CAR/Demandas/Nascentes/aaa_nascentes_catalogo.shp'):
print infile
rapideye = infile
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource_rd = driver.Open(rapideye, True)
layer_rd = dataSource_rd.GetLayer()
shapefile = ('/home/gecad/CAR/Demandas/Nascentes/aaa_nascentes_catalogo.shp')
dataSource = driver.Open(shapefile, True)
layer = dataSource.GetLayer()
layer.CreateField(ogr.FieldDefn("indef", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("uso_cons", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("rvegnat", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("vereda", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("mangue", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("salgado", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("apicum", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("restinga", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("agua", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("vegremo", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("regene", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("areaurb", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("nuvens", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("foraLi", ogr.OFTInteger),False)
pixel_size = 5
NoData_value = 255
contard =0
c5=0
for feat_rd in layer_rd:
caminho_img = feat_rd.GetField("caminho")
print caminho_img
try:
src_ds = gdal.Open( caminho_img)
except RuntimeError, e:
print 'Unable to open INPUT'
print e
#break
continue
try:
srcband = src_ds.GetRasterBand(1)
print srcband
except RuntimeError, e:
# for example, try GetRasterBand(10)
            print 'Band ( %i ) not found' % 1  # band 1 was requested above
print e
#sys.exit(1)
continue
banda_class = srcband.ReadAsArray().astype(np.float)
if banda_class.size==(5000*5000):
classes = banda_class
geom=feat_rd.GetGeometryRef()
#print 'spat ', layer_rd.GetSpatialRef()
# print 'proj ', src_ds.GetProjection()
contorno=geom.GetEnvelope()
x_min = contorno[0]
y_max = contorno[3]
x_res = 5000
y_res = 5000
# target_ds = gdal.GetDriverByName('MEM').Create('', x_res, y_res, gdal.GDT_Byte)
# target_ds.SetGeoTransform(src_ds.GetGeoTransform())
# target_ds.SetProjection(src_ds.GetProjection())
# band = target_ds.GetRasterBand(1)
# band.SetNoDataValue(NoData_value)
#
contard=contard+1
conta=0
cont_loop=0
for feature in layer:
geom2=feature.GetGeometryRef()
verifica_f=feature.GetField("foraLi")
#print 'feat' , caminho_feat
#print verifica_f
cont_loop+=1
if geom2.Intersects(geom) :
c5+=1
if (verifica_f is None):
intersect = geom.Intersection(geom2)
print intersect.GetArea()
print (intersect.GetArea()/geom2.GetArea())
if (intersect.GetArea()/geom2.GetArea())<0.5:
continue
conta+=1
SpatialRef = osr.SpatialReference()
SpatialRef.SetWellKnownGeogCS( "EPSG:4674" )
memoutdriver=ogr.GetDriverByName('MEMORY')
memsource=memoutdriver.CreateDataSource('memData')
tmp=memoutdriver.Open('memData', 1)
dstlayer = memsource.CreateLayer('teste', SpatialRef)
target_ds = gdal.GetDriverByName('MEM').Create('', x_res, y_res, gdal.GDT_Byte)
target_ds.SetGeoTransform(src_ds.GetGeoTransform())
target_ds.SetProjection(src_ds.GetProjection())
band = target_ds.GetRasterBand(1)
band.SetNoDataValue(NoData_value)
dstfeature = ogr.Feature(dstlayer.GetLayerDefn())
dstfeature.SetGeometry(intersect)
dstlayer.CreateFeature(dstfeature)
# print 'resultado', dstfeature.GetGeometryRef().GetEnvelope()
# Rasterize
gdal.RasterizeLayer(target_ds, [1], dstlayer, burn_values=[1])
array = band.ReadAsArray()
#print np.histogram(array, bins=[0,1,250,300])
# Read as array
dstlayer=None
memsource.Destroy()
#tabela = srcband.ReadAsArray()
#print tabela
resposta1 = np.histogram(classes, bins=[0,1,20])
classes2 = classes*array
resposta = np.histogram(classes2, bins=[0,1,2,3,4,5,6,7,8,9,10,11,12,20])
feature.SetField("indef", int(resposta1[0][0]*25))
feature.SetField("uso_cons", int(resposta[0][1]*25))
feature.SetField("rvegnat", int(resposta[0][2]*25))
feature.SetField("vereda", int(resposta[0][3]*25))
feature.SetField("mangue", int(resposta[0][4]*25))
feature.SetField("salgado", int(resposta[0][5]*25))
feature.SetField("apicum", int(resposta[0][6]*25))
feature.SetField("restinga", int(resposta[0][7]*25))
feature.SetField("agua", int(resposta[0][8]*25))
feature.SetField("vegremo", int(resposta[0][9]*25))
feature.SetField("regene", int(resposta[0][10]*25))
feature.SetField("areaurb", int(resposta[0][11]*25))
feature.SetField("nuvens", int(resposta[0][12]*25))
feature.SetField("foraLi", int((resposta[0][0]-resposta1[0][0])*25))
layer.SetFeature(feature)
feature.Destroy()
print "ImagemImovel: %d | %d | %d | %d" % (c5,contard,conta,cont_loop)
c5+=1
#create an image file and put the results in 3 band for testing purposes
#
# saida = "/home/gecad/CAR/Demandas/Nascentes/img_testes/img%d%d.tif" % (contard,c5)
# format = "GTiff"
# driver2 = gdal.GetDriverByName( format )
# metadata = driver2.GetMetadata()
# if metadata.has_key(gdal.DCAP_CREATE) \
# and metadata[gdal.DCAP_CREATE] == 'YES':
# print 'Driver %s supports Create() method.' % format
# if metadata.has_key(gdal.DCAP_CREATECOPY) \
# and metadata[gdal.DCAP_CREATECOPY] == 'YES':
# print 'Driver %s supports CreateCopy() method.' % format
#
# dst_ds = driver2.Create( saida, 5000, 5000, 3, gdal.GDT_Float32, ['COMPRESS=LZW'] )
# srs = osr.SpatialReference()
# dst_ds.SetProjection(src_ds.GetProjection())
# dst_ds.SetGeoTransform(src_ds.GetGeoTransform())
#
# dst_ds.GetRasterBand(1).WriteArray(classes)
# dst_ds.GetRasterBand(2).WriteArray(array)
# dst_ds.GetRasterBand(3).WriteArray(classes2)
# dst_ds=None
# #
# if c5==10:
# layer=None
# dataSource=None
# layerbr=None
# dataSourcebr=None
# layer_rd=None
# dataSource_rd=None
# target_ds= None
# print 'fim forcado'
# break
#
target_ds= None
#break
layer.ResetReading()
layer=None
dataSource=None
layerbr=None
dataSourcebr=None
layer_rd=None
dataSource_rd=None
print 'fim'
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from .. import activations, initializations, regularizers, constraints
from ..utils.theano_utils import shared_zeros, on_gpu
from ..layers.core import Layer
if on_gpu():
from theano.sandbox.cuda import dnn
class Convolution1D(Layer):
def __init__(self, input_dim, nb_filter, filter_length,
init='uniform', activation='linear', weights=None,
border_mode='valid', subsample_length=1,
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None):
if border_mode not in {'valid', 'full', 'same'}:
raise Exception('Invalid border mode for Convolution1D:', border_mode)
super(Convolution1D, self).__init__()
self.nb_filter = nb_filter
self.input_dim = input_dim
self.filter_length = filter_length
self.subsample_length = subsample_length
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = (1, subsample_length)
self.border_mode = border_mode
self.input = T.tensor3()
self.W_shape = (nb_filter, input_dim, filter_length, 1)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
if self.activity_regularizer:
self.activity_regularizer.set_layer(self)
self.regularizers.append(self.activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.constraints = [self.W_constraint, self.b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
border_mode = self.border_mode
if border_mode == 'same':
border_mode = 'full'
conv_out = T.nnet.conv.conv2d(X, self.W, border_mode=border_mode, subsample=self.subsample)
if self.border_mode == 'same':
shift_x = (self.filter_length - 1) // 2
conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]
output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"nb_filter": self.nb_filter,
"filter_length": self.filter_length,
"init": self.init.__name__,
"activation": self.activation.__name__,
"border_mode": self.border_mode,
"subsample_length": self.subsample_length,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
"b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class Convolution2D(Layer):
def __init__(self, nb_filter, stack_size, nb_row, nb_col,
init='glorot_uniform', activation='linear', weights=None,
border_mode='valid', subsample=(1, 1),
W_regularizer=None, b_regularizer=None, activity_regularizer=None,
W_constraint=None, b_constraint=None):
if border_mode not in {'valid', 'full', 'same'}:
raise Exception('Invalid border mode for Convolution2D:', border_mode)
super(Convolution2D, self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.subsample = subsample
self.border_mode = border_mode
self.nb_filter = nb_filter
self.stack_size = stack_size
self.nb_row = nb_row
self.nb_col = nb_col
self.input = T.tensor4()
self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
self.W = self.init(self.W_shape)
self.b = shared_zeros((nb_filter,))
self.params = [self.W, self.b]
self.regularizers = []
self.W_regularizer = regularizers.get(W_regularizer)
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
if self.activity_regularizer:
self.activity_regularizer.set_layer(self)
self.regularizers.append(self.activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.constraints = [self.W_constraint, self.b_constraint]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
border_mode = self.border_mode
if on_gpu() and dnn.dnn_available():
if border_mode == 'same':
assert(self.subsample == (1, 1))
pad_x = (self.nb_row - self.subsample[0]) // 2
pad_y = (self.nb_col - self.subsample[1]) // 2
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=(pad_x, pad_y))
else:
conv_out = dnn.dnn_conv(img=X,
kerns=self.W,
border_mode=border_mode,
subsample=self.subsample)
else:
if border_mode == 'same':
border_mode = 'full'
conv_out = T.nnet.conv.conv2d(X, self.W,
border_mode=border_mode,
subsample=self.subsample)
if self.border_mode == 'same':
shift_x = (self.nb_row - 1) // 2
shift_y = (self.nb_col - 1) // 2
conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]
return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
def get_config(self):
return {"name": self.__class__.__name__,
"nb_filter": self.nb_filter,
"stack_size": self.stack_size,
"nb_row": self.nb_row,
"nb_col": self.nb_col,
"init": self.init.__name__,
"activation": self.activation.__name__,
"border_mode": self.border_mode,
"subsample": self.subsample,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
"b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
class MaxPooling1D(Layer):
def __init__(self, pool_length=2, stride=None, ignore_border=True):
super(MaxPooling1D, self).__init__()
self.pool_length = pool_length
self.stride = stride
if self.stride:
self.st = (self.stride, 1)
else:
self.st = None
self.input = T.tensor3()
self.poolsize = (pool_length, 1)
self.ignore_border = ignore_border
def get_output(self, train):
X = self.get_input(train)
X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.st, ignore_border=self.ignore_border)
output = output.dimshuffle(0, 2, 1, 3)
return T.reshape(output, (output.shape[0], output.shape[1], output.shape[2]))
def get_config(self):
return {"name": self.__class__.__name__,
"stride": self.stride,
"pool_length": self.pool_length,
"ignore_border": self.ignore_border}
class MaxPooling2D(Layer):
def __init__(self, poolsize=(2, 2), stride=None, ignore_border=True):
super(MaxPooling2D, self).__init__()
self.input = T.tensor4()
self.poolsize = poolsize
self.stride = stride
self.ignore_border = ignore_border
def get_output(self, train):
X = self.get_input(train)
output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride, ignore_border=self.ignore_border)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"poolsize": self.poolsize,
"ignore_border": self.ignore_border,
"stride": self.stride}
class UpSample1D(Layer):
def __init__(self, length=2):
super(UpSample1D, self).__init__()
self.length = length
self.input = T.tensor3()
def get_output(self, train):
X = self.get_input(train)
output = theano.tensor.extra_ops.repeat(X, self.length, axis=1)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"length": self.length}
class UpSample2D(Layer):
def __init__(self, size=(2, 2)):
super(UpSample2D, self).__init__()
self.input = T.tensor4()
self.size = size
def get_output(self, train):
X = self.get_input(train)
Y = theano.tensor.extra_ops.repeat(X, self.size[0], axis=2)
output = theano.tensor.extra_ops.repeat(Y, self.size[1], axis=3)
return output
def get_config(self):
return {"name": self.__class__.__name__,
"size": self.size}
class ZeroPadding2D(Layer):
def __init__(self, pad=(1, 1)):
super(ZeroPadding2D, self).__init__()
self.pad = pad
self.input = T.tensor4()
def get_output(self, train):
X = self.get_input(train)
pad = self.pad
in_shape = X.shape
out_shape = (in_shape[0], in_shape[1], in_shape[2] + 2 * pad[0], in_shape[3] + 2 * pad[1])
out = T.zeros(out_shape)
indices = (slice(None), slice(None), slice(pad[0], in_shape[2] + pad[0]), slice(pad[1], in_shape[3] + pad[1]))
return T.set_subtensor(out[indices], X)
def get_config(self):
return {"name": self.__class__.__name__,
"pad": self.pad}
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
from glance.common import exception
from glance.common import store_utils
from glance.common import utils
import glance.api.common
import glance.db
from glance import i18n
import glance.registry.client.v1.api as registry
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def initiate_deletion(req, location_data, id):
"""
Deletes image data from the location of backend store.
:param req: The WSGI/Webob Request object
:param location_data: Location to the image data in a data store
:param id: Opaque image identifier
"""
store_utils.delete_image_location_from_backend(req.context,
id, location_data)
def _kill(req, image_id, from_state):
"""
Marks the image status to `killed`.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
# TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html
# needs updating to reflect the fact that queued->killed and saving->killed
# are both allowed.
registry.update_image_metadata(req.context, image_id,
{'status': 'killed'},
from_state=from_state)
def safe_kill(req, image_id, from_state):
"""
Mark image killed without raising exceptions if it fails.
Since _kill is meant to be called from exceptions handlers, it should
not raise itself, rather it should just log its error.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
try:
_kill(req, image_id, from_state)
except Exception:
LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
"""
    Upload image data to the specified store.
    Uploads the data to the backend store and cleans up on error.
"""
image_id = image_meta['id']
db_api = glance.db.get_api()
image_size = image_meta.get('size')
try:
# By default image_data will be passed as CooperativeReader object.
# But if 'user_storage_quota' is enabled and 'remaining' is not None
# then it will be passed as object of LimitingReader to
# 'store_add_to_backend' method.
image_data = utils.CooperativeReader(image_data)
remaining = glance.api.common.check_quota(
req.context, image_size, db_api, image_id=image_id)
if remaining is not None:
image_data = utils.LimitingReader(image_data, remaining)
(uri,
size,
checksum,
location_metadata) = store_api.store_add_to_backend(
image_meta['id'],
image_data,
image_meta['size'],
store,
context=req.context)
location_data = {'url': uri,
'metadata': location_metadata,
'status': 'active'}
try:
# recheck the quota in case there were simultaneous uploads that
# did not provide the size
glance.api.common.check_quota(
req.context, size, db_api, image_id=image_id)
except exception.StorageQuotaFull:
with excutils.save_and_reraise_exception():
LOG.info(_LI('Cleaning up %s after exceeding '
'the quota') % image_id)
store_utils.safe_delete_from_backend(
req.context, image_meta['id'], location_data)
def _kill_mismatched(image_meta, attr, actual):
supplied = image_meta.get(attr)
if supplied and supplied != actual:
msg = (_("Supplied %(attr)s (%(supplied)s) and "
"%(attr)s generated from uploaded image "
"(%(actual)s) did not match. Setting image "
"status to 'killed'.") % {'attr': attr,
'supplied': supplied,
'actual': actual})
LOG.error(msg)
safe_kill(req, image_id, 'saving')
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
# Verify any supplied size/checksum value matches size/checksum
# returned from store when adding image
_kill_mismatched(image_meta, 'size', size)
_kill_mismatched(image_meta, 'checksum', checksum)
# Update the database with the checksum returned
# from the backend store
LOG.debug("Updating image %(image_id)s data. "
"Checksum set to %(checksum)s, size set "
"to %(size)d", {'image_id': image_id,
'checksum': checksum,
'size': size})
update_data = {'checksum': checksum,
'size': size}
try:
try:
state = 'saving'
image_meta = registry.update_image_metadata(req.context,
image_id,
update_data,
from_state=state)
except exception.Duplicate:
image = registry.get_image_metadata(req.context, image_id)
if image['status'] == 'deleted':
raise exception.NotFound()
else:
raise
except exception.NotAuthenticated as e:
# Delete image data due to possible token expiration.
LOG.debug("Authentication error - the token may have "
"expired during file upload. Deleting image data for "
" %s " % image_id)
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPUnauthorized(explanation=e.msg, request=req)
except exception.NotFound:
msg = _LI("Image %s could not be found after upload. The image may"
" have been deleted during the upload.") % image_id
LOG.info(msg)
# NOTE(jculp): we need to clean up the datastore if an image
# resource is deleted while the image data is being uploaded
#
# We get "location_data" from above call to store.add(), any
# exceptions that occur there handle this same issue internally,
# Since this is store-agnostic, should apply to all stores.
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPPreconditionFailed(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StoreAddDisabled:
msg = _("Error in store configuration. Adding images to store "
"is disabled.")
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPGone(explanation=msg, request=req,
content_type='text/plain')
except exception.Duplicate as e:
msg = (_("Attempt to upload duplicate image: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
# NOTE(dosaboy): do not delete the image since it is likely that this
# conflict is a result of another concurrent upload that will be
# successful.
notifier.error('image.upload', msg)
raise webob.exc.HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden upload attempt: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except store_api.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPServiceUnavailable(explanation=msg,
request=req,
content_type='text/plain')
except exception.ImageSizeLimitExceeded as e:
msg = (_("Denying attempt to upload image larger than %d bytes.")
% CONF.image_size_cap)
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except exception.StorageQuotaFull as e:
msg = (_("Denying attempt to upload image because it exceeds the "
"quota: %s") % utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except webob.exc.HTTPError:
# NOTE(bcwaldon): Ideally, we would just call 'raise' here,
# but something in the above function calls is affecting the
# exception context and we must explicitly re-raise the
# caught exception.
msg = _LE("Received HTTP error while uploading image %s") % image_id
notifier.error('image.upload', msg)
with excutils.save_and_reraise_exception():
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
except (ValueError, IOError) as e:
msg = _("Client disconnected before sending all data to backend")
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
except Exception as e:
msg = _("Failed to upload image %s") % image_id
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPInternalServerError(explanation=msg,
request=req,
content_type='text/plain')
return image_meta, location_data
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
import logging
from itertools import product
from airflow.operators.s3_to_hive_operator import S3ToHiveTransfer
from collections import OrderedDict
from airflow.exceptions import AirflowException
from tempfile import NamedTemporaryFile, mkdtemp
import gzip
import bz2
import shutil
import filecmp
import errno
class S3ToHiveTransferTest(unittest.TestCase):
def setUp(self):
self.fn = {}
self.task_id = 'S3ToHiveTransferTest'
self.s3_key = 'S32hive_test_file'
self.field_dict = OrderedDict([('Sno', 'BIGINT'), ('Some,Text', 'STRING')])
self.hive_table = 'S32hive_test_table'
self.delimiter = '\t'
self.create = True
self.recreate = True
self.partition = {'ds': 'STRING'}
self.headers = True
self.check_headers = True
self.wildcard_match = False
self.input_compressed = False
self.kwargs = {'task_id': self.task_id,
's3_key': self.s3_key,
'field_dict': self.field_dict,
'hive_table': self.hive_table,
'delimiter': self.delimiter,
'create': self.create,
'recreate': self.recreate,
'partition': self.partition,
'headers': self.headers,
'check_headers': self.check_headers,
'wildcard_match': self.wildcard_match,
'input_compressed': self.input_compressed
}
try:
header = "Sno\tSome,Text \n".encode()
line1 = "1\tAirflow Test\n".encode()
line2 = "2\tS32HiveTransfer\n".encode()
self.tmp_dir = mkdtemp(prefix='test_tmps32hive_')
# create sample txt, gz and bz2 with and without headers
with NamedTemporaryFile(mode='wb+',
dir=self.tmp_dir,
delete=False) as f_txt_h:
self._set_fn(f_txt_h.name, '.txt', True)
f_txt_h.writelines([header, line1, line2])
fn_gz = self._get_fn('.txt', True) + ".gz"
with gzip.GzipFile(filename=fn_gz,
mode="wb") as f_gz_h:
self._set_fn(fn_gz, '.gz', True)
f_gz_h.writelines([header, line1, line2])
fn_bz2 = self._get_fn('.txt', True) + '.bz2'
with bz2.BZ2File(filename=fn_bz2,
mode="wb") as f_bz2_h:
self._set_fn(fn_bz2, '.bz2', True)
f_bz2_h.writelines([header, line1, line2])
# create sample txt, bz and bz2 without header
with NamedTemporaryFile(mode='wb+',
dir=self.tmp_dir,
delete=False) as f_txt_nh:
self._set_fn(f_txt_nh.name, '.txt', False)
f_txt_nh.writelines([line1, line2])
fn_gz = self._get_fn('.txt', False) + ".gz"
with gzip.GzipFile(filename=fn_gz,
mode="wb") as f_gz_nh:
self._set_fn(fn_gz, '.gz', False)
f_gz_nh.writelines([line1, line2])
fn_bz2 = self._get_fn('.txt', False) + '.bz2'
with bz2.BZ2File(filename=fn_bz2,
mode="wb") as f_bz2_nh:
self._set_fn(fn_bz2, '.bz2', False)
f_bz2_nh.writelines([line1, line2])
        # Catch BaseException so KeyboardInterrupt is handled as well
except BaseException as e:
logging.error(e)
self.tearDown()
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
# ENOENT - no such file or directory
if e.errno != errno.ENOENT:
raise e
    # Helper method to record a file name keyed by its
    # file type (file extension and header flag)
def _set_fn(self, fn, ext, header):
key = self._get_key(ext, header)
self.fn[key] = fn
# Helper method to fetch a file of a
# certain format (file extension and header)
def _get_fn(self, ext, header):
key = self._get_key(ext, header)
return self.fn[key]
def _get_key(self, ext, header):
key = ext + "_" + ('h' if header else 'nh')
return key
def _cp_file_contents(self, fn_src, fn_dest):
with open(fn_src, 'rb') as f_src, open(fn_dest, 'wb') as f_dest:
shutil.copyfileobj(f_src, f_dest)
def _check_file_equality(self, fn_1, fn_2, ext):
# gz files contain mtime and filename in the header that
# causes filecmp to return False even if contents are identical
# Hence decompress to test for equality
if(ext == '.gz'):
with gzip.GzipFile(fn_1, 'rb') as f_1,\
NamedTemporaryFile(mode='wb') as f_txt_1,\
gzip.GzipFile(fn_2, 'rb') as f_2,\
NamedTemporaryFile(mode='wb') as f_txt_2:
shutil.copyfileobj(f_1, f_txt_1)
shutil.copyfileobj(f_2, f_txt_2)
f_txt_1.flush()
f_txt_2.flush()
return filecmp.cmp(f_txt_1.name, f_txt_2.name, shallow=False)
else:
return filecmp.cmp(fn_1, fn_2, shallow=False)
def test_bad_parameters(self):
self.kwargs['check_headers'] = True
self.kwargs['headers'] = False
self.assertRaisesRegexp(AirflowException,
"To check_headers.*",
S3ToHiveTransfer,
**self.kwargs)
def test__get_top_row_as_list(self):
self.kwargs['delimiter'] = '\t'
fn_txt = self._get_fn('.txt', True)
header_list = S3ToHiveTransfer(**self.kwargs).\
_get_top_row_as_list(fn_txt)
self.assertEqual(header_list, ['Sno', 'Some,Text'],
msg="Top row from file doesnt matched expected value")
self.kwargs['delimiter'] = ','
header_list = S3ToHiveTransfer(**self.kwargs).\
_get_top_row_as_list(fn_txt)
self.assertEqual(header_list, ['Sno\tSome', 'Text'],
msg="Top row from file doesnt matched expected value")
def test__match_headers(self):
self.kwargs['field_dict'] = OrderedDict([('Sno', 'BIGINT'),
('Some,Text', 'STRING')])
self.assertTrue(S3ToHiveTransfer(**self.kwargs).
_match_headers(['Sno', 'Some,Text']),
msg="Header row doesnt match expected value")
# Testing with different column order
self.assertFalse(S3ToHiveTransfer(**self.kwargs).
_match_headers(['Some,Text', 'Sno']),
msg="Header row doesnt match expected value")
# Testing with extra column in header
self.assertFalse(S3ToHiveTransfer(**self.kwargs).
_match_headers(['Sno', 'Some,Text', 'ExtraColumn']),
msg="Header row doesnt match expected value")
def test__delete_top_row_and_compress(self):
s32hive = S3ToHiveTransfer(**self.kwargs)
# Testing gz file type
fn_txt = self._get_fn('.txt', True)
gz_txt_nh = s32hive._delete_top_row_and_compress(fn_txt,
'.gz',
self.tmp_dir)
fn_gz = self._get_fn('.gz', False)
self.assertTrue(self._check_file_equality(gz_txt_nh, fn_gz, '.gz'),
msg="gz Compressed file not as expected")
# Testing bz2 file type
bz2_txt_nh = s32hive._delete_top_row_and_compress(fn_txt,
'.bz2',
self.tmp_dir)
fn_bz2 = self._get_fn('.bz2', False)
self.assertTrue(self._check_file_equality(bz2_txt_nh, fn_bz2, '.bz2'),
msg="bz2 Compressed file not as expected")
@unittest.skipIf(mock is None, 'mock package not present')
@mock.patch('airflow.operators.s3_to_hive_operator.HiveCliHook')
@mock.patch('airflow.operators.s3_to_hive_operator.S3Hook')
def test_execute(self, mock_s3hook, mock_hiveclihook):
        # Testing txt, gz and bz2 files with and without header row
for test in product(['.txt', '.gz', '.bz2'], [True, False]):
ext = test[0]
has_header = test[1]
self.kwargs['headers'] = has_header
self.kwargs['check_headers'] = has_header
logging.info("Testing {0} format {1} header".
format(ext,
('with' if has_header else 'without'))
)
self.kwargs['input_compressed'] = (False if ext == '.txt' else True)
self.kwargs['s3_key'] = self.s3_key + ext
ip_fn = self._get_fn(ext, self.kwargs['headers'])
op_fn = self._get_fn(ext, False)
# Mock s3 object returned by S3Hook
mock_s3_object = mock.Mock(key=self.kwargs['s3_key'])
mock_s3_object.get_contents_to_file.side_effect = \
lambda dest_file: \
self._cp_file_contents(ip_fn, dest_file.name)
mock_s3hook().get_key.return_value = mock_s3_object
            # file parameter to HiveCliHook.load_file is compared
            # against the expected file output
mock_hiveclihook().load_file.side_effect = \
lambda *args, **kwargs: \
self.assertTrue(
self._check_file_equality(args[0],
op_fn,
ext
),
msg='{0} output file not as expected'.format(ext))
# Execute S3ToHiveTransfer
s32hive = S3ToHiveTransfer(**self.kwargs)
s32hive.execute(None)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# pKaTool - scripts for analysing chemical shift perturbations
# Copyright (C) 2010 Predrag Kukic & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: [email protected]
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
qe=1.602E-19
e0=8.85E-12
au_field = 5.14225E+11
kT_to_MV = 2.5692 #from kT/eA to MV/cm
MV_to_au = 5142.25 #from MV/cm to ppm/au
MV_to_au_efg = 971744.7 # from MV/cm2 to ppm/au efg
nsp = {'N':977.0, 'H':90}
conversionHv2v3 = {
## v2 keys
## v3 values
'1HH1':'HH11', ## ARG
'1HH2':'HH12', ## ARG
'2HH1':'HH21', ## ARG
'2HH2':'HH22', ## ARG
'1HE2':'HE21', ## GLN
'2HE2':'HE22', ## GLN
'1HG1':'HG11', ## VAL
'2HG1':'HG12', ## VAL
'3HG1':'HG13', ## VAL
'1HG2':'HG21', ## VAL
'2HG2':'HG22', ## VAL
'3HG2':'HG23', ## VAL
'1HD1':'HD11', ## LEU
'2HD1':'HD12', ## LEU
'3HD1':'HD13', ## LEU
'1HD2':'HD21', ## LEU,ASN
'2HD2':'HD22', ## LEU,ASN
'3HD2':'HD23', ## LEU
}
conversionvHv3v2 = {
## v3 keys
## v2 values
'HH11':'1HH1', ## ARG
'HH12':'1HH2', ## ARG
'HH21':'2HH1', ## ARG
'HH22':'2HH2', ## ARG
'HE21':'1HE2', ## GLN
'HE22':'2HE2', ## GLN
'HG11':'1HG1', ## VAL
'HG12':'2HG1', ## VAL
'HG13':'3HG1', ## VAL
'HG21':'1HG2', ## VAL
'HG22':'2HG2', ## VAL
'HG23':'3HG2', ## VAL
'HD11':'1HD1', ## LEU
'HD12':'2HD1', ## LEU
'HD13':'3HD1', ## LEU
'HD21':'1HD2', ## LEU,ASN
'HD22':'2HD2', ## LEU,ASN
'HD23':'3HD2', ## LEU
}
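# Illustrative helper (not part of the original script): translate a PDB v2
# hydrogen name to its v3 equivalent, leaving names without an entry unchanged.
def _convert_h_name_v2_to_v3(atom_name):
    return conversionHv2v3.get(atom_name, atom_name)  # e.g. '1HH1' -> 'HH11'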
chargesSol = {
'OW':-.834, ## TIP3
'O':-.834, ## TIP3
'HW1':.417, ## TIP3
'H1':.417, ## TIP3
'HW2':.417, ## TIP3
'H2':.417, ## TIP3
'CL':-1,
'Cl':-1,
'Cl-':-1,
}
chloride = [' Cl', ' CL', 'Cl-', 'CL-']
solvent = ['SOL', 'WAT']
modelPKA = {'ASP':4.0, 'GLU':4.4,
'HIS':6.3, 'CYS':8.7,'TYR':9.6,
'LYS':10.4, 'ARG':13.0
}
backboneAtoms = [
'N','H','H1','H2','H3','HXT', ## H3 if N-terminal (H,HXT in PDB)
'CA','HA','HA2','HA3', ## HA2 if Gly (HA3 in PDB)
'C','O','OC1','OC2','OXT', ## OC1,OC2 if C-terminal (O,OXT in PDB)
]
terminalAtoms = [
'H1','H2','H3','H','HXT', ## N-terminal
'C','O','OC1','OC2','OXT', ##C-terminal
'C',"""O''""","""O'""", ##C-terminal
]
distanceAtoms = {'ASP':'CG', 'GLU':'CD',
'HIS':'CE1', 'CYS':'SG','TYR':'CZ',
'LYS':'NZ', 'ARG':'CZ'
}
import string
def getTitGroup(line):
'''
    Return the titGroupID in the form chainID:resNum:resName
'''
chainID = line[21]
resName = line[17:20].strip()
resNum = line[22:26].strip()
resNum = string.zfill(resNum, 4)
return '%s:%s:%s'%(chainID, resNum, resName)
def getCoordinates(line):
'''
Return [x, y, z] coordinates of the atom
'''
x = line[30:38].strip()
y = line[38:46].strip()
z = line[46:54].strip()
coordinates = '[%s, %s, %s]'%(x, y, z)
return coordinates
def getChainID(titGroup):
'''
Return the chainID of the titratable group
'''
return string.split(titGroup, ':')[0]
def getResNum(titGroup):
'''
    Return the residue number of the titGroup
'''
return int(string.split(titGroup, ':')[1])
def getResName(titGroup):
'''
Return residue name of the titGroup
'''
return string.split(titGroup, ':')[2]
def parseResName(line):
'''
Parse residue name of the line
'''
return line[17:20].strip()
def parseAtomName(line):
'''
Parse atom name of the line
'''
return line[12:16].strip()
def parseResNum(line):
'''
Parse residue number from the line
'''
return int(line[22:26].strip())
def parseChainID(line):
'''
    Parse the chain ID from the line
'''
return line[21]
def parsePQRCharge(line):
'''
Parse charges from the line
'''
return (float(line[55:62].strip()))
def getDistance(coord1, coord2):
'''
    Calculate the distance between two points in space
'''
import numpy
import math
xDiff = coord2[0]-coord1[0]
yDiff = coord2[1]-coord1[1]
zDiff = coord2[2]-coord1[2]
Diff = [xDiff, yDiff, zDiff]
dist = math.sqrt(xDiff*xDiff+yDiff*yDiff+zDiff*zDiff)
return Diff, dist
def getDistanceAngle(coord1, coord2, titCoord):
'''
    Calculate the charge distance and the cosine of the angle between the bond vector and the charge vector
'''
import numpy
bondVector, distBond = getDistance(coord1, coord2)
chargeVector, distCharge = getDistance(coord1, titCoord)
dp=numpy.dot(bondVector,chargeVector)
cosAngle=dp/(distBond*distCharge)
return distCharge, cosAngle
def fieldCoulomb(distCharge, cosAngle, charge):
'''
    Calculate the electric field using Coulomb's law
'''
import math
E = 1.0/(4*math.pi*e0)*qe/(distCharge*1E-10)**2 *charge*cosAngle / au_field
return E
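# Usage sketch with made-up coordinates (illustrative only): combine
# getDistanceAngle() and fieldCoulomb() to obtain the field from a single point
# charge projected along a bond, in the atomic units implied by au_field above.
def _field_usage_sketch():
    bond_start = [0.0, 0.0, 0.0]   # e.g. backbone N position
    bond_end = [0.0, 0.0, 1.0]     # e.g. amide H position
    charge_pos = [0.0, 0.0, 5.0]   # position of the titratable charge
    distCharge, cosAngle = getDistanceAngle(bond_start, bond_end, charge_pos)
    return fieldCoulomb(distCharge, cosAngle, -1.0)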
|
|
# pylint: disable-msg=W0104,R0914
# public symbols
__all__ = ["ExprEvaluator"]
import __builtin__
import ast
import math
import weakref
from openmdao.main.index import INDEX, ATTR, CALL, SLICE, EXTSLICE
from openmdao.main.printexpr import _get_attr_node, _get_long_name, \
transform_expression, ExprPrinter
def _import_functs(mod, dct, names=None):
if names is None:
names = dir(mod)
for name in names:
if not name.startswith('_'):
dct[name] = getattr(mod, name)
# this dict will act as the local scope when we eval our expressions
_expr_dict = {
'math': math,
}
# add stuff from math lib directly to our locals dict so users won't have to
# put 'math.' in front of all of their calls to standard math functions
# make numpy functions available if possible
import numpy
names = ['array', 'cosh', 'ldexp', 'hypot', 'tan', 'isnan', 'log', 'fabs',
'floor', 'sqrt', 'frexp', 'degrees', 'pi', 'log10', 'modf',
'copysign', 'cos', 'ceil', 'isinf', 'sinh', 'trunc',
'expm1', 'e', 'tanh', 'radians', 'sin', 'fmod', 'exp', 'log1p']
_import_functs(numpy, _expr_dict, names=names)
_expr_dict['pow'] = numpy.power # pow in math is not complex stepable, but this one is!
math_names = ['asin', 'asinh', 'atanh', 'atan', 'atan2', 'factorial',
'fsum', 'lgamma', 'erf', 'erfc', 'acosh', 'acos', 'gamma']
_import_functs(math, _expr_dict, names=math_names)
_expr_dict['numpy'] = numpy
# if scipy is available, add some functions
try:
import scipy.special
except ImportError:
pass
else:
_import_functs(scipy.special, _expr_dict, names=['gamma', 'polygamma'])
from numpy import ndarray, ndindex, zeros, complex, imag, issubdtype
_Missing = object()
def is_in_process(scope, vname):
"""Return True if the object referenced by vname is accessible
within scope via getattr from this process.
"""
vname = vname.split('[', 1)[0]
obj = scope
for name in vname.split('.'):
obj = getattr(obj, name, _Missing)
if obj is _Missing:
return False
return True
def in_expr_locals(scope, name):
"""Return True if the given (dotted) name refers to something in our
_expr_dict dict, e.g., math.sin. Raises a KeyError if the name
refers to something in _expr_dict that doesn't exist, e.g., math.foobar.
Returns False if the name refers to nothing in _expr_dict,
e.g., mycomp.x.
"""
if hasattr(scope, name):
return False
if hasattr(__builtin__, name) or name == '_local_setter_':
return True
parts = name.split('.')
obj = _expr_dict.get(parts[0], _Missing)
if obj is _Missing:
return False
for part in parts[1:]:
obj = getattr(obj, part, _Missing)
if obj is _Missing:
raise KeyError("Can't find '%s' in current scope" % name)
return True
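# Behaviour sketch (not part of the original module; 'mycomp.x' is a made-up name):
# exercises the three outcomes described in the in_expr_locals docstring above.
def _in_expr_locals_sketch(scope):
    results = {}
    results['math.sin'] = in_expr_locals(scope, 'math.sin')  # True: 'math' lives in _expr_dict
    results['mycomp.x'] = in_expr_locals(scope, 'mycomp.x')  # False: 'mycomp' is not in _expr_dict
    try:
        in_expr_locals(scope, 'math.foobar')                 # 'math' is known but has no 'foobar'
    except KeyError:
        results['math.foobar'] = 'KeyError'
    return results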
class ExprVarScanner(ast.NodeVisitor):
"""This node visitor collects all attribute names (including dotted ones)
that occur in the given AST.
"""
def __init__(self):
self.varnames = set()
def visit_Name(self, node):
self.varnames.add(node.id)
def visit_Attribute(self, node):
long_name = _get_long_name(node)
if long_name:
self.varnames.add(long_name)
def get_names(self, scope):
"""Returns a tuple of the form (local_vars, external_vars)."""
local_vars = []
extern_vars = []
for v in self.varnames:
if in_expr_locals(scope, v):
continue
if is_in_process(scope, v):
local_vars.append(v)
else:
extern_vars.append(v)
return (local_vars, extern_vars)
class ExprTransformer(ast.NodeTransformer):
"""Transforms dotted name references, e.g., abc.d.g in an expression AST
into scope.get('abc.d.g') and turns assignments into the appropriate
set() calls. Also, translates function calls and indirect attribute
accesses into a form that can be passed to a downstream object and
executed there. For example, abc.d[xyz](1, pdq-10).value would translate
to, e.g., scope.get('abc.d', [(0,xyz), (0,[1,pdq-10]), (1,'value')]).
"""
def __init__(self, expreval, rhs=None, getter='get'):
self.expreval = expreval
self.rhs = rhs
self._stack = [] # use this to see if we're inside of parens or
# brackets so that we always translate to 'get'
# even if we're on the lhs
self.getter = getter
super(ExprTransformer, self).__init__()
def visit(self, node, subs=None):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
if visitor == self.generic_visit:
return visitor(node)
else:
return visitor(node, subs)
def _name_to_node(self, node, name, subs=None):
"""Given a dotted name, return the proper node depending on whether
the name is resolvable in 'local' scope or not.
"""
if name is None:
return super(ExprTransformer, self).generic_visit(node)
if in_expr_locals(self.expreval.scope, name):
return node
names = ['scope']
self.expreval.var_names.add(name)
args = [ast.Str(s=name)]
if self.rhs and len(self._stack) == 0:
fname = 'set'
args.append(self.rhs)
else:
fname = self.getter
keywords = []
names.append(fname)
called_obj = _get_attr_node(names)
if subs:
args.append(ast.List(elts=subs, ctx=ast.Load()))
return ast.copy_location(ast.Call(func=called_obj, args=args,
ctx=node.ctx, keywords=keywords), node)
def visit_Name(self, node, subs=None):
return self._name_to_node(node, node.id, subs)
def visit_Attribute(self, node, subs=None):
long_name = _get_long_name(node)
if long_name is None:
# this Attribute contains more than just names/attrs
if subs is None:
subs = []
subs[0:0] = [ast.Tuple(elts=[ast.Num(n=ATTR), ast.Str(s=node.attr)],
ctx=ast.Load())]
newnode = self.visit(node.value, subs)
if newnode is node.value:
return node
return newnode
return self._name_to_node(node, long_name, subs)
def _get_slice_vals(self, node):
lower = ast.Name(id='None', ctx=ast.Load()) if node.lower is None else self.visit(node.lower)
upper = ast.Name(id='None', ctx=ast.Load()) if node.upper is None else self.visit(node.upper)
step = ast.Name(id='None', ctx=ast.Load()) if node.step is None else self.visit(node.step)
return ast.Tuple(elts=[lower, upper, step], ctx=ast.Load())
def visit_Subscript(self, node, subs=None):
self._stack.append(node)
if subs is None:
subs = []
if isinstance(node.slice, ast.Index):
subs[0:0] = [ast.Tuple(elts=[ast.Num(n=INDEX), self.visit(node.slice.value)],
ctx=ast.Load())]
elif isinstance(node.slice, ast.Slice):
subs[0:0] = [ast.Tuple(elts=[ast.Num(n=SLICE), self._get_slice_vals(node.slice)],
ctx=ast.Load())]
elif isinstance(node.slice, ast.ExtSlice):
elts = [ast.Num(n=EXTSLICE)]
for val in node.slice.dims:
if isinstance(val, ast.Slice):
elts.append(self._get_slice_vals(val))
else:
elts.append(self.visit(val.value))
subs[0:0] = [ast.Tuple(elts=elts, ctx=ast.Load())]
else:
raise ValueError("unknown Subscript child node: %s"
% node.slice.__class__.__name__)
self._stack.pop()
newnode = self.visit(node.value, subs)
if newnode is node.value:
return node
elif isinstance(newnode, ast.Attribute):
node.value = newnode
node.slice = self.generic_visit(node.slice)
return node
return newnode
def visit_Call(self, node, subs=None):
name = _get_long_name(node.func)
if name is not None:
if in_expr_locals(self.expreval.scope, name) or '.' not in name:
return self.generic_visit(node)
if subs is None:
subs = []
self._stack.append(node)
call_list = []
if hasattr(node, 'kwargs') and node.kwargs:
if isinstance(node.kwargs, ast.Name):
raise SyntaxError("Can't translate '**%s'" % node.kwargs.id)
else:
raise SyntaxError("Can't translate '**' arguments")
if hasattr(node, 'starargs') and node.starargs:
if isinstance(node.starargs, ast.Name):
raise SyntaxError("Can't translate '**%s'" % node.starargs.id)
else:
raise SyntaxError("Can't translate '*' arguments")
if hasattr(node, 'keywords'):
elts = [ast.Tuple(elts=[ast.Str(kw.arg), self.visit(kw.value)],
ctx=ast.Load()) for kw in node.keywords]
if len(call_list) > 0 or len(elts) > 0:
call_list.append(ast.List(elts=elts, ctx=ast.Load()))
if len(node.args) > 0 or len(call_list) > 0:
call_list.append(ast.List(elts=[self.visit(arg) for arg in node.args],
ctx=ast.Load()))
self._stack.pop()
# call_list is reversed here because we built it backwards in order
# to make it a little easier to leave off unnecessary empty stuff
subs[0:0] = [ast.Tuple(elts=[ast.Num(n=CALL)] + call_list[::-1],
ctx=ast.Load())]
return self.visit(node.func, subs)
def visit_Module(self, node, subs=None):
# Make sure there is only one statement or expression
if len(node.body) > 1 or \
(node.body and not isinstance(node.body[0], (ast.Assign, ast.Expr))):
raise RuntimeError("Only one assignment statement or expression"
" is allowed")
top = super(ExprTransformer, self).generic_visit(node)
if top.body and isinstance(top.body[0], ast.Call):
top.body[0] = ast.Expr(value=top.body[0])
return top
def visit_Assign(self, node, subs=None):
if len(node.targets) > 1:
raise RuntimeError("only one expression is allowed on left hand"
" side of assignment")
rhs = self.visit(node.value)
lhs = ExprTransformer(self.expreval, rhs=rhs).visit(node.targets[0])
if isinstance(lhs, (ast.Name, ast.Subscript, ast.Attribute)):
lhs.ctx = ast.Store()
return ast.Assign(targets=[lhs], value=rhs)
return lhs
class ExprExaminer(ast.NodeVisitor):
""""Examines various properties of an expression for later analysis."""
def __init__(self, node, evaluator=None):
super(ExprExaminer, self).__init__()
self._in_idx = False
self.ref_ok = True
self.const = True
self.simplevar = True # if true, it's just a simple variable name
# (possibly with dots)
self.refs = set() # variables and/or subscripted variables referenced
# in this expression
self.const_indices = True
self.assignable = True
self._evaluator = evaluator
self.visit(node)
def _maybe_add_ref(self, name):
"""Will add a ref if it's not a name from the locals dict."""
if name != 'None' and self._in_idx:
self.const_indices = False
if not self.ref_ok:
return
if self._evaluator and in_expr_locals(self._evaluator.scope, name):
return
self.refs.add(name)
def visit_Index(self, node):
self.simplevar = self.const = False
self.visit(node.value)
def visit_Assign(self, node):
self.assignable = False
self.const = False
self.simplevar = False
self.generic_visit(node)
def visit_Slice(self, node):
self.simplevar = self.const = False
self.generic_visit(node)
def visit_ExtSlice(self, node):
self.simplevar = self.const = False
for d in node.dims:
self.visit(d)
def visit_Name(self, node):
self.const = False
self._maybe_add_ref(node.id)
self.generic_visit(node)
def visit_Attribute(self, node):
self.const = False
long_name = _get_long_name(node)
if long_name:
self._maybe_add_ref(long_name)
else:
self.simplevar = False
self.generic_visit(node)
def visit_Subscript(self, node):
self.const = False
p = ExprPrinter()
p.visit(node)
self._maybe_add_ref(p.get_text())
ok = self.ref_ok
self.ref_ok = False
self.visit(node.value)
old = self._in_idx
self._in_idx = True
self.visit(node.slice)
self._in_idx = old
self.ref_ok = ok
def visit_Num(self, node):
self.simplevar = False
if self.const:
self.assignable = False
self.generic_visit(node)
def _no_assign(self, node):
self.assignable = self.simplevar = False
self.generic_visit(node)
visit_Load = ast.NodeVisitor.generic_visit
visit_Store = ast.NodeVisitor.generic_visit
visit_Expr = ast.NodeVisitor.generic_visit
visit_Expression = ast.NodeVisitor.generic_visit
visit_Call = _no_assign
visit_USub = _no_assign
visit_UAdd = _no_assign
visit_And = _no_assign
visit_Or = _no_assign
# operators
visit_Add = _no_assign
visit_Sub = _no_assign
visit_Mult = _no_assign
visit_Div = _no_assign
visit_Mod = _no_assign
visit_Pow = _no_assign
visit_LShift = _no_assign
    visit_RShift = _no_assign
visit_BitOr = _no_assign
visit_BitXor = _no_assign
visit_BitAnd = _no_assign
visit_FloorDiv = _no_assign
# cmp operators
visit_Eq = _no_assign
visit_NotEq = _no_assign
visit_Lt = _no_assign
visit_LtE = _no_assign
visit_Gt = _no_assign
visit_GtE = _no_assign
visit_Is = _no_assign
visit_IsNot = _no_assign
visit_In = _no_assign
visit_NotIn = _no_assign
class ExprEvaluator(object):
"""A class that translates an expression string into a new string
containing any necessary framework access functions, e.g., set, get. The
compiled bytecode is stored within the object so that it doesn't have to
be reparsed during later evaluations. A scoping object is required at
construction time or evaluation time, and that object determines the form
of the translated expression. Array entry access, 'downstream' attribute
access, and function invocation are also translated in a similar way.
For a description of the format of the 'index' arg of set/get that is
generated by ExprEvaluator, see the doc string for the
``openmdao.main.index.process_index_entry`` function.
"""
def __init__(self, text, scope=None, getter='get'):
self._scope = None
self.scope = scope
self.text = text
self.getter = getter
self.var_names = set()
self.cached_grad_eq = None
@property
def text(self):
"""The expression string."""
return self._text
@text.setter
def text(self, value):
self._code = self._assignment_code = None
self._examiner = self.cached_grad_eq = None
self._text = value
@property
def scope(self):
"""The scoping object used to evaluate the expression."""
if self._scope:
scope = self._scope()
if scope is None:
raise RuntimeError('ExprEvaluator scoping object no longer'
' exists.')
return scope
return None
@scope.setter
def scope(self, value):
scp = None if self._scope is None else self._scope()
if scp is None or value is not scp:
self._code = self._assignment_code = None
self._examiner = self.cached_grad_eq = None
if value is not None:
self._scope = weakref.ref(value)
else:
self._scope = None
@classmethod
def _invalid_expression_error(cls, unresolved_vars, expr=None, msg=None):
"""
Creates and returns an invalid expression error that can be raised.
Also adds the unresolved variables as an attribute to the error.
This is so the message can be more specifically tailored by catching
the error, creating your own message, and passing the necessary
arguments to generate a new error.
An example of this can be seen in Constraint.__init__.
unresolved_vars: list of unresolved variables
expr: Expression string
msg: Message with {0} and {1} placeholders to be formatted.
{0} will be replaced by expr and {1} will be replaced
by the unresolved variables
"""
if not msg:
msg = "Expression '{0}' has invalid variables {1}"
if not expr:
expr = cls.text
# do some formatting for the error message
# wrap the variables in single quotes
formatted_vars = ["'{0}'".format(var) for var in unresolved_vars]
# if there is more than one variable,
        # separate the variables with commas
if len(formatted_vars) == 1:
formatted_vars = ''.join(formatted_vars)
else:
formatted_vars = ', '.join(formatted_vars)
        # return the error for the caller to raise
return ValueError(msg.format(expr, formatted_vars))
def is_valid_assignee(self):
"""Returns True if the syntax of our expression is valid to
be on the left-hand side of an assignment. No check is
performed to see if the variable(s) in the expression actually
exist.
"""
if self._code is None:
self._pre_parse()
return self._allow_set
def refers_to(self, name):
"""Returns True if this expression refers to the given variable or
component.
"""
if name == self.text:
return True
elif name in self.text:
if name in self.get_referenced_varpaths(copy=False):
return True
if name in self.get_referenced_compnames():
return True
return False
def __getstate__(self):
"""Return dict representing this container's state."""
state = self.__dict__.copy()
# remove weakref to scope because it won't pickle
state['_scope'] = self.scope
state['_code'] = None # <type 'code'> won't pickle either.
state['cached_grad_eq'] = None
if state.get('_assignment_code'):
state['_assignment_code'] = None # more unpicklable <type 'code'>
return state
def __setstate__(self, state):
"""Restore this component's state."""
self.__dict__.update(state)
if self._scope is not None:
self._scope = weakref.ref(self._scope)
def _pre_parse(self):
try:
root = ast.parse(self.text, mode='eval')
except SyntaxError:
# might be an assignment, try mode='exec'
root = ast.parse(self.text, mode='exec')
self._allow_set = False
return root
if not isinstance(root.body,
(ast.Attribute, ast.Name, ast.Subscript)):
self._allow_set = False
else:
self._allow_set = True
return root
def _parse_get(self):
astree = self._pre_parse()
new_ast = ExprTransformer(self, getter=self.getter).visit(astree)
# compile the transformed AST
ast.fix_missing_locations(new_ast)
mode = 'exec' if isinstance(new_ast, ast.Module) else 'eval'
return (new_ast, compile(new_ast, '<string>', mode))
def _parse_set(self):
self._pre_parse()
if not self._allow_set:
raise ValueError("expression '%s' can't be set to a value"
% self.text)
root = ast.parse("%s=_local_setter_" % self.text, mode='exec')
## transform into a 'set' call to set the specified variable
assign_ast = ExprTransformer(self, getter=self.getter).visit(root)
ast.fix_missing_locations(assign_ast)
code = compile(assign_ast, '<string>', 'exec')
return (assign_ast, code)
def _parse(self):
self.var_names = set()
try:
new_ast, self._code = self._parse_get()
except SyntaxError as err:
raise SyntaxError("failed to parse expression '%s': %s"
% (self.text, str(err)))
return new_ast
def _get_updated_scope(self, scope):
if scope is not None:
self.scope = scope
return scope
return self.scope
def evaluate(self, scope=None):
"""Return the value of the scoped string, evaluated
using the eval() function.
"""
if scope is None:
scope = self.scope
else:
self.scope = scope
try:
if self._code is None:
self._parse()
return eval(self._code, _expr_dict, locals())
except Exception, err:
raise type(err)("can't evaluate expression "
"'%s': %s" % (self.text, str(err)))
def refs(self, copy=True):
"""Returns a list of all variables referenced,
including any array indices."""
if self._code is None:
self._parse()
if self._examiner is None:
self._examiner = ExprExaminer(ast.parse(self.text,
mode='eval'), self)
if copy:
return self._examiner.refs.copy()
else:
return self._examiner.refs
def _finite_difference(self, grad_code, var_dict, target_var, stepsize, index=None):
""" Perform central difference
"""
if index:
var_dict[target_var][index] += 0.5 * stepsize
else:
var_dict[target_var] += 0.5 * stepsize
yp = eval(grad_code, _expr_dict, locals())
if (isinstance(yp, ndarray)):
yp = yp.flatten()
if index:
var_dict[target_var][index] -= stepsize
else:
var_dict[target_var] -= stepsize
ym = eval(grad_code, _expr_dict, locals())
if isinstance(ym, ndarray):
ym = ym.flatten()
grad = (yp - ym) / stepsize
return grad
def _complex_step(self, grad_code, var_dict, target_var, stepsize, index=None):
""" Perform complex step
"""
if index:
var_dict[target_var][index] += stepsize * 1j
else:
var_dict[target_var] += stepsize * 1j
yp = eval(grad_code, _expr_dict, locals())
if (isinstance(yp, ndarray)):
yp = yp.flatten()
if not issubdtype(yp.dtype, complex):
return None
return imag(yp / stepsize)
elif not isinstance(yp, complex):
return None
else:
            # note: imag returns a 0-d array here; not sure why.
return imag(yp / stepsize).reshape(1, )[0]
return imag(yp / stepsize)
def evaluate_gradient(self, stepsize=1.0e-6, wrt=None, scope=None):
"""Return a dict containing the gradient of the expression with respect
to each of the referenced varpaths. The gradient is calculated by 1st
order central difference for now.
stepsize: float
Step size for finite difference.
wrt: list of varpaths
Varpaths for which we want to calculate the gradient.
"""
scope = self._get_updated_scope(scope)
inputs = list(self.refs(copy=False))
if wrt is None:
wrt = inputs
elif isinstance(wrt, str):
wrt = [wrt]
var_dict = {}
new_names = {}
for name in inputs:
if '[' in name:
new_expr = ExprEvaluator(name, scope)
replace_val = new_expr.evaluate()
else:
replace_val = scope.get(name)
if isinstance(replace_val, ndarray):
replace_val = replace_val.astype(numpy.complex)
else:
replace_val = float(replace_val)
var_dict[name] = replace_val
new_name = "var_dict['%s']" % name
new_names[name] = new_name
# First time through, cache our gradient code.
if self.cached_grad_eq is None:
grad_text = transform_expression(self.text, new_names)
grad_root = ast.parse(grad_text, mode='eval')
self.cached_grad_eq = compile(grad_root, '<string>', 'eval')
grad_code = self.cached_grad_eq
gradient = {}
for var in wrt:
# A "fake" boundary connection in an assembly has a special
# format. All expression derivatives from inside the assembly are
# handled outside the assembly.
if var[0:4] == '@bin':
gradient[var] = 1.0
continue
# Don't take derivative with respect to a variable that is not in
# the expression
if var not in inputs:
gradient[var] = 0.0
continue
val = var_dict[var]
if isinstance(val, ndarray):
yp = eval(grad_code, _expr_dict, locals())
if isinstance(yp, ndarray):
gradient[var] = zeros((yp.size, val.size))
else:
gradient[var] = zeros((1, val.size))
for i, index in enumerate(ndindex(*val.shape)):
try:
base = var_dict[var][index]
grad = self._complex_step(grad_code, var_dict,
var, stepsize, index)
except:
grad = None
if grad is None:
var_dict[var][index] = base
grad = self._finite_difference(grad_code, var_dict, var,
stepsize, index)
gradient[var][:, i] = grad
var_dict[var][index] = base
else:
try:
base = var_dict[var]
grad = self._complex_step(grad_code, var_dict, var, stepsize)
except:
grad = None
if grad is None:
var_dict[var] = base
grad = self._finite_difference(grad_code, var_dict, var,
stepsize)
gradient[var] = grad
var_dict[var] = base
if isinstance(gradient[var], ndarray):
gradient[var] = gradient[var].reshape((gradient[var].size, 1))
return gradient
def set(self, val, scope=None, force=False):
"""Set the value of the referenced object to the specified value."""
scope = self._get_updated_scope(scope)
# self.assignment_code is a compiled version of an assignment
# statement of the form 'somevar = _local_setter_', so we set
# _local_setter_ here and the exec call will pull it out of the
# locals dict.
_local_setter_ = val
_local_force_ = force
if self._assignment_code is None:
_, self._assignment_code = self._parse_set()
exec (self._assignment_code, _expr_dict, locals())
def get_metadata(self, metaname=None, scope=None):
"""Return the specified piece of metadata if metaname is provided.
Otherwise return the whole metadata dictionary. If metaname is supplied
but does not exist for a given variable, None will be returned for the
variable.
Returns a list of tuples containing (varname, metadata)
corresponding to each variable referenced by this expression.
"""
scope = self._get_updated_scope(scope)
invalid_variables = []
metadata = []
for name in self.get_referenced_varpaths(copy=False):
try:
metadata.append((name, scope.get_metadata(name, metaname)))
except AttributeError:
invalid_variables.append(name)
if invalid_variables:
msg = "Couldn't find metadata for traits {traits}"
traits = ', '.join("'{0}'".format(var) for var in invalid_variables)
msg = msg.format(traits=traits)
raise AttributeError(msg)
return metadata
def get_referenced_varpaths(self, copy=True):
"""Return a set of pathnames relative to *scope.parent* and based on
the names of Variables referenced in our expression string.
"""
if self._code is None:
self._parse()
if copy:
return self.var_names.copy()
else:
return self.var_names
def get_referenced_compnames(self):
"""Return a set of Component names based on the pathnames of
Variables referenced in our expression string. No checking is
performed to verify that a given name refers to an actual Component.
"""
if self._code is None:
self._parse()
nameset = set()
for name in self.var_names:
parts = name.split('.', 1)
if len(parts) > 1:
nameset.add(parts[0])
return nameset
def check_resolve(self):
"""Return True if all variables referenced by our expression can
be resolved.
"""
return len(self.get_unresolved()) == 0
def get_unresolved(self):
"""Return a list of all variables that cannot be resolved."""
if self._code is None:
self._parse()
if len(self.var_names) > 0:
scope = self.scope
if scope:
return [n for n in self.var_names if not scope.contains(n)]
return self.var_names.copy()
return []
def scope_transform(self, scope, new_scope, parent=None):
"""Return a transformed version of our text string where the attribute
names are changed based on a change in scope to the given object.
"""
if self._code is None:
self._parse()
oldname = scope.name + '.' if scope.name else ''
newname = new_scope.name + '.' if new_scope.name else ''
if scope is new_scope.parent or scope is parent:
oldname = 'parent.'
elif new_scope is scope.parent or new_scope is parent:
newname = 'parent.'
mapping = {}
for var in self.get_referenced_varpaths(copy=False):
if var.startswith(newname):
mapping[var] = var[len(newname):]
else:
mapping[var] = oldname + var
try:
return transform_expression(self.text, mapping)
except SyntaxError as err:
raise SyntaxError("failed to transform expression '%s': %s"
% (self.text, str(err)))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.text == other.text
return False
def __repr__(self):
return '<ExprEval(text=%s)>' % self._text
def __str__(self):
return self._text
class ConnectedExprEvaluator(ExprEvaluator):
"""An ExprEvaluator that restricts the allowable syntax to only those
expressions that can be connected within a model. For example, array
indexing is allowed, but all indices must be constants if the expression
is on the destination side of a connection.
"""
def __init__(self, *args, **kwargs):
self._is_dest = kwargs.get('is_dest', False)
if 'is_dest' in kwargs:
del kwargs['is_dest']
super(ConnectedExprEvaluator, self).__init__(*args, **kwargs)
def _parse(self):
super(ConnectedExprEvaluator, self)._parse()
self._examiner = ExprExaminer(ast.parse(self.text, mode='eval'), self)
if self._is_dest:
if not self._examiner.const_indices:
raise RuntimeError("bad destination expression '%s': only"
" constant indices are allowed for arrays"
" and slices" % self.text)
if len(self._examiner.refs) != 1:
raise RuntimeError("bad connected expression '%s' must"
" reference exactly one variable" %
self.text)
if not self._examiner.assignable:
raise RuntimeError("bad destination expression '%s': not"
" assignable" % self.text)
def refers_to(self, name):
"""Returns True if this expression refers to the given variable or
component.
"""
if super(ConnectedExprEvaluator, self).refers_to(name):
return True
return name in self.refs(copy=False)
if __name__ == '__main__':
import sys
from openmdao.main.container import build_container_hierarchy
txt = ''.join(sys.argv[1:])
root = ast.parse(txt, mode='exec')
print 'original:\n %s' % txt
print '\noriginal AST dump:'
print ast.dump(root, annotate_fields=True)
print '\nprinted AST:'
ep = ExprPrinter()
ep.visit(root)
print ep.get_text()
top = build_container_hierarchy({
'a': {
'b': {'c': 1, 'd': 2}
},
'x': {
'y': {'z': 3.14}
}
})
expreval = ExprEvaluator(txt, scope=top)
root = ExprTransformer(expreval).visit(root)
print '\ntransformed AST dump:'
print ast.dump(root, annotate_fields=True)
print '\nprinted transformed AST:'
ep = ExprPrinter()
ep.visit(root)
print ep.get_text()
print '\nvars referenced: %s' % expreval.get_referenced_varpaths(copy=False)
print '\nattempting to compile the transformed AST...'
ast.fix_missing_locations(root)
code = compile(root, '<string>', 'exec')
|
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
class coinspot(Exchange):
def describe(self):
return self.deep_extend(super(coinspot, self).describe(), {
'id': 'coinspot',
'name': 'CoinSpot',
'countries': ['AU'], # Australia
'rateLimit': 1000,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createMarketOrder': None,
'createOrder': True,
'createReduceOnlyOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchIsolatedPositions': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkOHLCV': False,
'fetchOrderBook': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28208429-3cacdf9a-6896-11e7-854e-4c79a772a30f.jpg',
'api': {
'public': 'https://www.coinspot.com.au/pubapi',
'private': 'https://www.coinspot.com.au/api',
},
'www': 'https://www.coinspot.com.au',
'doc': 'https://www.coinspot.com.au/api',
'referral': 'https://www.coinspot.com.au/register?code=PJURCU',
},
'api': {
'public': {
'get': [
'latest',
],
},
'private': {
'post': [
'orders',
'orders/history',
'my/coin/deposit',
'my/coin/send',
'quote/buy',
'quote/sell',
'my/balances',
'my/orders',
'my/buy',
'my/sell',
'my/buy/cancel',
'my/sell/cancel',
'ro/my/balances',
'ro/my/balances/{cointype}',
'ro/my/deposits',
'ro/my/withdrawals',
'ro/my/transactions',
'ro/my/transactions/{cointype}',
'ro/my/transactions/open',
'ro/my/transactions/{cointype}/open',
'ro/my/sendreceive',
'ro/my/affiliatepayments',
'ro/my/referralpayments',
],
},
},
'markets': {
'ADA/AUD': {'id': 'ada', 'symbol': 'ADA/AUD', 'base': 'ADA', 'quote': 'AUD', 'baseId': 'ada', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'BTC/AUD': {'id': 'btc', 'symbol': 'BTC/AUD', 'base': 'BTC', 'quote': 'AUD', 'baseId': 'btc', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'BTC/USDT': {'id': 'btc', 'symbol': 'BTC/USDT', 'base': 'BTC', 'quote': 'USDT', 'baseId': 'btc', 'quoteId': 'usdt', 'type': 'spot', 'spot': True},
'ETH/AUD': {'id': 'eth', 'symbol': 'ETH/AUD', 'base': 'ETH', 'quote': 'AUD', 'baseId': 'eth', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'XRP/AUD': {'id': 'xrp', 'symbol': 'XRP/AUD', 'base': 'XRP', 'quote': 'AUD', 'baseId': 'xrp', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'LTC/AUD': {'id': 'ltc', 'symbol': 'LTC/AUD', 'base': 'LTC', 'quote': 'AUD', 'baseId': 'ltc', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'DOGE/AUD': {'id': 'doge', 'symbol': 'DOGE/AUD', 'base': 'DOGE', 'quote': 'AUD', 'baseId': 'doge', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'RFOX/AUD': {'id': 'rfox', 'symbol': 'RFOX/AUD', 'base': 'RFOX', 'quote': 'AUD', 'baseId': 'rfox', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'POWR/AUD': {'id': 'powr', 'symbol': 'POWR/AUD', 'base': 'POWR', 'quote': 'AUD', 'baseId': 'powr', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'NEO/AUD': {'id': 'neo', 'symbol': 'NEO/AUD', 'base': 'NEO', 'quote': 'AUD', 'baseId': 'neo', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'TRX/AUD': {'id': 'trx', 'symbol': 'TRX/AUD', 'base': 'TRX', 'quote': 'AUD', 'baseId': 'trx', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'EOS/AUD': {'id': 'eos', 'symbol': 'EOS/AUD', 'base': 'EOS', 'quote': 'AUD', 'baseId': 'eos', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'XLM/AUD': {'id': 'xlm', 'symbol': 'XLM/AUD', 'base': 'XLM', 'quote': 'AUD', 'baseId': 'xlm', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'RHOC/AUD': {'id': 'rhoc', 'symbol': 'RHOC/AUD', 'base': 'RHOC', 'quote': 'AUD', 'baseId': 'rhoc', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
'GAS/AUD': {'id': 'gas', 'symbol': 'GAS/AUD', 'base': 'GAS', 'quote': 'AUD', 'baseId': 'gas', 'quoteId': 'aud', 'type': 'spot', 'spot': True},
},
'commonCurrencies': {
'DRK': 'DASH',
},
'options': {
'fetchBalance': 'private_post_my_balances',
},
})
def parse_balance(self, response):
result = {'info': response}
balances = self.safe_value_2(response, 'balance', 'balances')
if isinstance(balances, list):
for i in range(0, len(balances)):
currencies = balances[i]
currencyIds = list(currencies.keys())
for j in range(0, len(currencyIds)):
currencyId = currencyIds[j]
balance = currencies[currencyId]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
result[code] = account
else:
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balances, currencyId)
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
method = self.safe_string(self.options, 'fetchBalance', 'private_post_my_balances')
response = await getattr(self, method)(params)
#
# read-write api keys
#
# ...
#
# read-only api keys
#
# {
# "status":"ok",
# "balances":[
# {
# "LTC":{"balance":0.1,"audbalance":16.59,"rate":165.95}
# }
# ]
# }
#
return self.parse_balance(response)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'cointype': market['id'],
}
orderbook = await self.privatePostOrders(self.extend(request, params))
return self.parse_order_book(orderbook, symbol, None, 'buyorders', 'sellorders', 'rate', 'amount')
def parse_ticker(self, ticker, market=None):
#
# {
# "btc":{
# "bid":"51970",
# "ask":"53000",
# "last":"52806.47"
# }
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.milliseconds()
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': ticker,
}, market, False)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetLatest(params)
id = market['id']
id = id.lower()
prices = self.safe_value(response, 'prices')
#
# {
# "status":"ok",
# "prices":{
# "btc":{
# "bid":"52732.47000022",
# "ask":"53268.0699976",
# "last":"53284.03"
# }
# }
# }
#
ticker = self.safe_value(prices, id)
return self.parse_ticker(ticker, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'cointype': market['id'],
}
response = await self.privatePostOrdersHistory(self.extend(request, params))
#
# {
# "status":"ok",
# "orders":[
# {"amount":0.00102091,"rate":21549.09999991,"total":21.99969168,"coin":"BTC","solddate":1604890646143,"market":"BTC/AUD"},
# ],
# }
#
trades = self.safe_value(response, 'orders', [])
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "amount":0.00102091,
# "rate":21549.09999991,
# "total":21.99969168,
# "coin":"BTC",
# "solddate":1604890646143,
# "market":"BTC/AUD"
# }
#
priceString = self.safe_string(trade, 'rate')
amountString = self.safe_string(trade, 'amount')
costString = self.safe_number(trade, 'total')
timestamp = self.safe_integer(trade, 'solddate')
marketId = self.safe_string(trade, 'market')
symbol = self.safe_symbol(marketId, market, '/')
return self.safe_trade({
'info': trade,
'id': None,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': None,
'type': None,
'side': None,
'takerOrMaker': None,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': None,
}, market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
method = 'privatePostMy' + self.capitalize(side)
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
request = {
'cointype': self.market_id(symbol),
'amount': amount,
'rate': price,
}
return await getattr(self, method)(self.extend(request, params))
async def cancel_order(self, id, symbol=None, params={}):
side = self.safe_string(params, 'side')
if side != 'buy' and side != 'sell':
raise ArgumentsRequired(self.id + ' cancelOrder() requires a side parameter, "buy" or "sell"')
params = self.omit(params, 'side')
method = 'privatePostMy' + self.capitalize(side) + 'Cancel'
request = {
'id': id,
}
return await getattr(self, method)(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + path
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
body = self.json(self.extend({'nonce': nonce}, params))
headers = {
'Content-Type': 'application/json',
'key': self.apiKey,
'sign': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
|
|
"""Logic for reading list behavior across all properties."""
from django.conf import settings
from django.core.cache import cache
from elasticsearch import TransportError
from elasticsearch_dsl import filter as es_filter
from bulbs.content.filters import NegateQueryFilter, SponsoredBoost
from bulbs.content.models import Content
from bulbs.content.search import randomize_es
from bulbs.sections.models import Section
from bulbs.special_coverage.models import SpecialCoverage
from .popular import get_popular_ids, popular_content
from .slicers import FirstSlotSlicer
class ReadingListMixin(object):
"""Mixin for Content-based objects to manage reading lists."""
def _get_reading_list_identifier(self):
# 1. Match content to sponsored Special Coverages
results = self.percolate_special_coverage(sponsored_only=True)
if results:
return results[0]
# 2."Popular" i.e., the content is one of the 25 most popular items.
popular_ids = get_popular_ids()
if popular_ids and self.id in popular_ids:
return "popular"
# 3. Any unsponsored special coverage reading list that contains this item.
results = self.percolate_special_coverage()
if results:
return results[0]
# 4. Any section that contains this item.
try:
results = Content.search_objects.client.percolate(
index=self.mapping.index,
doc_type=self.mapping.doc_type,
id=self.id,
body={"filter": es_filter.Prefix(_id="section").to_dict()}
)
except TransportError:
results = {"total": 0}
if results["total"] > 0:
for result in results["matches"]:
if not result["_id"].endswith("None"):
return result["_id"]
return "recent"
def validate_query(self, query):
"""Confirm query exists given common filters."""
if query is None:
return query
query = self.update_reading_list(query)
return query
def get_validated_augment_query(self, augment_query=None):
"""
Common rules for reading list augmentation hierarchy.
1. Sponsored Content.
2. Video Content.
"""
augment_query = self.validate_query(augment_query)
# Given an invalid query, reach for a Sponsored query.
if not augment_query:
augment_query = self.validate_query(Content.search_objects.sponsored())
# Given an invalid Sponsored query, reach for a Video query.
if not augment_query:
reading_list_config = getattr(settings, "READING_LIST_CONFIG", {})
excluded_channel_ids = reading_list_config.get("excluded_channel_ids", [])
augment_query = self.validate_query(Content.search_objects.evergreen_video(
excluded_channel_ids=excluded_channel_ids
))
return augment_query
def augment_reading_list(self, primary_query, augment_query=None, reverse_negate=False):
"""Apply injected logic for slicing reading lists with additional content."""
primary_query = self.validate_query(primary_query)
augment_query = self.get_validated_augment_query(augment_query=augment_query)
try:
# We use this for cases like recent where queries are vague.
if reverse_negate:
primary_query = primary_query.filter(NegateQueryFilter(augment_query))
else:
augment_query = augment_query.filter(NegateQueryFilter(primary_query))
augment_query = randomize_es(augment_query)
return FirstSlotSlicer(primary_query, augment_query)
except TransportError:
return primary_query
def get_special_coverage_identifiers(self):
cache_key = "special-coverage-identifiers-{}".format(self.id)
identifiers = cache.get(cache_key)
if identifiers is None:
identifiers = self.percolate_special_coverage()
cache.set(cache_key, identifiers, 60 * 5)
return identifiers
def get_reading_list_identifier(self):
cache_key = "reading-list-identifier-{}".format(self.id)
identifier = cache.get(cache_key)
if identifier is None:
identifier = self._get_reading_list_identifier()
cache.set(cache_key, identifier, 60 * 5)
return identifier
def update_reading_list(self, reading_list):
"""Generic behaviors for reading lists before being rendered."""
# remove the current piece of content from the query.
reading_list = reading_list.filter(
~es_filter.Ids(values=[self.id])
)
# remove excluded document types from the query.
reading_list_config = getattr(settings, "READING_LIST_CONFIG", {})
excluded_doc_types = reading_list_config.get("excluded_doc_types", [])
for obj in excluded_doc_types:
reading_list = reading_list.filter(~es_filter.Type(value=obj))
return reading_list
def get_reading_list_context(self, **kwargs):
"""Returns the context dictionary for a given reading list."""
reading_list = None
context = {
"name": "",
"content": reading_list,
"targeting": {},
"videos": []
}
if self.reading_list_identifier == "popular":
reading_list = popular_content()
context.update({"name": self.reading_list_identifier})
# Popular is augmented.
reading_list = self.augment_reading_list(reading_list)
context.update({"content": reading_list})
return context
if self.reading_list_identifier.startswith("specialcoverage"):
special_coverage = SpecialCoverage.objects.get_by_identifier(
self.reading_list_identifier
)
reading_list = special_coverage.get_content().query(
SponsoredBoost(field_name="tunic_campaign_id")
).sort("_score", "-published")
context["targeting"]["dfp_specialcoverage"] = special_coverage.slug
if special_coverage.tunic_campaign_id:
context["tunic_campaign_id"] = special_coverage.tunic_campaign_id
context["targeting"].update({
"dfp_campaign_id": special_coverage.tunic_campaign_id
})
# We do not augment sponsored special coverage lists.
reading_list = self.update_reading_list(reading_list)
else:
reading_list = self.augment_reading_list(reading_list)
context.update({
"name": special_coverage.name,
"videos": special_coverage.videos,
"content": reading_list
})
return context
if self.reading_list_identifier.startswith("section"):
section = Section.objects.get_by_identifier(self.reading_list_identifier)
reading_list = section.get_content()
reading_list = self.augment_reading_list(reading_list)
context.update({
"name": section.name,
"content": reading_list
})
return context
reading_list = Content.search_objects.search()
reading_list = self.augment_reading_list(reading_list, reverse_negate=True)
context.update({
"name": "Recent News",
"content": reading_list
})
return context
def get_reading_list(self, published=True):
"""
This is currently a misnomer, as it actually returns a dictionary object.
The returned object contains the reading list.
"""
        return self.get_reading_list_context(published=published)
@property
def reading_list_identifier(self):
_reading_list_identifier = getattr(self, "_reading_list_identifier", None)
if not _reading_list_identifier:
setattr(self, "_reading_list_identifier", self.get_reading_list_identifier())
return self._reading_list_identifier
|
|
##
#######################################################################################################################
#
# Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#######################################################################################################################
import os
import glob
import re
import sys
import hashlib
# enum like values for setting up return data structures
## Config Data
Enums = 0
Nodes = 1
Settings = 2
Hwl = 3
NodeParentIndices = 4
## Enum Data
EnumName = 0
EnumData = 1
EnumScope = 2
EnumValueName = 0
EnumValue = 1
## Settings Data
SettingName = 0
SettingNode = 1
SettingType = 2
SettingVarName = 3
SettingDesc = 4
SettingVarType = 5
SettingDefault = 6
SettingWinDefault = 7
SettingLnxDefault = 8
SettingScope = 9
SettingFriendlyName = 10
SettingStringLength = 11
SettingRegistryType = 12
SettingWhitelist = 13
SettingBlacklist = 14
SettingHash = 15
SettingDevDriver = 16
SettingMinVersion = 17
SettingMaxVersion = 18
# Config parse strings
StartEnum = "DefineEnum"
StartPrivEnum = "DefinePrivEnum"
StartNode = "Node"
StartLeaf = "Leaf"
StartHwl = "HWL"
StartDefaultScope = "DefaultScope"
# Settings parse strings
SettingParseStrings = {
SettingName : "SettingName",
SettingNode : "SettingNode",
SettingType : "SettingType",
SettingVarName : "VariableName",
SettingDesc : "Description",
SettingVarType : "VariableType",
SettingDefault : "VariableDefault",
SettingWinDefault : "VariableDefaultWin",
SettingLnxDefault : "VariableDefaultLnx",
SettingScope : "SettingScope",
SettingFriendlyName : "FriendlyName",
SettingStringLength : "StringLength",
SettingRegistryType : "RegistryType",
SettingWhitelist : "ISV_Whitelist",
SettingBlacklist : "ISV_Blacklist",
SettingDevDriver : "DevDriver",
SettingMinVersion : "MinVersion",
SettingMaxVersion : "MaxVersion"}
# Registry type lookup table, finds the RegistryType based on SettingType
RegistryTypeTable = {
"BOOL_STR": "Util::ValueType::Boolean",
"UINT_STR": "Util::ValueType::Uint",
"INT_STR": "Util::ValueType::Int",
"FLOAT_STR": "Util::ValueType::Float",
"HEX_STR": "Util::ValueType::Uint",
"HEX64_STR": "Util::ValueType::Uint64",
"STRING": "Util::ValueType::Str",
"STRING_DIR": "Util::ValueType::Str"
}
SettingScopeDefault = ""
# Helper class primarily used to keep track of line numbers during line by line file reading
class FileReader:
def __init__(self, filename):
self._file = open(filename, 'r')
self._lineNumber = 0
self._filename = filename
def readLine(self):
line = self._file.readline()
self._lineNumber += 1
return line
def getLineNumber(self):
return self._lineNumber
def getFilename(self):
return self._filename
def close(self):
self._file.close()
def isCfgComment(line):
if line.strip() == "":
return False
else:
char = line.lstrip()[0]
return char == "#" or char == "/" or char == "*"
def errorExit(msg):
print("ERROR: " + msg)
sys.exit(1)
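# 32-bit FNV-1a hash (offset basis 0x811c9dc5, prime 0x01000193). parseLeaf uses
# this to scramble setting names into "#<hash>" keys when no other hash algorithm
# is requested.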
def fnv1a(str):
    fnv_prime = 0x01000193
    hval = 0x811c9dc5
    uint32Max = 2 ** 32
    for c in str:
        hval = hval ^ ord(c)
        hval = (hval * fnv_prime) % uint32Max
return hval
# Hash the key string using the SHA-1 secure hash.
def sha1(str):
m = hashlib.sha1()
    # add a fixed salt string to the text key before hashing
m.update("gqBGG$0$4EX@nPsBuF=|a6uRlBWo@ITpWN8WRGFKWdi7wlw@&AMJxAFWeRc2ls!li*0o#M4z%sj#|V_j".encode())
m.update(str.encode())
return m.hexdigest()
# extracts the text in between the first set of single quotes
# Ex: "This is 'the best' example 'ever!'" would return "the best"
def extractSingleQuotedText(line):
return line.partition("'")[-1].partition("'")[0]
# same as extractSingleQuotedText except extracts text from double quotes
def extractDoubleQuotedText(line):
return line.partition('"')[-1].partition('"')[0]
# extracts double quoted text that may be split over multiple lines. The end quote is expected to be followed
# by a semicolon so quotes in the middle of the string are ignored
def extractMultiLineQuotedText(line, fileReader):
if line.find('";') >= 0:
# the entire string is on one line, just pass to extractDoubleQuotedText
return extractDoubleQuotedText(line)
else:
# readLine leaves a newline character in the string, but we specify those explicitly so strip the last one off
retText = line.partition('"')[-1].rpartition("\n")[0]
while True:
# subsequent lines may have a bunch of leading space to make the config file look pretty, but we don't
# want that space in the actual description.
line = fileReader.readLine().lstrip()
if line == "":
errorExit("End of file found when extracting multiline quoted text: %s:%d" %
(fileReader.getFilename(), fileReader.getLineNumber()))
if line.find('";') >= 0:
# this is the final line, just take everything up to the final quote
if line.find('"') == -1:
errorExit("Missing final quote in multiline quoted text: %s:%d" %
(fileReader.getFilename(), fileReader.getLineNumber()))
retText = retText + line.rpartition('"')[0]
break
else:
retText = retText + line.rpartition("\n")[0]
return retText
def parseEnum(line, fileReader):
# tokens are enclosed in single quotes, the first line contains the enum name & first enum name/value pair
enumName = extractSingleQuotedText(line)
enumValueNames = []
enumValues = []
enumScope = "public"
if line.find(StartPrivEnum) >= 0:
enumScope = "private"
# strip off everything up to and including the : and then start extracting names and values
line = line.partition(":")[-1]
while True:
enumValueNames.append(extractSingleQuotedText(line))
enumValues.append(extractSingleQuotedText(line.partition(",")[-1]))
if line == "":
errorExit("Found EOF while parsing enum %s: %s:%d" %
(enumName, fileReader.getFilename(), fileReader.getLineNumber()))
elif line.find(";") >= 0:
# we've reached the end of the enum definition, break out of this loop
break
else:
#otherwise read the next line and continue
line = fileReader.readLine()
return {EnumName: enumName, EnumData: {EnumValueName: enumValueNames, EnumValue: enumValues}, EnumScope: enumScope}
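# Enum definition syntax, inferred from the parsing logic above (illustrative):
#   DefineEnum 'MyEnum' : 'ValueA', '0', 'ValueB', '1';
# DefinePrivEnum uses the same form but marks the enum scope as private.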
def parseLeaf(line, fileReader, hashAlgorithm):
settingData = { SettingName : "",
SettingType : "",
SettingVarName : "",
SettingDesc : "",
SettingVarType : "",
SettingDefault : "",
SettingWinDefault : "",
SettingLnxDefault : "",
SettingScope : "",
SettingFriendlyName : "",
SettingStringLength : "",
SettingRegistryType : "",
SettingWhitelist : "",
SettingBlacklist : "",
SettingHash : "",
SettingDevDriver : "",
SettingMinVersion : "",
SettingMaxVersion : "",}
while True:
if line == "":
errorExit("End of file found when processing a Leaf: %s:%d" %
(fileReader.getFilename(), fileReader.getLineNumber()))
elif line.find("}") >= 0:
# end of the leaf found, let's get out of here!
break
for key, value in SettingParseStrings.items():
if value in line:
settingData[key] = extractMultiLineQuotedText(line, fileReader)
# more stuff to parse, on to the next line
line = fileReader.readLine()
if settingData[SettingType] != "":
settingData[SettingRegistryType] = RegistryTypeTable[settingData[SettingType]]
if settingData[SettingScope] == "":
settingData[SettingScope] = SettingScopeDefault
if settingData[SettingName] != "" and settingData[SettingScope] != "PublicCatalystKey":
if hashAlgorithm == 1:
# SHA1 encryption
settingData[SettingHash] = sha1(settingData[SettingName])
elif hashAlgorithm == 2:
# plain text
settingData[SettingHash] = settingData[SettingName]
else:
# default scrambling, for hashAlgorithm == 0 and other cases not handled above.
settingData[SettingHash] = "#" + str(fnv1a(settingData[SettingName]))
return settingData
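# hashAlgorithm values accepted below (see parseLeaf): 0 scrambles the setting
# name with FNV-1a into a "#<hash>" key, 1 uses the salted SHA-1 digest, and 2
# stores the setting name in plain text.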
def parseConfigFile(configFileName):
return parseConfigFile2(configFileName, 0)
def parseConfigFile2(configFileName, hashAlgorithm):
fileReader = FileReader(configFileName)
enumDefs = []
settingDefs = []
nodeList = []
nodeParentIndexList = []
hwl = ""
inLeaf = False
parentIndexStack = [-1] # Always keep -1 on the stack since it represents "No Parent"
while True:
line = fileReader.readLine()
if line == "":
# we've reached the end of the file, break out of the while loop
break
elif isCfgComment(line):
# nothing to do for comments, just continue to the next line
continue
# Enum definition
if line.find(StartEnum) >= 0 or line.find(StartPrivEnum) >= 0:
enumDefs.append(parseEnum(line, fileReader))
# Start Node
if line.find(StartNode) >= 0:
# Use the index from the top of the parent index stack as the parent index for the current node
nodeParentIndexList.append(parentIndexStack[-1])
# Add the current node to the node list
nodeList.append(extractDoubleQuotedText(line))
# Add the index of the current node to the top of the parent index stack
parentIndexStack.append(len(nodeList) - 1)
if line.find(StartLeaf) >= 0:
line = fileReader.readLine()
if line.find("{") == -1:
errorExit("Malformed Leaf, missing start bracket: %s:%d" %
(fileReader.getFilename(), fileReader.getLineNumber()))
settingDefs.append(parseLeaf(line, fileReader, hashAlgorithm))
settingDefs[-1][SettingNode] = nodeList[-1]
if line.find(StartHwl) >= 0:
hwl = extractDoubleQuotedText(line)
        # This field is only used internally by the config parser. It is optionally defined at the top of the
        # config file and specifies the scope to use when a setting definition is missing the SettingScope field.
global SettingScopeDefault
if line.find(StartDefaultScope) >= 0:
SettingScopeDefault = extractDoubleQuotedText(line)
if line.strip() == "}":
# The parent index stack should have at least 2 indices in it if we're inside a node
if len(parentIndexStack) > 1:
# Pop the top of the parent index stack off since we've found the node's closing bracket
parentIndexStack.pop()
if inLeaf:
errorExit("inLeaf when we encountered an end of node close bracket: %s:%d" %
(fileReader.getFilename(), fileReader.getLineNumber()))
inLeaf = False
else:
errorExit("Unexpected end bracket: %s:%d" %
(fileReader.getFilename(), fileReader.getLineNumber()))
fileReader.close()
return {Enums: enumDefs, Settings: settingDefs, Nodes: nodeList, Hwl: hwl, NodeParentIndices: nodeParentIndexList}
|
|
##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
"""
Piece API
::
GET /image/id Returns a rendered page displaying the requested image
GET /video/id Returns a rendered page displaying the requested video
POST /image/id Add tags to an image object
    POST /video/id Add tags to a video object
DELETE /image/id Flags the image as deleted in the database
DELETE /video/id Flags the video as deleted in the database
"""
import os
import json
import time
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
from django.http.request import RawPostDataException
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from path import Path
import psd_tools
from frog.models import (
Image,
Video,
Tag,
Piece,
cropBox,
pilImage,
Group,
Gallery,
SiteConfig,
FROG_SITE_URL,
FROG_THUMB_SIZE,
squareCropDimensions)
from frog.models import ViewRecord
from frog.common import Result, getObjectsFromGuids, getRoot, getUser
from frog.uploader import handle_uploaded_file
@require_http_methods(["POST", "PUT", "DELETE"])
def image(request, obj_id):
"""Handles a request based on method and calls the appropriate function"""
obj = Image.objects.get(pk=obj_id)
if request.method == "POST":
return post(request, obj)
elif request.method == "PUT":
return put(request, obj)
elif request.method == "DELETE":
return delete(request, obj)
@require_http_methods(["POST", "PUT", "DELETE"])
def video(request, obj_id):
"""Handles a request based on method and calls the appropriate function"""
obj = Video.objects.get(pk=obj_id)
if request.method == "POST":
return post(request, obj)
elif request.method == "PUT":
return put(request, obj)
elif request.method == "DELETE":
return delete(request, obj)
@login_required
@csrf_exempt
def data(request, guid):
obj = Piece.fromGuid(guid)
if request.method == "GET":
res = Result()
res.append(obj.json())
return JsonResponse(res.asDict())
elif request.method == "POST":
return post(request, obj)
elif request.method == "PUT":
return put(request, obj)
elif request.method == "DELETE":
return delete(request, obj)
@login_required
def getGuids(request):
res = Result()
guids = request.GET.get("guids", "").split(",")
for _ in getObjectsFromGuids(guids):
res.append(_.json())
return JsonResponse(res.asDict())
@login_required
@csrf_exempt
def like(request, guid):
obj = Piece.fromGuid(guid)
res = Result()
if obj.like(request):
emailLike(request, obj)
else:
res.message = 'Cannot "like" things more than once'
res.append(obj.json())
return JsonResponse(res.asDict())
@login_required
def post(request, obj):
try:
data = request.POST or json.loads(request.body)["body"]
except RawPostDataException:
data = request.POST
tags = data.get("tags", "").split(",")
resetthumbnail = data.get("reset-thumbnail", False)
crop = data.get("crop")
res = Result()
for tag in tags:
try:
t = Tag.objects.get(pk=int(tag))
except ValueError:
t, created = Tag.objects.get_or_create(name=tag)
if created:
res.append(t.json())
obj.tags.add(t)
if obj.custom_thumbnail and (crop or request.FILES or resetthumbnail):
try:
os.unlink(getRoot() / obj.custom_thumbnail.name)
except OSError:
pass
if crop:
box = [int(_) for _ in crop]
        # -- Build a custom thumbnail from the crop source image
source = Path(obj.cropSource.name)
relativedest = obj.getPath(True) / "{:.0f}{}".format(time.time(), source.ext)
dest = getRoot() / relativedest
source = getRoot() / source
if not dest.parent.exists():
dest.parent.makedirs()
source.copy(dest)
obj.custom_thumbnail = relativedest
image = pilImage.open(dest).crop(box)
image.load()
# Resize
image.thumbnail(
(FROG_THUMB_SIZE, FROG_THUMB_SIZE), pilImage.ANTIALIAS
)
image.save(dest)
obj.save()
if request.FILES:
# -- Handle thumbnail upload
f = request.FILES.get("file")
relativedest = obj.getPath(True) / f.name
dest = getRoot() / relativedest
handle_uploaded_file(dest, f)
obj.custom_thumbnail = relativedest
try:
if dest.ext == ".psd":
                image = psd_tools.PSDImage.load(dest).as_PIL()
else:
image = pilImage.open(dest)
except IOError:
res.isError = True
res.message = "{} is not a supported thumbnail image type".format(
f.name
)
return JsonResponse(res.asDict())
# Resize
width, height = squareCropDimensions(*image.size)
image.thumbnail((width, height), pilImage.ANTIALIAS)
# Crop from center
box = cropBox(*image.size)
image.crop(box).save(dest)
obj.save()
if resetthumbnail:
obj.custom_thumbnail = None
obj.save()
res.value = obj.json()
return JsonResponse(res.asDict())
@login_required
def put(request, obj):
for key, value in json.loads(request.body)["body"].items():
if hasattr(obj, key):
setattr(obj, key, value)
obj.save()
res = Result()
res.append(obj.json())
return JsonResponse(res.asDict())
@login_required
def delete(request, obj):
obj.deleted = True
obj.save()
res = Result()
res.append(obj.json())
return JsonResponse(res.asDict())
def group(request, obj_id=None):
res = Result()
if request.method == "GET":
try:
group = Group.objects.get(pk=obj_id)
res.append(group.json())
except ObjectDoesNotExist as err:
res.isError = True
res.message = str(err)
elif request.method == "POST":
data = json.loads(request.body)["body"]
user = getUser(request)
if user:
items = getObjectsFromGuids(data["guids"])
gallery = data.get("gallery")
g = Group()
g.author = user
g.title = data.get("title", items[0].title)
g.thumbnail = items[0].thumbnail
g.description = data.get("description", items[0].description)
g.save()
g.guid = g.getGuid().guid
g.save()
if gallery:
Gallery.objects.get(pk=gallery).groups.add(g)
for item in items:
g.appendChild(item)
res.append(g.json())
else:
res.isError = True
res.message = "No user found to create group"
elif request.method == "PUT":
data = json.loads(request.body)["body"]
g = Group.objects.get(pk=obj_id)
action = data["action"]
index = data.get("index")
item = Piece.fromGuid(data["guid"])
if action == "append":
g.appendChild(item)
elif action == "insert":
g.insertChild(index, item)
else:
g.removeChild(item)
res.append(g.json())
else:
g = Group.objects.get(pk=obj_id)
g.delete()
return JsonResponse(res.asDict())
@require_http_methods(["POST"])
@login_required
def recordView(request):
res = Result()
data = json.loads(request.body)["body"]
item = getObjectsFromGuids([data["guid"]])
if item:
item = item[0]
created = ViewRecord.objects.get_or_create(
user=request.user, guid=data["guid"]
)[1]
if created:
item.view_count += 1
item.save()
res.append(item.view_count)
else:
res.isError = True
res.message = "No object foudn for guid {}".format(data["guid"])
return JsonResponse(res.asDict())
def emailLike(request, obj):
if not obj.author.frog_prefs.get_or_create()[0].json()["emailLikes"]:
return
if obj.author == request.user:
return
config = SiteConfig.getSiteConfig()
html = render_to_string(
"frog/comment_email.html",
{
"user": request.user,
"object": obj,
"comment": "",
"action_type": "liked",
"image": isinstance(obj, Image),
"SITE_URL": FROG_SITE_URL,
},
)
subject = "{}: {} liked {}".format(
config.name, request.user.username, obj.title
)
fromemail = request.user.email
to = obj.author.email
text_content = "This is an important message."
html_content = html
send_mail(subject, text_content, fromemail, [to], html_message=html_content)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to run beam search decoding, including running ROUGE evaluation and producing JSON datafiles for the in-browser attention visualizer, which can be found here https://github.com/abisee/attn_vis"""
import os
import time
import tensorflow as tf
import beam_search
import data
import json
import rouge
import rouge_score
import util
import logging
import codecs
import six
import shutil
import numpy as np
from collections import OrderedDict
try:
from pyltp import SentenceSplitter
except ImportError:
SentenceSplitter = None
FLAGS = tf.app.flags.FLAGS
SECS_UNTIL_NEW_CKPT = 60 # max number of seconds before loading new checkpoint
def copy_model_post_fn(text, source_text=None):
"""unique sentence end !!!! => !
:param line:
:return:
"""
tokens = text.strip().split(" ")
source_tokens = source_text.split(" ")
source_token_cnt = {}
for token in source_tokens:
if token not in source_token_cnt:
source_token_cnt[token] = 0
source_token_cnt[token] += 1
commons = ["!", "?"]
if len(tokens) == 0:
return ""
else:
last_token = tokens[-1]
new_last_token = []
for char in last_token:
if char not in new_last_token:
new_last_token.append(char)
new_last_token = "".join(new_last_token)
tokens[-1] = new_last_token
new_tokens = []
for i, token in enumerate(tokens):
if i > 0 and tokens[i] == tokens[i-1]:
if tokens[i] in commons:
continue
if tokens[i] not in source_token_cnt or source_token_cnt[tokens[i]] < 2:
continue
new_tokens.append(token)
return " ".join(new_tokens)
class BeamSearchDecoder(object):
"""Beam search decoder."""
def __init__(self, model, batcher, vocab):
"""Initialize decoder.
Args:
model: a Seq2SeqAttentionModel object.
batcher: a Batcher object.
vocab: Vocabulary object
"""
self._model = model
self._model.build_graph()
self._batcher = batcher
self._vocab = vocab
self._saver = tf.train.Saver() # we use this to load checkpoints for decoding
self._sess = tf.Session(config=util.get_config())
# Load an initial checkpoint to use for decoding
ckpt_path = util.load_ckpt(self._saver, self._sess)
if batcher is None:
return
if FLAGS.single_pass and FLAGS.Serving == False:
# Make a descriptive decode directory name
ckpt_name = "ckpt-" + ckpt_path.split('-')[-1] # this is something of the form "ckpt-123456"
if FLAGS.infer_dir is None:
self._decode_dir = os.path.join(FLAGS.log_root, get_decode_dir_name(ckpt_name))
else:
self._decode_dir = os.path.join(FLAGS.infer_dir, get_decode_dir_name(ckpt_name))
if os.path.exists(self._decode_dir):
if FLAGS.clear_decode_dir is True:
shutil.rmtree(self._decode_dir)
else:
raise Exception("single_pass decode directory %s should not already exist" % self._decode_dir)
else: # Generic decode dir name
self._decode_dir = os.path.join(FLAGS.log_root, "decode")
# Make the decode dir if necessary
if not FLAGS.Serving:
if not os.path.exists(self._decode_dir): os.makedirs(self._decode_dir)
if FLAGS.single_pass and FLAGS.Serving == False:
# Make the dirs to contain output written in the correct format for pyrouge
self._rouge_ref_path = os.path.join(self._decode_dir, "ref.txt")
self._rouge_dec_path = os.path.join(self._decode_dir, "infer.txt")
self._article_with_gen_abs_path = os.path.join(self._decode_dir, "article_ref_infer.txt")
self._ref_f = codecs.open(self._rouge_ref_path, "w", "utf-8")
self._dec_f = codecs.open(self._rouge_dec_path, "w", "utf-8")
self._gen_f = codecs.open(self._article_with_gen_abs_path, "w","utf-8")
self._rouge_result_path = os.path.join(self._decode_dir, "rouge_result.log")
  def decode(self, input_batch=None, beam_nums_to_return=1, *args, **kwargs):
"""Decode examples until data is exhausted (if FLAGS.single_pass) and return, or decode indefinitely, loading latest checkpoint at regular intervals"""
t0 = time.time()
counter = 0
while True:
if input_batch is not None:
batch = input_batch
else:
batch = self._batcher.next_batch() # 1 example repeated across batch
if batch is None: # finished decoding dataset in single_pass mode
assert FLAGS.single_pass, "Dataset exhausted, but we are not in single_pass mode"
tf.logging.info("Decoder has finished reading dataset for single_pass.")
tf.logging.info("Output has been saved in %s,%s and %s. Now starting ROUGE eval...", self._rouge_ref_path, self._rouge_dec_path, self._article_with_gen_abs_path)
results_dict = rouge_eval_write(self._rouge_ref_path, self._rouge_dec_path, self._rouge_result_path)
return
if FLAGS.single_pass:
if FLAGS.max_infer_batch is not None and counter >= FLAGS.max_infer_batch:
tf.logging.info("up to max_infer_batch={}, begin to eval rogue".format(FLAGS.max_infer_batch))
results_dict = rouge_eval_write(self._rouge_ref_path, self._rouge_dec_path, self._rouge_result_path)
return
original_article = batch.original_articles[0] # string
original_abstract = batch.original_abstracts[0] # string
original_abstract_sents = batch.original_abstracts_sents[0] # list of strings
article_withunks = data.show_art_oovs(original_article, self._vocab) # string
abstract_withunks = data.show_abs_oovs(original_abstract, self._vocab, (batch.art_oovs[0] if FLAGS.pointer_gen else None)) # string
# Run beam search to get {beam_nums_to_return} best Hypothesis
best_hyp_list = beam_search.run_beam_search(self._sess, self._model, self._vocab, batch, beam_nums_to_return=beam_nums_to_return)
decoded_output_list = []
for bx, best_hyp in enumerate(best_hyp_list):
# Extract the output ids from the hypothesis and convert back to words
output_ids = [int(t) for t in best_hyp.tokens[1:]]
decoded_words = data.outputids2words(output_ids, self._vocab, (batch.art_oovs[0] if FLAGS.pointer_gen else None))
# Remove the [STOP] token from decoded_words, if necessary
try:
fst_stop_idx = decoded_words.index(data.STOP_DECODING) # index of the (first) [STOP] symbol
decoded_words = decoded_words[:fst_stop_idx]
except ValueError:
          pass  # no [STOP] token was produced; keep the full decoded output
decoded_output = ' '.join(decoded_words) # single string
decoded_output = copy_model_post_fn(decoded_output, source_text=original_article)
decoded_output_list.append(decoded_output)
if input_batch is not None: # Finish decoding given single example
print_results(article_withunks, abstract_withunks, decoded_output_list) # log output to screen
return decoded_output_list
for decoded_output, best_hyp in zip(decoded_output_list, best_hyp_list):
if FLAGS.single_pass:
self.write_for_rouge(original_abstract_sents, decoded_words, counter, article=original_article) # write ref summary and decoded summary to file, to eval with pyrouge later
counter += 1 # this is how many examples we've decoded
else:
print_results(article_withunks, abstract_withunks, decoded_output) # log output to screen
self.write_for_attnvis(article_withunks, abstract_withunks, decoded_words, best_hyp.attn_dists, best_hyp.p_gens) # write info to .json file for visualization tool
# Check if SECS_UNTIL_NEW_CKPT has elapsed; if so return so we can load a new checkpoint
t1 = time.time()
if t1-t0 > SECS_UNTIL_NEW_CKPT:
tf.logging.info('We\'ve been decoding with same checkpoint for %i seconds. Time to load new checkpoint', t1-t0)
_ = util.load_ckpt(self._saver, self._sess)
t0 = time.time()
def write_for_rouge(self, reference_sents, decoded_words, ex_index, article=None):
"""Write output to file in correct format for eval with pyrouge. This is called in single_pass mode.
Args:
reference_sents: list of strings
decoded_words: list of strings
ex_index: int, the index with which to label the files
"""
# First, divide decoded output into sentences
decoded_sents = []
if SentenceSplitter is None:
decoded_sents = util.cut_sentence(decoded_words)
for i in range(len(decoded_sents)):
decoded_sents[i] = " ".join(decoded_sents[i])
else:
decoded_text = " ".join(decoded_words)
decoded_sents = SentenceSplitter.split(decoded_text.encode("utf-8"))
# pyrouge calls a perl script that puts the data into HTML files.
# Therefore we need to make our output HTML safe.
decoded_sents = [make_html_safe(w) for w in decoded_sents]
reference_sents = [make_html_safe(w) for w in reference_sents]
# Write to file
if article is not None:
with codecs.open(self._article_with_gen_abs_path, "a", "utf-8") as f:
f.write("article:\n")
f.write(article + "\n")
f.write("ref:\n")
for idx, sent in enumerate(reference_sents):
f.write(sent + "\n")
f.write("gen:\n")
for idx, sent in enumerate(decoded_sents):
if six.PY2 and type(sent) == str:
sent = sent.decode("utf-8")
f.write(sent + "\n")
f.write("\n")
with codecs.open(self._rouge_ref_path, "a", "utf-8") as f:
for idx,sent in enumerate(reference_sents):
if six.PY2 and type(sent) == str:
reference_sents[idx] = sent.decode("utf-8")
reference_sents_str = "".join(reference_sents)
f.write(reference_sents_str + "\n")
with codecs.open(self._rouge_dec_path, "a", "utf-8") as f:
for idx,sent in enumerate(decoded_sents):
if six.PY2 and type(sent) == str:
decoded_sents[idx] = sent.decode("utf-8")
decoded_sents_str = "".join(decoded_sents)
f.write(decoded_sents_str + "\n")
tf.logging.info("Wrote example %i to file" % ex_index)
def write_for_attnvis(self, article, abstract, decoded_words, attn_dists, p_gens):
"""Write some data to json file, which can be read into the in-browser attention visualizer tool:
https://github.com/abisee/attn_vis
Args:
article: The original article string.
abstract: The human (correct) abstract string.
attn_dists: List of arrays; the attention distributions.
decoded_words: List of strings; the words of the generated summary.
p_gens: List of scalars; the p_gen values. If not running in pointer-generator mode, list of None.
"""
article_lst = article.split() # list of words
decoded_lst = decoded_words # list of decoded words
to_write = {
'article_lst': [make_html_safe(t) for t in article_lst],
'decoded_lst': [make_html_safe(t) for t in decoded_lst],
'abstract_str': make_html_safe(abstract),
'attn_dists': attn_dists
}
if FLAGS.pointer_gen:
to_write['p_gens'] = p_gens
output_fname = os.path.join(self._decode_dir, 'attn_vis_data.json')
with open(output_fname, 'w') as output_file:
json.dump(to_write, output_file, ensure_ascii=False, indent=2)
tf.logging.info('Wrote visualization data to %s', output_fname)
def print_results(article, abstract, decoded_output):
"""Prints the article, the reference summmary and the decoded summary to screen"""
print("")
tf.logging.info('ARTICLE: %s', article)
tf.logging.info('REFERENCE SUMMARY: %s', abstract)
tf.logging.info('GENERATED SUMMARY: %s', decoded_output)
print("")
def make_html_safe(s):
"""Replace any angled brackets in string s to avoid interfering with HTML attention visualizer."""
s.replace("<", "<")
s.replace(">", ">")
return s
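# Hedged illustration of the helper above: angle brackets are escaped to HTML
# entities so the text can be safely embedded in the pages written for pyrouge
# and the attention visualizer.
def _make_html_safe_example():
  # "<s> hello </s>" -> "<s> hello </s>"
  return make_html_safe("<s> hello </s>")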
def rouge_eval_write(ref_path, dec_path, result_path):
"""Evaluate the files in ref_dir and dec_dir with pyrouge, returning results_dict"""
ref_sum_sents = util.read_sum_sents(ref_path, sent_token=False)
des_sum_sents = util.read_sum_sents(dec_path, sent_token=False)
assert len(ref_sum_sents) == len(des_sum_sents)
rou = rouge.Rouge()
scores = rou.get_scores(des_sum_sents, ref_sum_sents)
ave_scores = rou.get_scores(des_sum_sents, ref_sum_sents, avg=True)
rouge_eval_f = codecs.open(result_path, "w", "utf-8")
result = OrderedDict()
result["ave_scores"] = ave_scores
result["detail_scores"] = scores
json.dump(result, rouge_eval_f, indent=2)
tf.logging.info(ave_scores)
tf.logging.info("write eval result to {}".format(result_path))
return result
def get_decode_dir_name(ckpt_name):
"""Make a descriptive name for the decode dir, including the name of the checkpoint we use to decode. This is called in single_pass mode."""
if "train" in FLAGS.data_path: dataset = "train"
elif "val" in FLAGS.data_path: dataset = "val"
elif "test" in FLAGS.data_path: dataset = "test"
else: raise ValueError("FLAGS.data_path %s should contain one of train, val or test" % (FLAGS.data_path))
dirname = "decode_%s_%imaxenc_%ibeam_%imindec_%imaxdec" % (dataset, FLAGS.max_enc_steps, FLAGS.beam_size, FLAGS.min_dec_steps, FLAGS.max_dec_steps)
if ckpt_name is not None:
dirname += "_%s" % ckpt_name
return dirname
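# Hedged illustration (the numeric values below are assumptions, not project
# defaults): the directory naming scheme produced by get_decode_dir_name() above.
def _decode_dir_name_example():
  dataset, max_enc, beam, min_dec, max_dec = "test", 400, 4, 35, 100
  dirname = "decode_%s_%imaxenc_%ibeam_%imindec_%imaxdec" % (
      dataset, max_enc, beam, min_dec, max_dec)
  return dirname + "_ckpt-123456"
  # -> "decode_test_400maxenc_4beam_35mindec_100maxdec_ckpt-123456"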
|
|
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from paramiko.hostkeys import HostKeyEntry
import paramiko
from reviewboard.ssh.errors import UnsupportedSSHKeyError
class SSHHostKeys(paramiko.HostKeys):
"""Manages known lists of host keys.
This is a specialization of paramiko.HostKeys that interfaces with
a storage backend to get the list of host keys.
"""
def __init__(self, storage):
paramiko.HostKeys.__init__(self)
self.storage = storage
def load(self, filename):
"""Loads all known host keys from the storage backend."""
self._entries = []
lines = self.storage.read_host_keys()
for line in lines:
entry = HostKeyEntry.from_line(line)
if entry is not None:
self._entries.append(entry)
def save(self, filename):
pass
class SSHClient(paramiko.SSHClient):
"""A client for communicating with an SSH server.
SSHClient allows for communicating with an SSH server and managing
all known host and user keys.
This is a specialization of paramiko.SSHClient, and supports all the
same capabilities.
Key access goes through an SSHStorage backend. The storage backend knows
how to look up keys and write them.
The default backend works with the site directory's data/.ssh directory,
and supports namespaced directories for LocalSites.
"""
DEFAULT_STORAGE = 'reviewboard.ssh.storage.FileSSHStorage'
SUPPORTED_KEY_TYPES = (paramiko.RSAKey, paramiko.DSSKey)
def __init__(self, namespace=None, storage=None):
super(SSHClient, self).__init__()
self.namespace = namespace
self._load_storage()
self._host_keys = SSHHostKeys(self.storage)
self.load_host_keys('')
def _load_storage(self):
"""Loads the storage backend.
This will attempt to load the SSH storage backend. If there is an
error in loading the backend, it will be logged, and an
ImproperlyConfigured exception will be raised.
"""
try:
path = getattr(settings, 'RBSSH_STORAGE_BACKEND',
self.DEFAULT_STORAGE)
        except ImproperlyConfigured:
# We may not be running in the Django environment.
path = self.DEFAULT_STORAGE
i = path.rfind('.')
module, class_name = path[:i], path[i + 1:]
try:
mod = __import__(module, {}, {}, [class_name])
except ImportError as e:
msg = 'Error importing SSH storage backend %s: "%s"' % (module, e)
logging.critical(msg)
raise ImproperlyConfigured(msg)
try:
self.storage = getattr(mod, class_name)(namespace=self.namespace)
except Exception as e:
msg = 'Error instantiating SSH storage backend %s: "%s"' % \
(module, e)
logging.critical(msg)
raise
def get_user_key(self):
"""Returns the keypair of the user running Review Board.
This will be an instance of :py:mod:`paramiko.PKey`, representing
a DSS or RSA key, as long as one exists. Otherwise, it may return None.
"""
key = None
fp = None
try:
key = self.storage.read_user_key()
        except paramiko.PasswordRequiredException as e:
            logging.error('SSH: Unable to access password protected '
                          'key file: %s' % e)
        except paramiko.SSHException as e:
            logging.error('SSH: Unknown error accessing user key: %s' % e)
except IOError as e:
logging.error('SSH: Error reading user key: %s' % e)
if fp:
fp.close()
return key
def delete_user_key(self):
"""Deletes the user key for this client.
If no key exists, this will do nothing.
"""
try:
self.storage.delete_user_key()
except Exception as e:
logging.error('Unable to delete SSH key file: %s' % e)
raise
def get_public_key(self, key):
"""Returns the public key portion of an SSH key.
This will be formatted for display.
"""
public_key = ''
if key:
base64 = key.get_base64()
# TODO: Move this wrapping logic into a common templatetag.
for i in range(0, len(base64), 64):
public_key += base64[i:i + 64] + '\n'
return public_key
def is_key_authorized(self, key):
"""Returns whether or not a public key is currently authorized."""
public_key = key.get_base64()
try:
lines = self.storage.read_authorized_keys()
for line in lines:
try:
authorized_key = line.split()[1]
except (ValueError, IndexError):
continue
if authorized_key == public_key:
return True
except IOError:
pass
return False
def generate_user_key(self, bits=2048):
"""Generates a new RSA keypair for the user running Review Board.
This will store the new key in the backend storage and return the
resulting key as an instance of :py:mod:`paramiko.RSAKey`.
If a key already exists, it's returned instead.
Callers are expected to handle any exceptions. This may raise
IOError for any problems in writing the key file, or
paramiko.SSHException for any other problems.
"""
key = self.get_user_key()
if not key:
key = paramiko.RSAKey.generate(bits)
self._write_user_key(key)
return key
def import_user_key(self, keyfile):
"""Imports an uploaded key file into Review Board.
``keyfile`` is expected to be an ``UploadedFile`` or a paramiko
``KeyFile``. If this is a valid key file, it will be saved in
the storage backend and the resulting key as an instance of
:py:mod:`paramiko.PKey` will be returned.
If a key of this name already exists, it will be overwritten.
Callers are expected to handle any exceptions. This may raise
IOError for any problems in writing the key file, or
paramiko.SSHException for any other problems.
This will raise UnsupportedSSHKeyError if the uploaded key is not
a supported type.
"""
# Try to find out what key this is.
for cls in self.SUPPORTED_KEY_TYPES:
key = None
try:
if not isinstance(keyfile, paramiko.PKey):
keyfile.seek(0)
key = cls.from_private_key(keyfile)
elif isinstance(keyfile, cls):
key = keyfile
except paramiko.SSHException:
# We don't have more detailed info than this, but most
# likely, it's not a valid key. Skip to the next.
continue
if key:
self._write_user_key(key)
return key
raise UnsupportedSSHKeyError()
def add_host_key(self, hostname, key):
"""Adds a host key to the known hosts file."""
self.storage.add_host_key(hostname, key)
def replace_host_key(self, hostname, old_key, new_key):
"""Replaces a host key in the known hosts file with another.
This is used for replacing host keys that have changed.
"""
self.storage.replace_host_key(hostname, old_key, new_key)
def _write_user_key(self, key):
"""Convenience function to write a user key and check for errors.
Any errors caused as a result of writing a user key will be logged.
"""
try:
self.storage.write_user_key(key)
except UnsupportedSSHKeyError as e:
logging.error('Failed to write unknown key type %s' % type(key))
raise
except IOError as e:
logging.error('Failed to write SSH user key: %s' % e)
raise
except Exception as e:
logging.error('Unknown error writing SSH user key: %s' % e,
exc_info=1)
raise
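# Hedged usage sketch (illustrative only; not part of Review Board). It assumes
# Django settings are available so that _load_storage() can resolve a backend.
def _ssh_client_example():
    client = SSHClient(namespace=None)
    key = client.generate_user_key(bits=2048)   # returns the existing key if one is stored
    wrapped = client.get_public_key(key)        # base64 public key, wrapped at 64 columns
    return wrapped, client.is_key_authorized(key)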
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: wallet.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='wallet.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0cwallet.proto\"Q\n\x04\x43\x61rd\x12\x18\n\x10\x63\x61rd_holder_name\x18\x01 \x01(\t\x12\x13\n\x0b\x63\x61rd_number\x18\x02 \x01(\t\x12\x1a\n\x12\x63\x61rd_expiry_yyyymm\x18\x03 \x01(\x05\")\n\x12\x43\x61rdEncryptRequest\x12\x13\n\x04\x63\x61rd\x18\x01 \x01(\x0b\x32\x05.Card\"$\n\x13\x43\x61rdEncryptResponse\x12\r\n\x05token\x18\x01 \x01(\t\"#\n\x12\x43\x61rdDecryptRequest\x12\r\n\x05token\x18\x01 \x01(\t\"1\n\x13\x43\x61rdDecryptResponse\x12\x1a\n\x12\x63\x61rd_in_plain_text\x18\x01 \x01(\t2x\n\x06Wallet\x12\x36\n\x07\x65ncrypt\x12\x13.CardEncryptRequest\x1a\x14.CardEncryptResponse\"\x00\x12\x36\n\x07\x64\x65\x63rypt\x12\x13.CardDecryptRequest\x1a\x14.CardDecryptResponse\"\x00\x62\x06proto3')
)
_CARD = _descriptor.Descriptor(
name='Card',
full_name='Card',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='card_holder_name', full_name='Card.card_holder_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='card_number', full_name='Card.card_number', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='card_expiry_yyyymm', full_name='Card.card_expiry_yyyymm', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=97,
)
_CARDENCRYPTREQUEST = _descriptor.Descriptor(
name='CardEncryptRequest',
full_name='CardEncryptRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='card', full_name='CardEncryptRequest.card', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=140,
)
_CARDENCRYPTRESPONSE = _descriptor.Descriptor(
name='CardEncryptResponse',
full_name='CardEncryptResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='CardEncryptResponse.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=142,
serialized_end=178,
)
_CARDDECRYPTREQUEST = _descriptor.Descriptor(
name='CardDecryptRequest',
full_name='CardDecryptRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='CardDecryptRequest.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=215,
)
_CARDDECRYPTRESPONSE = _descriptor.Descriptor(
name='CardDecryptResponse',
full_name='CardDecryptResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='card_in_plain_text', full_name='CardDecryptResponse.card_in_plain_text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=266,
)
_CARDENCRYPTREQUEST.fields_by_name['card'].message_type = _CARD
DESCRIPTOR.message_types_by_name['Card'] = _CARD
DESCRIPTOR.message_types_by_name['CardEncryptRequest'] = _CARDENCRYPTREQUEST
DESCRIPTOR.message_types_by_name['CardEncryptResponse'] = _CARDENCRYPTRESPONSE
DESCRIPTOR.message_types_by_name['CardDecryptRequest'] = _CARDDECRYPTREQUEST
DESCRIPTOR.message_types_by_name['CardDecryptResponse'] = _CARDDECRYPTRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Card = _reflection.GeneratedProtocolMessageType('Card', (_message.Message,), dict(
DESCRIPTOR = _CARD,
__module__ = 'wallet_pb2'
# @@protoc_insertion_point(class_scope:Card)
))
_sym_db.RegisterMessage(Card)
CardEncryptRequest = _reflection.GeneratedProtocolMessageType('CardEncryptRequest', (_message.Message,), dict(
DESCRIPTOR = _CARDENCRYPTREQUEST,
__module__ = 'wallet_pb2'
# @@protoc_insertion_point(class_scope:CardEncryptRequest)
))
_sym_db.RegisterMessage(CardEncryptRequest)
CardEncryptResponse = _reflection.GeneratedProtocolMessageType('CardEncryptResponse', (_message.Message,), dict(
DESCRIPTOR = _CARDENCRYPTRESPONSE,
__module__ = 'wallet_pb2'
# @@protoc_insertion_point(class_scope:CardEncryptResponse)
))
_sym_db.RegisterMessage(CardEncryptResponse)
CardDecryptRequest = _reflection.GeneratedProtocolMessageType('CardDecryptRequest', (_message.Message,), dict(
DESCRIPTOR = _CARDDECRYPTREQUEST,
__module__ = 'wallet_pb2'
# @@protoc_insertion_point(class_scope:CardDecryptRequest)
))
_sym_db.RegisterMessage(CardDecryptRequest)
CardDecryptResponse = _reflection.GeneratedProtocolMessageType('CardDecryptResponse', (_message.Message,), dict(
DESCRIPTOR = _CARDDECRYPTRESPONSE,
__module__ = 'wallet_pb2'
# @@protoc_insertion_point(class_scope:CardDecryptResponse)
))
_sym_db.RegisterMessage(CardDecryptResponse)
_WALLET = _descriptor.ServiceDescriptor(
name='Wallet',
full_name='Wallet',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=268,
serialized_end=388,
methods=[
_descriptor.MethodDescriptor(
name='encrypt',
full_name='Wallet.encrypt',
index=0,
containing_service=None,
input_type=_CARDENCRYPTREQUEST,
output_type=_CARDENCRYPTRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='decrypt',
full_name='Wallet.decrypt',
index=1,
containing_service=None,
input_type=_CARDDECRYPTREQUEST,
output_type=_CARDDECRYPTRESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_WALLET)
DESCRIPTOR.services_by_name['Wallet'] = _WALLET
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class WalletStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.encrypt = channel.unary_unary(
'/Wallet/encrypt',
request_serializer=CardEncryptRequest.SerializeToString,
response_deserializer=CardEncryptResponse.FromString,
)
self.decrypt = channel.unary_unary(
'/Wallet/decrypt',
request_serializer=CardDecryptRequest.SerializeToString,
response_deserializer=CardDecryptResponse.FromString,
)
class WalletServicer(object):
# missing associated documentation comment in .proto file
pass
def encrypt(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def decrypt(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WalletServicer_to_server(servicer, server):
rpc_method_handlers = {
'encrypt': grpc.unary_unary_rpc_method_handler(
servicer.encrypt,
request_deserializer=CardEncryptRequest.FromString,
response_serializer=CardEncryptResponse.SerializeToString,
),
'decrypt': grpc.unary_unary_rpc_method_handler(
servicer.decrypt,
request_deserializer=CardDecryptRequest.FromString,
response_serializer=CardDecryptResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Wallet', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaWalletServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
# missing associated documentation comment in .proto file
pass
def encrypt(self, request, context):
# missing associated documentation comment in .proto file
pass
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def decrypt(self, request, context):
# missing associated documentation comment in .proto file
pass
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaWalletStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
# missing associated documentation comment in .proto file
pass
def encrypt(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
# missing associated documentation comment in .proto file
pass
raise NotImplementedError()
encrypt.future = None
def decrypt(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
# missing associated documentation comment in .proto file
pass
raise NotImplementedError()
decrypt.future = None
def beta_create_Wallet_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('Wallet', 'decrypt'): CardDecryptRequest.FromString,
('Wallet', 'encrypt'): CardEncryptRequest.FromString,
}
response_serializers = {
('Wallet', 'decrypt'): CardDecryptResponse.SerializeToString,
('Wallet', 'encrypt'): CardEncryptResponse.SerializeToString,
}
method_implementations = {
('Wallet', 'decrypt'): face_utilities.unary_unary_inline(servicer.decrypt),
('Wallet', 'encrypt'): face_utilities.unary_unary_inline(servicer.encrypt),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Wallet_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('Wallet', 'decrypt'): CardDecryptRequest.SerializeToString,
('Wallet', 'encrypt'): CardEncryptRequest.SerializeToString,
}
response_deserializers = {
('Wallet', 'decrypt'): CardDecryptResponse.FromString,
('Wallet', 'encrypt'): CardEncryptResponse.FromString,
}
cardinalities = {
'decrypt': cardinality.Cardinality.UNARY_UNARY,
'encrypt': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'Wallet', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
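# Hedged usage sketch (illustrative; not emitted by protoc). It assumes grpcio is
# installed (so that WalletStub above was defined) and that a Wallet server is
# listening on the hypothetical address below.
def _wallet_client_example(target='localhost:50051'):
    channel = grpc.insecure_channel(target)
    stub = WalletStub(channel)
    card = Card(card_holder_name='Jane Doe',
                card_number='4111111111111111',
                card_expiry_yyyymm=202512)
    token = stub.encrypt(CardEncryptRequest(card=card)).token
    return stub.decrypt(CardDecryptRequest(token=token)).card_in_plain_text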
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 by Cristina Garcia-Cardona <[email protected]>
# Brendt Wohlberg <[email protected]>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Backtracking methods for PGM algorithms"""
from __future__ import division, print_function
import numpy as np
__author__ = """Cristina Garcia-Cardona <[email protected]>"""
class BacktrackBase(object):
"""Base class for computing step size for
proximal gradient method via backtracking.
This class is intended to be a base class of other classes
that specialise to specific backtracking options.
After termination of the :meth:`update` method the new
state in the proximal gradient method is computed.
This also updates all the supporting variables.
"""
def __init__(self):
super(BacktrackBase, self).__init__()
def update(self, solverobj):
"""Update step size via backtracking.
Overriding this method is required.
"""
raise NotImplementedError()
class BacktrackStandard(BacktrackBase):
"""Class to estimate step size L by computing a linesearch
that guarantees that F <= Q according to the standard PGM
backtracking strategy in :cite:`beck-2009-fast`.
After termination of the :meth:`update` method the new
state in the proximal gradient method is computed.
This also updates all the supporting variables.
"""
def __init__(self, gamma_u=1.2, maxiter=50):
r"""
Parameters
----------
gamma_u : float
Multiplier applied to increase L when backtracking
in standard PGM (corresponding to :math:`\eta` in
:cite:`beck-2009-fast`).
maxiter : int
Maximum iterations of updating L when backtracking.
"""
super(BacktrackStandard, self).__init__()
# Initialise attributes controlling the backtracking
self.gamma_u = gamma_u
self.maxiter = maxiter
def update(self, solverobj):
"""
Parameters
----------
solverobj : PGM object
object containing state and functions
required to adjust the step size
"""
gradY = solverobj.grad_f() # Given Y(f), this update computes gradY(f)
maxiter = self.maxiter
iterBTrack = 0
linesearch = 1
while linesearch and iterBTrack < maxiter:
solverobj.xstep(gradY) # Given gradY(f), L, this updates X(f)
f = solverobj.obfn_f(solverobj.var_x())
Dxy = solverobj.eval_Dxy()
Q = solverobj.obfn_f(solverobj.var_y()) + \
solverobj.eval_linear_approx(Dxy, gradY) + \
(solverobj.L / 2.) * np.linalg.norm(Dxy.flatten(), 2)**2
if f <= Q:
linesearch = 0
else:
solverobj.L *= self.gamma_u
iterBTrack += 1
solverobj.F = f
solverobj.Q = Q
solverobj.iterBTrack = iterBTrack
# Update auxiliary sequence
solverobj.ystep()
class BacktrackRobust(BacktrackBase):
"""Class to estimate step size L by computing a linesearch
that guarantees that F <= Q according to the robust PGM
backtracking strategy in :cite:`florea-2017-robust`.
After termination of the :meth:`update` method the new
state in the proximal gradient method is computed.
This also updates all the supporting variables.
"""
def __init__(self, gamma_d=0.9, gamma_u=2.0, maxiter=50):
r"""
Parameters
----------
gamma_d : float
Multiplier applied to decrease L when
backtracking in robust PGM (:math:`\gamma_d` in
:cite:`florea-2017-robust`).
gamma_u : float
Multiplier applied to increase L when
backtracking in robust PGM (corresponding
          to :math:`\gamma_u` in :cite:`florea-2017-robust`).
maxiter : int
Maximum iterations of updating L when backtracking.
"""
super(BacktrackRobust, self).__init__()
# Initialise attributes controlling the backtracking
self.gamma_d = gamma_d
self.gamma_u = gamma_u
self.maxiter = maxiter
self.Tk = 0.
self.Zrb = None
def update(self, solverobj):
"""
Parameters
----------
solverobj : PGM object
object containing state and functions
required to adjust the step size
"""
if self.Zrb is None:
self.Zrb = solverobj.var_x().copy()
solverobj.L *= self.gamma_d
maxiter = self.maxiter
iterBTrack = 0
linesearch = 1
while linesearch and iterBTrack < maxiter:
t = float(1. + np.sqrt(1. + 4. * solverobj.L * self.Tk)) / \
(2. * solverobj.L)
T = self.Tk + t
y = (self.Tk * solverobj.var_xprv() + t * self.Zrb) / T
solverobj.var_y(y)
gradY = solverobj.xstep() # Given Y(f), L, this updates X(f)
f = solverobj.obfn_f(solverobj.var_x())
Dxy = solverobj.eval_Dxy()
Q = solverobj.obfn_f(solverobj.var_y()) + \
solverobj.eval_linear_approx(Dxy, gradY) + \
(solverobj.L / 2.) * np.linalg.norm(Dxy.flatten(), 2)**2
if f <= Q:
linesearch = 0
else:
solverobj.L *= self.gamma_u
iterBTrack += 1
self.Tk = T
self.Zrb += (t * solverobj.L * (solverobj.var_x() - solverobj.var_y()))
solverobj.F = f
solverobj.Q = Q
solverobj.iterBTrack = iterBTrack
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
get_element_by_attribute,
int_or_none,
limit_length,
lowercase_escape,
try_get,
)
class InstagramIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
'md5': '0d2da106a9d2631273e192b372806516',
'info_dict': {
'id': 'aye83DjauH',
'ext': 'mp4',
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1371748545,
'upload_date': '20130620',
'uploader_id': 'naomipq',
'uploader': 'Naomi Leonor Phan-Quang',
'like_count': int,
'comment_count': int,
'comments': list,
},
}, {
# missing description
'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
'info_dict': {
'id': 'BA-pQFBG8HZ',
'ext': 'mp4',
'title': 'Video by britneyspears',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1453760977,
'upload_date': '20160125',
'uploader_id': 'britneyspears',
'uploader': 'Britney Spears',
'like_count': int,
'comment_count': int,
'comments': list,
},
'params': {
'skip_download': True,
},
}, {
# multi video post
'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
'playlist': [{
'info_dict': {
'id': 'BQ0dSaohpPW',
'ext': 'mp4',
'title': 'Video 1',
},
}, {
'info_dict': {
'id': 'BQ0dTpOhuHT',
'ext': 'mp4',
'title': 'Video 2',
},
}, {
'info_dict': {
'id': 'BQ0dT7RBFeF',
'ext': 'mp4',
'title': 'Video 3',
},
}],
'info_dict': {
'id': 'BQ0eAlwhDrw',
'title': 'Post by instagram',
'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
},
}, {
'url': 'https://instagram.com/p/-Cmh1cukG2/',
'only_matching': True,
}, {
'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
'only_matching': True,
}]
@staticmethod
def _extract_embed_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
webpage)
if mobj:
return mobj.group('url')
blockquote_el = get_element_by_attribute(
'class', 'instagram-media', webpage)
if blockquote_el is None:
return
mobj = re.search(
r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
if mobj:
return mobj.group('link')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = mobj.group('url')
webpage = self._download_webpage(url, video_id)
(video_url, description, thumbnail, timestamp, uploader,
uploader_id, like_count, comment_count, comments, height,
width) = [None] * 11
shared_data = self._parse_json(
self._search_regex(
r'window\._sharedData\s*=\s*({.+?});',
webpage, 'shared data', default='{}'),
video_id, fatal=False)
if shared_data:
media = try_get(
shared_data,
(lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
lambda x: x['entry_data']['PostPage'][0]['media']),
dict)
if media:
video_url = media.get('video_url')
height = int_or_none(media.get('dimensions', {}).get('height'))
width = int_or_none(media.get('dimensions', {}).get('width'))
description = media.get('caption')
thumbnail = media.get('display_src')
timestamp = int_or_none(media.get('date'))
uploader = media.get('owner', {}).get('full_name')
uploader_id = media.get('owner', {}).get('username')
like_count = int_or_none(media.get('likes', {}).get('count'))
comment_count = int_or_none(media.get('comments', {}).get('count'))
comments = [{
'author': comment.get('user', {}).get('username'),
'author_id': comment.get('user', {}).get('id'),
'id': comment.get('id'),
'text': comment.get('text'),
'timestamp': int_or_none(comment.get('created_at')),
} for comment in media.get(
'comments', {}).get('nodes', []) if comment.get('text')]
if not video_url:
edges = try_get(
media, lambda x: x['edge_sidecar_to_children']['edges'],
list) or []
if edges:
entries = []
for edge_num, edge in enumerate(edges, start=1):
node = try_get(edge, lambda x: x['node'], dict)
if not node:
continue
node_video_url = try_get(node, lambda x: x['video_url'], compat_str)
if not node_video_url:
continue
entries.append({
'id': node.get('shortcode') or node['id'],
'title': 'Video %d' % edge_num,
'url': node_video_url,
'thumbnail': node.get('display_url'),
'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
'view_count': int_or_none(node.get('video_view_count')),
})
return self.playlist_result(
entries, video_id,
'Post by %s' % uploader_id if uploader_id else None,
description)
if not video_url:
video_url = self._og_search_video_url(webpage, secure=False)
formats = [{
'url': video_url,
'width': width,
'height': height,
}]
if not uploader_id:
uploader_id = self._search_regex(
r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
webpage, 'uploader id', fatal=False)
if not description:
description = self._search_regex(
r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
if description is not None:
description = lowercase_escape(description)
if not thumbnail:
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'formats': formats,
'ext': 'mp4',
'title': 'Video by %s' % uploader_id,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader_id': uploader_id,
'uploader': uploader,
'like_count': like_count,
'comment_count': comment_count,
'comments': comments,
}
class InstagramUserIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
'url': 'https://instagram.com/porsche',
'info_dict': {
'id': 'porsche',
'title': 'porsche',
},
'playlist_mincount': 2,
'playlist': [{
'info_dict': {
'id': '614605558512799803_462752227',
'ext': 'mp4',
'title': '#Porsche Intelligent Performance.',
'thumbnail': r're:^https?://.*\.jpg',
'uploader': 'Porsche',
'uploader_id': 'porsche',
'timestamp': 1387486713,
'upload_date': '20131219',
},
}],
'params': {
'extract_flat': True,
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader_id = mobj.group('username')
entries = []
page_count = 0
media_url = 'http://instagram.com/%s/media' % uploader_id
while True:
page = self._download_json(
media_url, uploader_id,
note='Downloading page %d ' % (page_count + 1),
)
page_count += 1
for it in page['items']:
if it.get('type') != 'video':
continue
like_count = int_or_none(it.get('likes', {}).get('count'))
user = it.get('user', {})
formats = [{
'format_id': k,
'height': v.get('height'),
'width': v.get('width'),
'url': v['url'],
} for k, v in it['videos'].items()]
self._sort_formats(formats)
thumbnails_el = it.get('images', {})
thumbnail = thumbnails_el.get('thumbnail', {}).get('url')
# In some cases caption is null, which corresponds to None
# in python. As a result, it.get('caption', {}) gives None
title = (it.get('caption') or {}).get('text', it['id'])
entries.append({
'id': it['id'],
'title': limit_length(title, 80),
'formats': formats,
'thumbnail': thumbnail,
'webpage_url': it.get('link'),
'uploader': user.get('full_name'),
'uploader_id': user.get('username'),
'like_count': like_count,
'timestamp': int_or_none(it.get('created_time')),
})
if not page['items']:
break
max_id = page['items'][-1]['id'].split('_')[0]
media_url = (
'http://instagram.com/%s/media?max_id=%s' % (
uploader_id, max_id))
return {
'_type': 'playlist',
'entries': entries,
'id': uploader_id,
'title': uploader_id,
}
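# Hedged usage sketch (illustrative only): driving the extractors above through
# youtube-dl's public API; the URL is taken from the _TESTS entries and the
# options are assumptions.
def _instagram_example():
    import youtube_dl  # deferred import to avoid a cycle at module load time
    opts = {'skip_download': True, 'quiet': True}
    with youtube_dl.YoutubeDL(opts) as ydl:
        return ydl.extract_info('https://instagram.com/p/aye83DjauH/',
                                download=False)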
|
|
"""
Player
The Player class is a simple extension of the django User model using
the 'profile' system of django. A profile is a model that tacks new
fields onto the User model without actually editing the User model
(which would mean hacking into django internals, which we want to avoid
for future compatibility reasons). The profile, which we call
'Player', is accessed with user.get_profile() by the property 'player'
defined on ObjectDB objects. Since we can customize it, we will try to
abstract as many operations as possible to work on Player rather than
on User.
We use the Player to store a more mud-friendly style of permission
system as well as to allow the admin more flexibility by storing
attributes on the Player. Within the game we should normally use the
Player manager's methods to create users, since that automatically
adds the profile extension.
The default Django permission system is geared towards web use, and its
permissions are defined on a per-application basis. In django terms,
'src/objects' is one application, 'src/scripts' another; indeed every
folder in /src with a models.py inside it is an application. Django
permissions thus have the form
e.g. 'applicationlabel.permissionstring' and django automatically
defines a set of these for editing each application from its automatic
admin interface. These are still available should you want them.
For most in-game mud-use however, like commands and other things, it
does not make sense to tie permissions to the applications in src/ -
To the user these should all just be considered part of the game
engine. So instead we define our own separate permission system here,
borrowing heavily from the django original, but allowing the
permission string to look however we want, making them unrelated to
the applications.
To make the Player model more flexible for your own game, it can also
persistently store attributes of its own. This is ideal for extra
account info and OOC account configuration variables etc.
"""
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import smart_str
from src.server.caches import get_field_cache, set_field_cache, del_field_cache
from src.server.caches import get_prop_cache, set_prop_cache, del_prop_cache
from src.players import manager
from src.typeclasses.models import Attribute, TypedObject, TypeNick, TypeNickHandler
from src.typeclasses.typeclass import TypeClass
from src.commands.cmdsethandler import CmdSetHandler
from src.commands import cmdhandler
from src.utils import logger, utils
from src.utils.utils import inherits_from
__all__ = ("PlayerAttribute", "PlayerNick", "PlayerDB")
_SESSIONS = None
_AT_SEARCH_RESULT = utils.variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
_TYPECLASS = None
#------------------------------------------------------------
#
# PlayerAttribute
#
#------------------------------------------------------------
class PlayerAttribute(Attribute):
"""
PlayerAttributes work the same way as Attributes on game objects,
but are intended to store OOC information specific to each user
and game (example would be configurations etc).
"""
db_obj = models.ForeignKey("PlayerDB")
class Meta:
"Define Django meta options"
verbose_name = "Player Attribute"
#------------------------------------------------------------
#
# Player Nicks
#
#------------------------------------------------------------
class PlayerNick(TypeNick):
"""
The default nick types used by Evennia are:
inputline (default) - match against all input
player - match against player searches
obj - match against object searches
channel - used to store own names for channels
"""
db_obj = models.ForeignKey("PlayerDB", verbose_name="player")
class Meta:
"Define Django meta options"
verbose_name = "Nickname for Players"
verbose_name_plural = "Nicknames Players"
unique_together = ("db_nick", "db_type", "db_obj")
class PlayerNickHandler(TypeNickHandler):
"""
Handles nick access and setting. Accessed through ObjectDB.nicks
"""
NickClass = PlayerNick
#------------------------------------------------------------
#
# PlayerDB
#
#------------------------------------------------------------
class PlayerDB(TypedObject):
"""
This is a special model using Django's 'profile' functionality
and extends the default Django User model. It is defined as such
by use of the variable AUTH_PROFILE_MODULE in the settings.
    One accesses the fields/methods as on any other model. We try to use this model as much
as possible rather than User, since we can customize this to
our liking.
The TypedObject supplies the following (inherited) properties:
key - main name
typeclass_path - the path to the decorating typeclass
typeclass - auto-linked typeclass
date_created - time stamp of object creation
permissions - perm strings
dbref - #id of object
db - persistent attribute storage
ndb - non-persistent attribute storage
The PlayerDB adds the following properties:
user - Connected User object. django field, needs to be save():d.
obj - game object controlled by player
character - alias for obj
name - alias for user.username
sessions - sessions connected to this player
is_superuser - bool if this player is a superuser
"""
#
# PlayerDB Database model setup
#
# inherited fields (from TypedObject):
# db_key, db_typeclass_path, db_date_created, db_permissions
# this is the one-to-one link between the customized Player object and
# this profile model. It is required by django.
user = models.ForeignKey(User, unique=True, db_index=True,
help_text="The <I>User</I> object holds django-specific authentication for each Player. A unique User should be created and tied to each Player, the two should never be switched or changed around. The User will be deleted automatically when the Player is.")
# the in-game object connected to this player (if any).
# Use the property 'obj' to access.
db_obj = models.ForeignKey("objects.ObjectDB", null=True, blank=True,
verbose_name="character", help_text='In-game object.')
# store a connected flag here too, not just in sessionhandler.
# This makes it easier to track from various out-of-process locations
db_is_connected = models.BooleanField(default=False, verbose_name="is_connected", help_text="If player is connected to game or not")
    # database storage of persistent cmdsets.
db_cmdset_storage = models.CharField('cmdset', max_length=255, null=True,
help_text="optional python path to a cmdset class. If creating a Character, this will default to settings.CMDSET_DEFAULT.")
# Database manager
objects = manager.PlayerManager()
class Meta:
app_label = 'players'
verbose_name = 'Player'
def __init__(self, *args, **kwargs):
"Parent must be initiated first"
TypedObject.__init__(self, *args, **kwargs)
# handlers
_SA(self, "cmdset", CmdSetHandler(self))
_GA(self, "cmdset").update(init_mode=True)
_SA(self, "nicks", PlayerNickHandler(self))
# Wrapper properties to easily set database fields. These are
# @property decorators that allows to access these fields using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# obj property (wraps db_obj)
#@property
def obj_get(self):
"Getter. Allows for value = self.obj"
return get_field_cache(self, "obj")
#@obj.setter
def obj_set(self, value):
"Setter. Allows for self.obj = value"
global _TYPECLASS
if not _TYPECLASS:
from src.typeclasses.typeclass import TypeClass as _TYPECLASS
if isinstance(value, _TYPECLASS):
value = value.dbobj
try:
set_field_cache(self, "obj", value)
except Exception:
logger.log_trace()
raise Exception("Cannot assign %s as a player object!" % value)
#@obj.deleter
def obj_del(self):
"Deleter. Allows for del self.obj"
del_field_cache(self, "obj")
obj = property(obj_get, obj_set, obj_del)
# whereas the name 'obj' is consistent with the rest of the code,
# 'character' is a more intuitive property name, so we
# define this too, as an alias to player.obj.
#@property
def character_get(self):
"Getter. Allows for value = self.character"
return get_field_cache(self, "obj")
#@character.setter
def character_set(self, character):
"Setter. Allows for self.character = value"
if inherits_from(character, TypeClass):
character = character.dbobj
set_field_cache(self, "obj", character)
#@character.deleter
def character_del(self):
"Deleter. Allows for del self.character"
del_field_cache(self, "obj")
character = property(character_get, character_set, character_del)
# cmdset_storage property
# This seems very sensitive to caching, so leaving it be for now /Griatch
#@property
def cmdset_storage_get(self):
"Getter. Allows for value = self.name. Returns a list of cmdset_storage."
if _GA(self, "db_cmdset_storage"):
return [path.strip() for path in _GA(self, "db_cmdset_storage").split(',')]
return []
#@cmdset_storage.setter
def cmdset_storage_set(self, value):
"Setter. Allows for self.name = value. Stores as a comma-separated string."
if utils.is_iter(value):
value = ",".join([str(val).strip() for val in value])
_SA(self, "db_cmdset_storage", value)
_GA(self, "save")()
#@cmdset_storage.deleter
def cmdset_storage_del(self):
"Deleter. Allows for del self.name"
_SA(self, "db_cmdset_storage", "")
_GA(self, "save")()
cmdset_storage = property(cmdset_storage_get, cmdset_storage_set, cmdset_storage_del)
#@property
def is_connected_get(self):
"Getter. Allows for value = self.is_connected"
return get_field_cache(self, "is_connected")
#@is_connected.setter
def is_connected_set(self, value):
"Setter. Allows for self.is_connected = value"
set_field_cache(self, "is_connected", value)
#@is_connected.deleter
def is_connected_del(self):
"Deleter. Allows for del is_connected"
set_field_cache(self, "is_connected", False)
is_connected = property(is_connected_get, is_connected_set, is_connected_del)
class Meta:
"Define Django meta options"
verbose_name = "Player"
verbose_name_plural = "Players"
#
# PlayerDB main class properties and methods
#
def __str__(self):
return smart_str("%s(player %s)" % (_GA(self, "name"), _GA(self, "dbid")))
def __unicode__(self):
return u"%s(player#%s)" % (_GA(self, "name"), _GA(self, "dbid"))
# this is required to properly handle attributes and typeclass loading
_typeclass_paths = settings.PLAYER_TYPECLASS_PATHS
_attribute_class = PlayerAttribute
_db_model_name = "playerdb" # used by attributes to safely store objects
_default_typeclass_path = settings.BASE_PLAYER_TYPECLASS or "src.players.player.Player"
# name property (wraps self.user.username)
#@property
def name_get(self):
"Getter. Allows for value = self.name"
name = get_prop_cache(self, "_name")
if not name:
name = _GA(self,"user").username
set_prop_cache(self, "_name", name)
return name
#@name.setter
def name_set(self, value):
"Setter. Allows for player.name = newname"
_GA(self, "user").username = value
_GA(self, "user").save()
set_prop_cache(self, "_name", value)
#@name.deleter
def name_del(self):
"Deleter. Allows for del self.name"
raise Exception("Player name cannot be deleted!")
name = property(name_get, name_set, name_del)
key = property(name_get, name_set, name_del)
#@property
def uid_get(self):
"Getter. Retrieves the user id"
uid = get_prop_cache(self, "_uid")
if not uid:
uid = _GA(self, "user").id
set_prop_cache(self, "_uid", uid)
return uid
def uid_set(self, value):
raise Exception("User id cannot be set!")
def uid_del(self):
raise Exception("User id cannot be deleted!")
uid = property(uid_get, uid_set, uid_del)
# sessions property
#@property
def sessions_get(self):
"Getter. Retrieve sessions related to this player/user"
global _SESSIONS
if not _SESSIONS:
from src.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.sessions_from_player(self)
#@sessions.setter
def sessions_set(self, value):
"Setter. Protects the sessions property from adding things"
raise Exception("Cannot set sessions manually!")
#@sessions.deleter
def sessions_del(self):
"Deleter. Protects the sessions property from deletion"
raise Exception("Cannot delete sessions manually!")
sessions = property(sessions_get, sessions_set, sessions_del)
#@property
def is_superuser_get(self):
"Superusers have all permissions."
is_suser = get_prop_cache(self, "_is_superuser")
if is_suser == None:
is_suser = _GA(self, "user").is_superuser
set_prop_cache(self, "_is_superuser", is_suser)
return is_suser
is_superuser = property(is_superuser_get)
#
# PlayerDB class access methods
#
def msg(self, outgoing_string, from_obj=None, data=None):
"""
Evennia -> User
This is the main route for sending data back to the user from the server.
"""
if from_obj:
try:
_GA(from_obj, "at_msg_send")(outgoing_string, to_obj=self, data=data)
except Exception:
pass
if (_GA(self, "character") and not
_GA(self, "character").at_msg_receive(outgoing_string, from_obj=from_obj, data=data)):
# the at_msg_receive() hook may block receiving of certain messages
return
outgoing_string = utils.to_str(outgoing_string, force_string=True)
for session in _GA(self, 'sessions'):
session.msg(outgoing_string, data)
def swap_character(self, new_character, delete_old_character=False):
"""
Swaps character, if possible
"""
return _GA(self, "__class__").objects.swap_character(self, new_character, delete_old_character=delete_old_character)
def delete(self, *args, **kwargs):
"Make sure to delete user also when deleting player - the two may never exist separately."
try:
if _GA(self, "user"):
_GA(_GA(self, "user"), "delete")()
except AssertionError:
pass
try:
super(PlayerDB, self).delete(*args, **kwargs)
except AssertionError:
# this means deleting the user already cleared out the Player object.
pass
#
# Execution/action methods
#
def execute_cmd(self, raw_string):
"""
Do something as this player. This command transparently
lets its typeclass execute the command.
raw_string - raw command input coming from the command line.
"""
# nick replacement - we require full-word matching.
raw_string = utils.to_unicode(raw_string)
raw_list = raw_string.split(None)
raw_list = [" ".join(raw_list[:i+1]) for i in range(len(raw_list)) if raw_list[:i+1]]
for nick in PlayerNick.objects.filter(db_obj=self, db_type__in=("inputline","channel")):
if nick.db_nick in raw_list:
raw_string = raw_string.replace(nick.db_nick, nick.db_real, 1)
break
return cmdhandler.cmdhandler(self.typeclass, raw_string)
def search(self, ostring, return_character=False):
"""
This is similar to the ObjectDB search method but will search for Players only. Errors
will be echoed, and None returned if no Player is found.
return_character - will try to return the character the player controls instead of
the Player object itself. If no Character exists (since Player is
OOC), None will be returned.
"""
matches = _GA(self, "__class__").objects.player_search(ostring)
matches = _AT_SEARCH_RESULT(self, ostring, matches, global_search=True)
if matches and return_character:
try:
return _GA(matches, "character")
except:
pass
return matches
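# Hedged, standalone sketch (not Evennia API): execute_cmd() above matches nicks
# against cumulative word prefixes of the raw input, so a nick can only replace
# text starting at the beginning of the command line.
def _nick_prefix_example(raw_string=u"tell bob hello there"):
    raw_list = raw_string.split(None)
    return [" ".join(raw_list[:i + 1])
            for i in range(len(raw_list)) if raw_list[:i + 1]]
    # -> [u'tell', u'tell bob', u'tell bob hello', u'tell bob hello there']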
|
|
import numpy as np
import h5py
from sklearn.decomposition import PCA
import math
import Starfish
from Starfish.grid_tools import HDF5Interface, determine_chunk_log
from Starfish.covariance import Sigma, sigma, V12, V22, V12m, V22m
from Starfish import constants as C
def Phi(eigenspectra, M):
'''
Warning: for any spectra of real-world dimensions, this routine will
likely over flow memory.
:param eigenspectra:
:type eigenspectra: 2D array
:param M: number of spectra in the synthetic library
:type M: int
Calculate the matrix Phi using the kronecker products.
'''
return np.hstack([np.kron(np.eye(M), eigenspectrum[np.newaxis].T) for eigenspectrum in eigenspectra])
def get_w_hat(eigenspectra, fluxes, M):
'''
Since we will overflow memory if we actually calculate Phi, we have to
determine w_hat in a memory-efficient manner.
'''
m = len(eigenspectra)
out = np.empty((M * m,))
for i in range(m):
for j in range(M):
out[i * M + j] = eigenspectra[i].T.dot(fluxes[j])
PhiPhi = np.linalg.inv(skinny_kron(eigenspectra, M))
return PhiPhi.dot(out)
def skinny_kron(eigenspectra, M):
'''
Compute Phi.T.dot(Phi) in a memory efficient manner.
eigenspectra is a list of 1D numpy arrays.
'''
m = len(eigenspectra)
out = np.zeros((m * M, m * M))
# Compute all of the dot products pairwise, beforehand
dots = np.empty((m, m))
for i in range(m):
for j in range(m):
dots[i,j] = eigenspectra[i].T.dot(eigenspectra[j])
for i in range(M * m):
for jj in range(m):
ii = i // M
j = jj * M + (i % M)
out[i, j] = dots[ii, jj]
return out
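# Hedged sanity-check sketch: on a toy problem small enough to build Phi
# explicitly, skinny_kron(eigenspectra, M) should equal Phi.T.dot(Phi).
def _skinny_kron_check(m=2, M=3, npix=5, seed=0):
    rng = np.random.RandomState(seed)
    eigenspectra = [rng.randn(npix) for _ in range(m)]
    full = Phi(eigenspectra, M)                       # shape (npix * M, m * M)
    return np.allclose(full.T.dot(full), skinny_kron(eigenspectra, M))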
def Gprior(x, s, r):
return r**s * x**(s - 1) * np.exp(- x * r) / math.gamma(s)
def Glnprior(x, s, r):
return s * np.log(r) + (s - 1.) * np.log(x) - r*x - math.lgamma(s)
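# Hedged consistency sketch: Glnprior should agree with log(Gprior) wherever the
# gamma density is positive (same shape s and rate r parameterisation).
def _gamma_prior_check(x=0.7, s=2.0, r=3.0):
    return np.allclose(Glnprior(x, s, r), np.log(Gprior(x, s, r)))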
class PCAGrid:
'''
Create and query eigenspectra.
'''
def __init__(self, wl, dv, flux_mean, flux_std, eigenspectra, w, w_hat, gparams):
'''
:param wl: wavelength array
:type wl: 1D np.array
:param dv: delta velocity
:type dv: float
:param flux_mean: mean flux spectrum
:type flux_mean: 1D np.array
:param flux_std: standard deviation flux spectrum
:type flux_std: 1D np.array
:param eigenspectra: the principal component eigenspectra
:type eigenspectra: 2D np.array
        :param w: weights to reproduce any spectrum in the original grid
        :type w: 2D np.array (m, M)
        :param w_hat: estimator of the weights (Eqn 20 of Habib et al.)
        :type w_hat: 1D np.array (m * M,)
        :param gparams: The stellar parameters of the synthetic library
        :type gparams: 2D array of parameters (nspec, nparam)
        '''
self.wl = wl
self.dv = dv
self.flux_mean = flux_mean
self.flux_std = flux_std
self.eigenspectra = eigenspectra
self.m = len(self.eigenspectra)
self.w = w
self.w_hat = w_hat
self.gparams = gparams
self.npix = len(self.wl)
self.M = self.w.shape[1] # The number of spectra in the synthetic grid
@classmethod
def create(cls, interface):
        '''
        Create a PCA grid object from a synthetic spectral library. The number
        of eigenspectra kept is chosen automatically so that the fraction of
        variance given by Starfish.PCA["threshold"] is explained.
        :param interface: HDF5Interface containing the instrument-processed spectra.
        :type interface: HDF5Interface
        '''
wl = interface.wl
dv = interface.dv
npix = len(wl)
# number of spectra in the synthetic library
M = len(interface.grid_points)
fluxes = np.empty((M, npix))
        for i, spec in enumerate(interface.fluxes):
            fluxes[i, :] = spec
# Normalize all of the fluxes to an average value of 1
# In order to remove uninteresting correlations
fluxes = fluxes/np.average(fluxes, axis=1)[np.newaxis].T
# Subtract the mean from all of the fluxes.
flux_mean = np.average(fluxes, axis=0)
fluxes -= flux_mean
# "Whiten" each spectrum such that the variance for each wavelength is 1
flux_std = np.std(fluxes, axis=0)
fluxes /= flux_std
# Use the scikit-learn PCA module
# Automatically select enough components to explain > threshold (say
# 0.99, or 99%) of the variance.
pca = PCA(n_components=Starfish.PCA["threshold"])
pca.fit(fluxes)
comp = pca.transform(fluxes)
components = pca.components_
mean = pca.mean_
variance_ratio = pca.explained_variance_ratio_
ncomp = len(components)
print("found {} components explaining {}% of the" \
" variance".format(ncomp, 100* Starfish.PCA["threshold"]))
print("Shape of PCA components {}".format(components.shape))
if not np.allclose(mean, np.zeros_like(mean)):
import sys
sys.exit("PCA mean is more than just numerical noise. Something's wrong!")
# Otherwise, the PCA mean is just numerical noise that we can ignore.
#print("Keeping only the first {} components".format(ncomp))
#eigenspectra = components[0:ncomp]
eigenspectra = components[:]
gparams = interface.grid_points
# Create w, the weights corresponding to the synthetic grid
w = np.empty((ncomp, M))
for i,pcomp in enumerate(eigenspectra):
for j,spec in enumerate(fluxes):
w[i,j] = np.sum(pcomp * spec)
# Calculate w_hat, Eqn 20 Habib
w_hat = get_w_hat(eigenspectra, fluxes, M)
return cls(wl, dv, flux_mean, flux_std, eigenspectra, w, w_hat, gparams)
def write(self, filename=Starfish.PCA["path"]):
'''
Write the PCA decomposition to an HDF5 file.
        :param filename: name of the HDF5 file to create.
:type filename: str
'''
hdf5 = h5py.File(filename, "w")
hdf5.attrs["dv"] = self.dv
# Store the eigenspectra plus the wavelength, mean, and std arrays.
pdset = hdf5.create_dataset("eigenspectra", (self.m + 3, self.npix),
compression='gzip', dtype="f8", compression_opts=9)
pdset[0,:] = self.wl
pdset[1,:] = self.flux_mean
pdset[2,:] = self.flux_std
pdset[3:, :] = self.eigenspectra
wdset = hdf5.create_dataset("w", (self.m, self.M), compression='gzip',
dtype="f8", compression_opts=9)
wdset[:] = self.w
w_hatdset = hdf5.create_dataset("w_hat", (self.m * self.M,), compression='gzip', dtype="f8", compression_opts=9)
w_hatdset[:] = self.w_hat
gdset = hdf5.create_dataset("gparams", (self.M, len(Starfish.parname)), compression='gzip', dtype="f8", compression_opts=9)
gdset[:] = self.gparams
hdf5.close()
@classmethod
def open(cls, filename=Starfish.PCA["path"]):
'''
Initialize an object using the PCA already stored to an HDF5 file.
:param filename: filename of an HDF5 object containing the PCA components.
:type filename: str
'''
hdf5 = h5py.File(filename, "r")
pdset = hdf5["eigenspectra"]
dv = hdf5.attrs["dv"]
wl = pdset[0,:]
flux_mean = pdset[1,:]
flux_std = pdset[2,:]
eigenspectra = pdset[3:,:]
wdset = hdf5["w"]
w = wdset[:]
w_hatdset = hdf5["w_hat"]
w_hat = w_hatdset[:]
gdset = hdf5["gparams"]
gparams = gdset[:]
pcagrid = cls(wl, dv, flux_mean, flux_std, eigenspectra, w, w_hat, gparams)
hdf5.close()
return pcagrid
def determine_chunk_log(self, wl_data, buffer=Starfish.grid["buffer"]):
        '''
        Possibly truncate the wl, eigenspectra, flux_mean, and flux_std arrays
        in response to some data.
        :param wl_data: the spectrum dataset you want to fit.
        :type wl_data: np.array
        :param buffer: wavelength padding to keep on either side of the data.
        :type buffer: float
        '''
# determine the indices
wl_min, wl_max = np.min(wl_data), np.max(wl_data)
wl_min -= buffer
wl_max += buffer
ind = determine_chunk_log(self.wl, wl_min, wl_max)
assert (min(self.wl[ind]) <= wl_min) and (max(self.wl[ind]) >= wl_max),\
"PCA/emulator chunking ({:.2f}, {:.2f}) didn't encapsulate " \
"full wl range ({:.2f}, {:.2f}).".format(min(self.wl[ind]),\
max(self.wl[ind]), wl_min, wl_max)
self.wl = self.wl[ind]
self.npix = len(self.wl)
self.eigenspectra = self.eigenspectra[:, ind]
self.flux_mean = self.flux_mean[ind]
self.flux_std = self.flux_std[ind]
def get_index(self, params):
'''
Given a np.array of stellar params (corresponding to a grid point),
deliver the index that corresponds to the
entry in the fluxes, list_grid_points, and weights
'''
return np.sum(np.abs(self.gparams - params), axis=1).argmin()
def get_weights(self, params):
'''
Given a np.array of parameters (corresponding to a grid point),
deliver the weights that reconstruct this spectrum out of eigenspectra.
'''
ii = self.get_index(params)
return self.w[:,ii]
def reconstruct(self, weights):
'''
Reconstruct a spectrum given some weights.
Also correct for original scaling.
'''
f = np.empty((self.m, self.npix))
for i, (pcomp, weight) in enumerate(zip(self.eigenspectra, weights)):
f[i, :] = pcomp * weight
return np.sum(f, axis=0) * self.flux_std + self.flux_mean
def reconstruct_all(self):
'''
Return a (M, npix) array with all of the spectra reconstructed.
'''
recon_fluxes = np.empty((self.M, self.npix))
for i in range(self.M):
f = np.empty((self.m, self.npix))
for j, (pcomp, weight) in enumerate(zip(self.eigenspectra, self.w[:,i])):
f[j, :] = pcomp * weight
recon_fluxes[i, :] = np.sum(f, axis=0) * self.flux_std + self.flux_mean
return recon_fluxes
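# Hypothetical end-to-end sketch (added): build the PCA decomposition from an
# instrument-processed spectral library, store it, and reconstruct one grid spectrum.
# It assumes HDF5Interface() and the default file paths are configured through the
# Starfish config, as elsewhere in this module.
def _example_pca_roundtrip():
    interface = HDF5Interface()
    pca = PCAGrid.create(interface)
    pca.write()                       # writes to Starfish.PCA["path"]
    pca = PCAGrid.open()              # reload the stored decomposition
    # Reconstruct the first library spectrum from its stored weights.
    return pca.reconstruct(pca.get_weights(pca.gparams[0]))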
class Emulator:
def __init__(self, pca, eparams):
'''
Provide the emulation products.
        :param pca: object storing the principal components, the eigenspectra
:type pca: PCAGrid
:param eparams: Optimized GP hyperparameters.
:type eparams: 1D np.array
'''
self.pca = pca
self.lambda_xi = eparams[0]
self.h2params = eparams[1:].reshape(self.pca.m, -1)**2
#Determine the minimum and maximum bounds of the grid
self.min_params = np.min(self.pca.gparams, axis=0)
self.max_params = np.max(self.pca.gparams, axis=0)
#self.eigenspectra = self.PCAGrid.eigenspectra
self.dv = self.pca.dv
self.wl = self.pca.wl
self.iPhiPhi = (1./self.lambda_xi) * np.linalg.inv(skinny_kron(self.pca.eigenspectra, self.pca.M))
self.V11 = self.iPhiPhi + Sigma(self.pca.gparams, self.h2params)
self._params = None # Where we want to interpolate
self.V12 = None
self.V22 = None
self.mu = None
self.sig = None
@classmethod
def open(cls, filename=Starfish.PCA["path"]):
'''
Create an Emulator object from an HDF5 file.
'''
#Create the PCAGrid from this filename
pcagrid = PCAGrid.open(filename)
hdf5 = h5py.File(filename, "r")
eparams = hdf5["eparams"][:]
hdf5.close()
return cls(pcagrid, eparams)
def determine_chunk_log(self, wl_data):
'''
Possibly truncate the wl grid in response to some data. Also truncate eigenspectra, and flux_mean and flux_std.
'''
self.pca.determine_chunk_log(wl_data)
self.wl = self.pca.wl
@property
def params(self):
return self._params
@params.setter
def params(self, pars):
# If the pars is outside of the range of emulator values, raise a ModelError
if np.any(pars < self.min_params) or np.any(pars > self.max_params):
raise C.ModelError("Querying emulator outside of original PCA parameter range.")
# Assumes pars is a single parameter combination, as a 1D np.array
self._params = pars
# Do this according to R&W eqn 2.18, 2.19
# Recalculate V12, V21, and V22.
self.V12 = V12(self._params, self.pca.gparams, self.h2params, self.pca.m)
self.V22 = V22(self._params, self.h2params, self.pca.m)
# Recalculate the covariance
self.mu = self.V12.T.dot(np.linalg.solve(self.V11, self.pca.w_hat))
self.mu.shape = (-1)
self.sig = self.V22 - self.V12.T.dot(np.linalg.solve(self.V11, self.V12))
@property
def matrix(self):
return (self.mu, self.sig)
def draw_many_weights(self, params):
'''
:param params: multiple parameters to produce weight draws at.
:type params: 2D np.array
'''
# Local variables, different from instance attributes
v12 = V12m(params, self.pca.gparams, self.h2params, self.pca.m)
v22 = V22m(params, self.h2params, self.pca.m)
mu = v12.T.dot(np.linalg.solve(self.V11, self.pca.w_hat))
sig = v22 - v12.T.dot(np.linalg.solve(self.V11, v12))
weights = np.random.multivariate_normal(mu, sig)
# Reshape these weights into a 2D matrix
weights.shape = (len(params), self.pca.m)
return weights
def draw_weights(self):
'''
Using the current settings, draw a sample of PCA weights
'''
if self.V12 is None:
print("No parameters are set, yet. Must set parameters first.")
return
return np.random.multivariate_normal(self.mu, self.sig)
def reconstruct(self):
'''
        Reconstruct a spectrum using a random draw of weights. This assumes
        that we are always drawing weights at a single stellar parameter value.
'''
weights = self.draw_weights()
return self.pca.reconstruct(weights)
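# Hypothetical usage sketch (added): query the trained emulator at one stellar
# parameter set and draw a reconstructed spectrum. The parameters must lie inside
# the original grid; the values implied here are placeholders.
def _example_emulator_draw(params):
    emu = Emulator.open()             # reads the PCA grid and eparams from the HDF5 file
    emu.params = params               # e.g. np.array([Teff, logg, Z]) inside the grid
    mu, sig = emu.matrix              # GP predictive mean and covariance of the weights
    return emu.reconstruct(), mu, sig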
def main():
pass
if __name__=="__main__":
main()
|
|
import unittest
import os
from sqlalchemy.orm.collections import InstrumentedList
from app import app, db
from app.models.shopcart import Shopcart
from app.models.product import Product
from app.models.dataerror import DataValidationError
DATABASE_URI = os.getenv('DATABASE_URI', 'mysql+pymysql://root:@localhost:3306/shopcarts_test')
class TestShopcart(unittest.TestCase):
""" Shopcart Model Tests """
@classmethod
def setUpClass(cls):
app.debug = False
# Set up the test database
if DATABASE_URI:
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
def setUp(self):
db.drop_all() # clean up the last tests
db.create_all() # make our sqlalchemy tables
Product.seed_db()
def tearDown(self):
# Clean up after tests
db.session.remove()
db.drop_all()
def test_it_can_be_instantiated(self):
""" Test Instantiation """
cart = Shopcart(1)
self.assertEqual(cart.user_id, 1)
self.assertEqual(cart.products, [])
def test_shopcart_representation(self):
""" Test the shopcart is printed correctly """
cart = Shopcart(1)
self.assertEqual(str(cart), "<UserId 1 - 0 Products>")
def test_it_can_be_saved(self):
""" Test Model is Saved to database """
# Check that there are no shopcarts
items = Shopcart.all()
self.assertEqual(len(items), 0)
# Save a shopcart and check it was added to memory
cart = Shopcart(1)
cart.save()
items = Shopcart.all()
self.assertEqual(len(items), 1)
# Check that the saved item is actually the one we saved
fetched = items[0]
self.assertEqual(fetched.user_id, 1)
self.assertEqual(fetched.products, [])
def test_string_is_invalid_product(self):
"""Test that strings are not accepted as products"""
# Passing a string
with self.assertRaises(DataValidationError):
Shopcart(1, 'product1')
def test_float_is_invalid_product(self):
"""Test that floats are not accepted as products"""
        # Passing a float
with self.assertRaises(DataValidationError):
Shopcart(1, 2.0)
def test_set_is_invalid_product(self):
"""Test for not allowing sets in products"""
# Passing a set
with self.assertRaises(DataValidationError):
Shopcart(1, {1})
def test_invalid_dict_is_invalid_product(self):
"""Test that invalid dict are not accepted as products"""
        # Passing a dict with an invalid value
with self.assertRaises(DataValidationError):
Shopcart(1, {1: "many"})
def test_that_products_are_always_a_list(self):
"""Test that the shopcart model has products as a list"""
# Initializing just an id without product
shopcart = Shopcart(1)
self.assertEqual(type(shopcart.products), InstrumentedList)
# initializing None as products
shopcart = Shopcart(1, None)
self.assertEqual(type(shopcart.products), InstrumentedList)
# initializing empty list
shopcart = Shopcart(1, [])
self.assertEqual(type(shopcart.products), InstrumentedList)
# Passing just a product id
shopcart = Shopcart(1, 5)
self.assertEqual(type(shopcart.products), InstrumentedList)
# A dictionary of products
shopcart = Shopcart(1, [{"pid":1, "quantity":3}, {"pid":2, "quantity":8}, {"pid":3, "quantity":21}])
self.assertEqual(type(shopcart.products), InstrumentedList)
def test_initializing_with_products(self):
"""Testing initializing a cart with products """
# Create a new shopcart without a product
shopcart = Shopcart(0)
self.assertTrue(len(shopcart.products) == 0)
# Create a new shopcart with a product
shopcart = Shopcart(0, 5)
self.assertEqual(len(shopcart.products), 1)
self.assertEqual(shopcart.products[0].quantity, 1)
# Creating a valid dictionary
shopcart = Shopcart(0, [{"pid":1, "quantity":7}, {"pid":2, "quantity":21}, {"pid":3, "quantity":55}])
self.assertEqual(len(shopcart.products), 3)
self.assertEqual(shopcart.products[0].quantity, 7)
self.assertEqual(shopcart.products[1].quantity, 21)
self.assertEqual(shopcart.products[2].quantity, 55)
# Saving products
shopcart.save()
shopcarts = Shopcart.all()
self.assertEqual(len(shopcarts), 1)
        # Correct type
        s = shopcarts[0]
        self.assertEqual(type(s.products), InstrumentedList)
        # Correct length
        self.assertEqual(len(s.products), 3)
def test_adding_with_a_product(self):
""" Test to add a product to an exiting shopcart"""
shopcart = Shopcart(7)
shopcart.save()
shopcarts = shopcart.all()
s = shopcarts[0]
self.assertEqual(s.user_id, 7)
self.assertEqual(len(s.products), 0)
        # Adding product 1 with quantity 34
        s.add_product(1, 34)
        # There's only one product
        self.assertEqual(len(s.products), 1)
        # It's the correct one with the correct quantity
        self.assertEqual(s.products[0].quantity, 34)
        # Adding a second
        s.add_product(2, 55)
        # There are now two products
        self.assertEqual(len(s.products), 2)
        # It's the correct one with the correct quantity
        self.assertEqual(s.products[1].quantity, 55)
def test_adding_a_product_that_already_exists(self):
""" Test to add a product that exists in a cart """
shopcart = Shopcart(7, [{"pid": 1, "quantity": 5}])
shopcart.save()
shopcart.add_product(1,5)
self.assertEqual(shopcart.products[0].quantity, 10)
def test_adding_an_invalid_product(self):
""" Test to add invalid product"""
shopcart = Shopcart(21)
shopcart.save()
shopcarts = shopcart.all()
s = shopcarts[0]
self.assertEqual(s.user_id, 21)
self.assertEqual(len(s.products), 0)
# Adding product 21.5
with self.assertRaises(DataValidationError):
s.add_product([{"pid": 21.5}])
# Adding a second error
with self.assertRaises(DataValidationError):
s.add_product([{"pid": 1, "quantity": 0.5}])
def test_get_all_shopcarts(self):
""" Test All Shopcarts Can Be Retrieved """
# Add 3 shopcarts to memory and check that we can retrieve them all
cart1 = Shopcart(1)
cart2 = Shopcart(2)
cart3 = Shopcart(3)
cart1.save()
cart2.save()
cart3.save()
# Invoke method and check the returned data
shopcarts = Shopcart.all()
self.assertEqual(len(shopcarts), 3)
self.assertEqual(shopcarts[0].user_id, 1)
self.assertEqual(shopcarts[1].user_id, 2)
self.assertEqual(shopcarts[2].user_id, 3)
def test_find_a_shopcart(self):
""" Find a shopcart by uid """
Shopcart(2).save()
Shopcart(5).save()
cart = Shopcart.find(5)
self.assertEqual(cart.user_id, 5)
self.assertEqual(len(cart.products), 0)
def test_find_shopcart_that_doesnt_exist(self):
""" Try to find a non-existant Shopcart """
Shopcart(2).save()
cart = Shopcart.find(5)
self.assertIsNone(cart)
def test_delete_a_shopcart(self):
""" Test A Shopcart Can Be Deleted """
cart = Shopcart(1, 1)
cart.save()
self.assertEqual(len(Shopcart.all()), 1)
# Delete the shopcart and make sure it isn't in the database
cart.delete()
self.assertEqual(len(Shopcart.all()), 0)
def test_delete_products_from_shopcart(self):
""" Test a product in a shopcart can be deleted """
cart = Shopcart(1, [{"pid":1, "quantity":2}, {"pid":5, "quantity":7}])
cart.save()
cart.delete_product(5)
self.assertEqual(len(cart.products), 1)
def test_shopcarts_are_pruned(self):
""" Test empty shopcarts are pruned """
Shopcart(1).save()
Shopcart(2).save()
Shopcart(3, [{"pid":5, "quantity":7}]).save()
Shopcart.prune()
self.assertEqual(len(Shopcart.all()), 1)
def test_get_shopcarts_with_a_specific_product(self):
Shopcart(1, [{"pid":1, "quantity":7}, {"pid":2, "quantity":5}]).save()
Shopcart(2, [{"pid":3, "quantity":1}]).save()
Shopcart(3, [{"pid":4, "quantity":1}, {"pid":5, "quantity":4},{"pid":1, "quantity":3}]).save()
self.assertEqual(len(Shopcart.all()), 3)
self.assertEqual(len(Shopcart.find_by_product(1)), 2)
self.assertEqual(len(Shopcart.find_by_product(5)), 1)
self.assertEqual(len(Shopcart.find_by_product(6)), 0)
def test_add_multiple_products(self):
""" Add multiple products to an existing cart """
cart = Shopcart(1, [{"pid":1, "quantity":1}])
cart.save()
cart.add_products([{"pid":1, "quantity":2}, {"pid":2, "quantity":4}])
self.assertEqual(len(cart.products), 2)
self.assertEqual(cart.products[0].quantity, 3)
self.assertEqual(cart.products[1].quantity, 4)
def test_add_products_with_invalid_type(self):
""" Try to add multiple products not as a dict """
cart = Shopcart(1)
with self.assertRaises(DataValidationError):
cart.add_products([(1, 2), (2, 4)])
def test_create_product_helper_function_with_invalid_pid(self):
""" Try to create a product association to a non-existant product """
with self.assertRaises(DataValidationError):
Shopcart.create_product(10)
def test_shopcart_serialization(self):
""" Test serializing a shopcart """
cart = Shopcart(1, [{"pid":1, "quantity":2}, {"pid":2, "quantity":1}])
cart = cart.serialize()
self.assertEqual(cart['user_id'], 1)
self.assertEqual(cart['products'][1]["name"], "Apple")
self.assertEqual(cart['products'][1]["quantity"], 2)
self.assertEqual(cart['products'][2]["name"], "Pen")
self.assertEqual(cart['products'][2]["quantity"], 1)
if __name__ == '__main__':
unittest.main()
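# Added usage note: the suite reads DATABASE_URI from the environment, so it can be
# pointed at a disposable test database before running, e.g. (module name assumed):
#   DATABASE_URI="mysql+pymysql://root:@localhost:3306/shopcarts_test" \
#       python -m unittest tests.test_shopcart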
|
|
#===----------------------------------------------------------------------===##
#
# The LLVM Compiler Infrastructure
#
# This file is dual licensed under the MIT and the University of Illinois Open
# Source Licenses. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===##
import errno
import os
import time
import random
import lit.Test # pylint: disable=import-error
import lit.TestRunner # pylint: disable=import-error
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser \
# pylint: disable=import-error
import lit.util # pylint: disable=import-error
from libcxx.test.executor import LocalExecutor as LocalExecutor
import libcxx.util
class LibcxxTestFormat(object):
"""
    Custom test format handler for use with the test format used by libc++.
    Tests fall into three categories:
FOO.pass.cpp - Executable test which should compile, run, and exit with
code 0.
FOO.fail.cpp - Negative test case which is expected to fail compilation.
FOO.sh.cpp - A test that uses LIT's ShTest format.
"""
def __init__(self, cxx, use_verify_for_fail, execute_external,
executor, exec_env):
self.cxx = cxx.copy()
self.use_verify_for_fail = use_verify_for_fail
self.execute_external = execute_external
self.executor = executor
self.exec_env = dict(exec_env)
@staticmethod
def _make_custom_parsers():
return [
IntegratedTestKeywordParser('FLAKY_TEST.', ParserKind.TAG,
initial_value=False),
IntegratedTestKeywordParser('MODULES_DEFINES:', ParserKind.LIST,
initial_value=[])
]
@staticmethod
def _get_parser(key, parsers):
for p in parsers:
if p.keyword == key:
return p
        assert False, "parser not found"
# TODO: Move this into lit's FileBasedTest
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
# Ignore dot files and excluded tests.
if filename.startswith('.') or filename in localConfig.excludes:
continue
filepath = os.path.join(source_path, filename)
if not os.path.isdir(filepath):
if any([filename.endswith(ext)
for ext in localConfig.suffixes]):
yield lit.Test.Test(testSuite, path_in_suite + (filename,),
localConfig)
def execute(self, test, lit_config):
while True:
try:
return self._execute(test, lit_config)
except OSError as oe:
if oe.errno != errno.ETXTBSY:
raise
time.sleep(0.1)
def _execute(self, test, lit_config):
name = test.path_in_suite[-1]
name_root, name_ext = os.path.splitext(name)
is_libcxx_test = test.path_in_suite[0] == 'libcxx'
is_sh_test = name_root.endswith('.sh')
is_pass_test = name.endswith('.pass.cpp')
is_fail_test = name.endswith('.fail.cpp')
assert is_sh_test or name_ext == '.cpp', 'non-cpp file must be sh test'
if test.config.unsupported:
return (lit.Test.UNSUPPORTED,
"A lit.local.cfg marked this unsupported")
parsers = self._make_custom_parsers()
script = lit.TestRunner.parseIntegratedTestScript(
test, additional_parsers=parsers, require_script=is_sh_test)
# Check if a result for the test was returned. If so return that
# result.
if isinstance(script, lit.Test.Result):
return script
if lit_config.noExecute:
return lit.Test.Result(lit.Test.PASS)
# Check that we don't have run lines on tests that don't support them.
if not is_sh_test and len(script) != 0:
lit_config.fatal('Unsupported RUN line found in test %s' % name)
tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir,
tmpBase)
script = lit.TestRunner.applySubstitutions(script, substitutions)
test_cxx = self.cxx.copy()
if is_fail_test:
test_cxx.useCCache(False)
test_cxx.useWarnings(False)
extra_modules_defines = self._get_parser('MODULES_DEFINES:',
parsers).getValue()
if '-fmodules' in test.config.available_features:
test_cxx.compile_flags += [('-D%s' % mdef.strip()) for
mdef in extra_modules_defines]
test_cxx.addWarningFlagIfSupported('-Wno-macro-redefined')
# FIXME: libc++ debug tests #define _LIBCPP_ASSERT to override it
# If we see this we need to build the test against uniquely built
# modules.
if is_libcxx_test:
with open(test.getSourcePath(), 'r') as f:
contents = f.read()
if '#define _LIBCPP_ASSERT' in contents:
test_cxx.useModules(False)
# Dispatch the test based on its suffix.
if is_sh_test:
if not isinstance(self.executor, LocalExecutor):
                # We can't run ShTest tests with an executor yet.
                # For now, bail on trying to run them.
return lit.Test.UNSUPPORTED, 'ShTest format not yet supported'
return lit.TestRunner._runShTest(test, lit_config,
self.execute_external, script,
tmpBase)
elif is_fail_test:
return self._evaluate_fail_test(test, test_cxx, parsers)
elif is_pass_test:
return self._evaluate_pass_test(test, tmpBase, lit_config,
test_cxx, parsers)
else:
# No other test type is supported
assert False
def _clean(self, exec_path): # pylint: disable=no-self-use
libcxx.util.cleanFile(exec_path)
def _evaluate_pass_test(self, test, tmpBase, lit_config,
test_cxx, parsers):
execDir = os.path.dirname(test.getExecPath())
source_path = test.getSourcePath()
exec_path = tmpBase + '.exe'
object_path = tmpBase + '.o'
# Create the output directory if it does not already exist.
lit.util.mkdir_p(os.path.dirname(tmpBase))
try:
# Compile the test
cmd, out, err, rc = test_cxx.compileLinkTwoSteps(
source_path, out=exec_path, object_file=object_path,
cwd=execDir)
compile_cmd = cmd
if rc != 0:
report = libcxx.util.makeReport(cmd, out, err, rc)
report += "Compilation failed unexpectedly!"
return lit.Test.FAIL, report
# Run the test
local_cwd = os.path.dirname(source_path)
env = None
if self.exec_env:
env = self.exec_env
# TODO: Only list actually needed files in file_deps.
# Right now we just mark all of the .dat files in the same
# directory as dependencies, but it's likely less than that. We
# should add a `// FILE-DEP: foo.dat` to each test to track this.
data_files = [os.path.join(local_cwd, f)
for f in os.listdir(local_cwd) if f.endswith('.dat')]
is_flaky = self._get_parser('FLAKY_TEST.', parsers).getValue()
max_retry = 3 if is_flaky else 1
for retry_count in range(max_retry):
cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
local_cwd, data_files,
env)
if rc == 0:
res = lit.Test.PASS if retry_count == 0 else lit.Test.FLAKYPASS
return res, ''
elif rc != 0 and retry_count + 1 == max_retry:
report = libcxx.util.makeReport(cmd, out, err, rc)
report = "Compiled With: %s\n%s" % (compile_cmd, report)
report += "Compiled test failed unexpectedly!"
return lit.Test.FAIL, report
assert False # Unreachable
finally:
            # Note that cleanup of exec_file happens in `_clean()`. If you
            # override this, cleanup is your responsibility.
libcxx.util.cleanFile(object_path)
self._clean(exec_path)
def _evaluate_fail_test(self, test, test_cxx, parsers):
source_path = test.getSourcePath()
# FIXME: lift this detection into LLVM/LIT.
with open(source_path, 'r') as f:
contents = f.read()
verify_tags = ['expected-note', 'expected-remark', 'expected-warning',
'expected-error', 'expected-no-diagnostics']
use_verify = self.use_verify_for_fail and \
any([tag in contents for tag in verify_tags])
        # FIXME(EricWF): GCC 5 does not evaluate static assertions that
        # are dependent on a template parameter when '-fsyntax-only' is passed.
# This is fixed in GCC 6. However for now we only pass "-fsyntax-only"
# when using Clang.
if test_cxx.type != 'gcc':
test_cxx.flags += ['-fsyntax-only']
if use_verify:
test_cxx.useVerify()
cmd, out, err, rc = test_cxx.compile(source_path, out=os.devnull)
expected_rc = 0 if use_verify else 1
if rc == expected_rc:
return lit.Test.PASS, ''
else:
report = libcxx.util.makeReport(cmd, out, err, rc)
report_msg = ('Expected compilation to fail!' if not use_verify else
'Expected compilation using verify to pass!')
return lit.Test.FAIL, report + report_msg + '\n'
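# Hypothetical wiring sketch (added, comments only): a lit site configuration would
# typically instantiate this format and hand it to lit, roughly along these lines:
#   config.test_format = LibcxxTestFormat(
#       cxx, use_verify_for_fail=True, execute_external=False,
#       executor=LocalExecutor(), exec_env={})
# The exact configuration object and argument values depend on the libc++ test-suite
# setup and are assumptions here.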
|
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from fast_rcnn.nms_wrapper import nms
import cPickle
from utils.blob import im_list_to_blob
import os
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def im_detect(net, im, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes
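# Hypothetical single-image sketch (added): run detection on one image with external
# proposals (non-RPN mode). The image path and proposal array are placeholders.
def _example_detect(net, image_path, proposals):
    im = cv2.imread(image_path)                    # BGR image, as expected by im_detect
    scores, boxes = im_detect(net, im, proposals)  # proposals: R x 4 array, or None with RPN
    return scores, boxes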
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
            # CPU NMS is much faster than GPU NMS when the number of boxes
            # is relatively small (e.g., < 10k)
# TODO(rbg): autotune NMS dispatch
keep = nms(dets, thresh, force_cpu=True)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(net, imdb, max_per_image=400, thresh=-np.inf, vis=False):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, net)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
for i in xrange(num_images):
# filter out any ground truth boxes
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only those rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(net, im, box_proposals)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in xrange(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
if cfg.TEST.AGNOSTIC:
cls_boxes = boxes[inds, 4:8]
else:
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
if vis:
vis_detections(im, imdb.classes[j], cls_dets)
all_boxes[j][i] = cls_dets
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating detections'
imdb.evaluate_detections(all_boxes, output_dir)
|
|
import os
import sys
import re
import math
from orbit.utils import orbitFinalize
#===============================================================
class _possibleElementType:
"""
Class. Specifies all possible element types
"""
def __init__(self):
"""
Constructor. Creates list of element types.
"""
self.__names_type = []
self.__names_type.append("drift")
self.__names_type.append("aperture")
self.__names_type.append("sbend")
self.__names_type.append("rbend")
self.__names_type.append("quad")
self.__names_type.append("quadrupole")
self.__names_type.append("sextupole")
self.__names_type.append("octupole")
self.__names_type.append("multipole")
self.__names_type.append("solenoid")
self.__names_type.append("kicker")
self.__names_type.append("hkicker")
self.__names_type.append("vkicker")
self.__names_type.append("tkicker")
self.__names_type.append("hkick")
self.__names_type.append("vkick")
self.__names_type.append("rfcavity")
self.__names_type.append("rcollimator")
self.__names_type.append("collimator")
self.__names_type.append("marker")
self.__names_type.append("monitor")
self.__names_type.append("hmonitor")
self.__names_type.append("vmonitor")
self.__names_type.append("dipedge")
self.__names_type.append("elseparator")
def __del__(self):
"""
Method. Deletes element.
"""
del self.__names_type
def checkType(self, name_in):
"""
Method. Confirms validity of element type.
"""
name = name_in.lower()
if(self.__names_type.count(name) == 0):
msg = "Error creating lattice element:"
msg = msg + os.linesep
msg = msg + "There is no element with type:" + name
msg = msg + os.linesep
msg = msg + "Stop."
msg = msg + os.linesep
orbitFinalize(msg)
return name_in
#===============================================================
class MADX_LattElement:
"""Class. Represents an arbitrary element in the lattice"""
_typeChecker = _possibleElementType()
def __init__(self, name, Typename):
"""
Constructor. Creates element with name, type,
and parameter dictionary.
"""
self.__name = name
self.__type = self._typeChecker.checkType(Typename)
self.__par = {}
def __del__(self):
"""
Method. Deletes parameters.
"""
del self.__par
def getName(self):
"""
Method. Returns name of element
"""
return self.__name
def setType(self, tp):
"""
Method. Sets the type of element without checking.
"""
self.__type = tp
def getType(self):
"""
Method. Returns type of element
"""
return self.__type
def addParameter(self, nameOfPar, parVal):
"""
Method. Adds parameter and value to element.
"""
self.__par[nameOfPar] = parVal
def hasParameter(self, nameOfPar):
if self.__par.has_key(nameOfPar) == 0:
return 0
else:
return 1
def getParameter(self, nameOfPar):
"""
Method. Returns name of parameter.
"""
if self.__par.has_key(nameOfPar) == 0:
print "class MAD_LattElement, method getParameter"
print "The name of Element = ", self.__name
print "The type of Element = ", self.__type
print "The Element's key-val = ", self.__par
print "This Element does not have Parameter = ", nameOfPar
print "Stop."
sys.exit (0)
return self.__par[nameOfPar]
def getParameters(self):
"""
Method. Returns parameter dictionary.
"""
return self.__par
def getElements(self):
"""
Method. Returns list of elements (only one here)
"""
elements = []
elements.append(self)
return elements
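# Illustrative sketch (added): building a lattice element by hand with the class above.
# The parameter names follow the MADX conventions used elsewhere in this parser.
def _example_quad_element():
    quad = MADX_LattElement("qf1", "quadrupole")
    quad.addParameter("l", 0.5)       # length
    quad.addParameter("k1", 0.8)      # quadrupole strength
    return quad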
#====================================================================
class _variable:
"Class. Holds MADX variables."
def __init__(self):
"Constructor. Creates empty MAD variable."
self._name = None
self._expression = ""
self._value = None
def getType():
"""
Method. Static method of this class.
Returns the name of the type.
"""
return "variable"
getType = staticmethod(getType)
def getValue(self):
"""
Method. It returns the numerical value of this variable.
"""
return self._value
def setValue(self, val):
"""
Method. It sets the numerical value of this variable.
"""
self._value = val
def setName(self, name):
self._name = name
def getName(self):
return self._name
def setExpression(self,line):
self._expression = line
def getExpression(self):
return self._expression
def parseLine(self,line_init):
"""
Method. It does the first parsing of the initial string.
"""
#divide string onto two parts: name of value and value
patt = re.compile(r'(?<=:=).*')
s_val = re.findall(patt,line_init)
if(len(s_val) > 0):
patt = re.compile(r'.*(?=:=)')
s_name = re.findall(patt,line_init)
s_val = s_val[0]
s_name = s_name[0]
else:
            #deal with const definition like AA = 1.2
patt = re.compile(r'(?<==).*')
s_val = re.findall(patt,line_init)
patt = re.compile(r'.*(?==)')
s_name = re.findall(patt,line_init)
s_val = s_val[0]
s_name = s_name[0]
if "." in s_name:
s_name = "".join(s_name.split("."))
self.setName(s_name)
self.setExpression(s_val)
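# Illustrative sketch (added): parsing a deferred MADX assignment with the class above.
def _example_parse_variable():
    var = _variable()
    var.parseLine("kqf:=0.032")       # name goes to getName(), "0.032" to getExpression()
    return var.getName(), var.getExpression()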
#====================================================================
class StringFunctions:
"""
This class defines the set of static string functions.
"""
def replaceElementKeys(self, str_in, elem, key, value):
"""
Method. It will replace elem[key] in the string expression of this variable.
"""
new_val = r'(' + str(value) + r')'
s = elem+r'\['+key+r'\]'
patt = re.compile(s)
str_out = re.sub(patt,new_val,str_in)
return str_out
replaceElementKeys = classmethod(replaceElementKeys)
def getElementKeys(self, str_in):
"""
Method. It returns the set of [element,key] pairs for input string.
"""
res = []
patt=re.compile(r'[\w]*\[[\w]*\]')
s_name_key = re.findall(patt,str_in)
if len(s_name_key) == 0:
return res
patt_elem = re.compile(r'[\w]*(?=\[)')
patt_key = re.compile(r'(?<=\[)[\w]*(?=\])')
for s in s_name_key:
elem = re.findall(patt_elem,s)[0]
key = re.findall(patt_key,s)[0]
res.append([elem,key])
return res
getElementKeys = classmethod(getElementKeys)
def calculateString(self, str_in, localDict):
"""
Method. It returns a tuple (True,value) if
the expression can be evaluated and (False,None) otherwise.
"""
try:
val = eval(str_in,globals(),localDict)
return (True, val)
except:
return (False, None)
calculateString = classmethod(calculateString)
def replaceMath(self, str_in):
"""
Method. It replaces math symbols
to make them readable for python eval().
"""
#replace .e by .0e
str_out = re.sub("\.e",".0e",str_in)
#check the math operatons
str_out = re.sub("sin\(","math.sin(",str_out)
str_out = re.sub("cos\(","math.cos(",str_out)
str_out = re.sub("tan\(","math.tan(",str_out)
str_out = re.sub("exp\(","math.exp(",str_out)
str_out = re.sub("log\(","math.log(",str_out)
str_out = re.sub("acos\(","math.acos(",str_out)
str_out = re.sub("asin\(","math.asin(",str_out)
str_out = re.sub("atan\(","math.atan(",str_out)
str_out = re.sub("sqrt\(","math.sqrt(",str_out)
str_out = re.sub("pi","math.pi",str_out)
return str_out
replaceMath = classmethod(replaceMath)
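# Illustrative sketch (added): the parser evaluates expressions by first rewriting the
# math functions with replaceMath() and then calling calculateString() with the known
# variable values. The variable name below is a placeholder.
def _example_evaluate_expression():
    expr = StringFunctions.replaceMath("2*sin(0.5)+kqd")
    ok, value = StringFunctions.calculateString(expr, {"kqd": 1.0})
    return ok, value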
#====================================================================
class MADX_Parser:
""" MADX parser """
def __init__(self):
""" Create instance of the MAD_Parser class """
        self._madxLines = [] # lines of the MADX file(s)
self._accElemDict = {}
self._varDict = {} # dict of variables
self._sequencename = ""
self._sequencelength = ""
self._sequencelist = []
#the lines to ignore will start with these words
        self.__ignoreWords = ["title", "beam", "none", "initial"]
def collect_madx_lines(self,fileName,madFilePath):
        # first stage: read the MADX file into the lines array
        # the initialization can be recursive if there are nested MADX files
fl = open(os.path.join(madFilePath, fileName))
for str in fl:
#check if the line is a comment
if str.find("!") == 0:
str_local = ""
continue
#check if the line is a comment
if str.find("//") == 0:
str_local = ""
continue
#check the empty line
if not str:
str_local = ""
continue
# remove spaces, capital letters, words,...
tmp=str[:].lower().split()
str0="".join([x for x in tmp if x not in ["real","const"]]) # removes "const" and "real" from var definitions
str0 = "".join(str0.split()) # removes all spaces
# check if the line with ignore word
            ignore_word = [word for word in self.__ignoreWords if str0.startswith(word)]
if ignore_word:
print("The line starting with {} word found. line [{}] is ignored".format(ignore_word,str0))
str_local = ""
continue
#take off the ";" at end of each line
if str0.rfind(";") > 0:
str_local = ""
for i in range(str0.rfind(";")):
str_local = "".join([str_local,str0[i]])
str_local.strip()
# deal with multi-line definitions in madx file
                # would it be better to collect all the lines first, join them, and split on ";"?
else:
pass
#check if there is a nested file
if "call" in str.lower() and "file" in str.lower():
new_file = self._parseNestedFileLine(str)
self.collect_madx_lines(new_file,self.madxFilePath)
else:
self._madxLines.append(str_local)
def parse(self,MADXfileName):
self.__init__()
str_local = ""
aper_warning = 0
self.madxFilePath = os.path.dirname(MADXfileName)
fileName = os.path.basename(MADXfileName)
self.collect_madx_lines(fileName,self.madxFilePath)
for str_local in self._madxLines:
loc_flag_elem = True
#Parse element/variable":="/sequence definition
if "=" in str_local and ":" not in str_local.split("=")[0]: # here we avoid the variable parsing twice
loc_flag_elem = False # in order to avoid the parsing as an element
if "at=" not in str_local:
# we have a MADX variable here!
var = _variable()
var.parseLine(str_local)
self._varDict[var.getName()] = var
else:
                    # an already-defined element given extra parameters at its position: not a variable, but it cannot be parsed as a new element either
tokens = str_local.split(",")
name = tokens[0]
elem_tmp = self._accElemDict[name]
elem = self.parseParameters(str_local,elem_tmp)
if re.search(r'[\w]* *:.*',str_local):
if(str_local.rfind("sequence") >=0):
tokens = str_local.split(":") # is it possible to have more than two tokens here?
self._sequencename = tokens[0]
tmp_str = ":".join(tokens[1:])
aux = [x.split("l=")[-1] for x in tmp_str.split(",") if "l=" in x]
var = _variable()
var.parseLine("SEQUENCE_LEN={}".format(aux[0]))
self._varDict[var.getName()] = var
localValDict =self.calculateVariables() # all variables are ready, now we can recalculate them
self._sequencelength = self._varDict["SEQUENCE_LEN"].getValue()
aux = [x.split("refer=")[-1] for x in tmp_str.split(",") if "refer=" in x]
if not aux:
elementRefer = "centre"
else:
elementRefer = aux[0]
else:
if ":=" in str_local and "at=" not in str_local:
if ":" not in str_local.split(":=")[0]:
# we have a MADX variable here!
var = _variable()
var.parseLine(str_local)
self._varDict[var.getName()] = var
loc_flag_elem = False # in order to avoid the parsing as an element
# element parsing
if loc_flag_elem:
if "at=" in str_local:
tokens = str_local.split(",")
tmp = ",".join([x for x in tokens if "at=" not in x]) # remove location from the string (is it necessary?)
else:
tmp = str_local
elem = self.parseElem(tmp) # elem can be defined directly at the location!
self._accElemDict[elem.getName()] = elem
#Add elements to the sequence list (and secondary elem params to an elem).
# if the positioning started, we can't have any variable definition (can we?)
# we can have the definition of elements at the locations
if(str_local.rfind("at=") >= 0):
if(self._sequencename==""):
print "Warning, adding elements to sequence with no name."
if "," in str_local.split(":")[0]:
tokens = str_local.split(",")
elem_name = tokens[0] # elem name 100% is the first position, otherwise the user did a mistake
tmp = tokens[1:]
aux = [x.split("at=")[-1] for x in tmp if "at=" in x]
position = eval(aux[0])
else:
tokens = str_local.split(":")
elem_name = tokens[0]
tmp_str = "".join(tokens[1:])
tmp = tmp_str.split(",")
aux = [x.split("at=")[-1] for x in tmp if "at=" in x]
position = eval(aux[0])
latt_elem = self._accElemDict[elem_name]
                # we have the element; replace variables in its parameters by numerical values here
latt_elem = self.recalculateParameters(latt_elem,localValDict)
# all monitors in PyORBIT have zero len by definition
if "monitor" in latt_elem.getType() and latt_elem.getParameter("l"):
latt_elem.addParameter("l", 0.0)
length = latt_elem.getParameter("l")
if "from" in latt_elem.getParameters().keys():
refer_elem_name = latt_elem.getParameter("from")
refer_elem = self._accElemDict[refer_elem_name]
position += refer_elem.getParameter("position")
latt_elem.addParameter("position", position)
if latt_elem.hasParameter("apertype"):
latt_aper_entry = self.makeAperture(latt_elem)
latt_aper_entry.addParameter("position", position-length/2.0)
latt_aper_exit = self.makeAperture(latt_elem)
latt_aper_exit.addParameter("position", position+length/2.0)
latt_drift = self.makeDrift(latt_elem,elementRefer)
self._sequencelist.append(latt_drift)
self._sequencelist.append(latt_aper_entry)
self._sequencelist.append(latt_elem)
self._sequencelist.append(latt_aper_exit)
aper_warning = aper_warning + 2
else:
latt_drift = self.makeDrift(latt_elem,elementRefer)
self._sequencelist.append(latt_drift)
self._sequencelist.append(latt_elem)
if(str_local.rfind("endsequence") >= 0):
                #the sequence must contain at least one element, otherwise there is nothing to build
if not len(self._sequencelist):
print "Warning: Creating empty lattice."
sys.exit(1)
else:
#If the last element is not at the end of the lattice, make a drift
lattEnd = MADX_LattElement("lattEnd", "marker")
endPos = float(self._sequencelength)
lattEnd.addParameter("position", endPos)
lattEnd.addParameter("l", 0)
latt_drift = self.makeDrift(lattEnd,elementRefer)
self._sequencelist.append(latt_drift)
self._sequencelist.append(lattEnd)
endlength = float(self._sequencelength) - (float(self._sequencelist[-1].getParameter("position")) + 0.5*float(self._sequencelist[-1].getParameter("l")))
if(endlength < -1e-10):
print "Warning: Lattice parsing resulted in a lattice with length longer than specified by sequence command."
if aper_warning >= 1:
print "Warning, adding", aper_warning ,"aperture nodes to the teapot lattice. That will slow down the simluation."
print "If the lost of particles on the aperture is not necessary, please use a madx file without the aperture labels."
def calculateVariables(self):
#---------------------------------------------------------
#Now let's substitute elements parameters in variables' expression.
#---------------------------------------------------------
for name,var in self._varDict.iteritems():
val = var.getExpression()
resArr = StringFunctions.getElementKeys(val)
for [el,k] in resArr:
                accElemInside = self._accElemDict[el]
replVal = accElemInside.getParameters()[k]
val = StringFunctions.replaceElementKeys(val,el,k,replVal)
var.setExpression(val)
#-----------------------------------------------
#now let's calculate all variables
#-----------------------------------------------
#replace all math cos,sin, etc by math.cos, math.sin, etc
for name,var in self._varDict.iteritems():
val = var.getExpression()
val = StringFunctions.replaceMath(val)
var.setExpression(val)
#Then let's calculate numerical values.
        #They can be defined recursively, so we need iterations
accVarDict = {}
for name,var in self._varDict.iteritems():
accVarDict[var.getName()] = var
localValDict = {}
doNotStop = True
while(doNotStop):
doNotStop = False
accVarDictInner = accVarDict.copy()
for name,var in accVarDictInner.iteritems():
str_in = var.getExpression()
res,val = StringFunctions.calculateString(str_in.lower(),localValDict)
if(res):
localValDict[name.lower()] = val
var.setValue(val)
del accVarDict[name]
else:
doNotStop = True
if(len(accVarDictInner) == len(accVarDict) and len(accVarDict) > 0):
print "=========== Unresolved Variables============"
for name,var in accVarDictInner.iteritems():
print "name=",name," str=",var.getExpression()
print "=========== MADX File Problem ==============="
print "=================STOP======================="
sys.exit(1)
return localValDict
def recalculateParameters(self,accElem,localValDict):
#-----------------------------------------------
#replace all elem[key] substrings in element by variables
#-----------------------------------------------
name = accElem.getName()
accElemDictInit = self._accElemDict.copy()
accElemDictCp = self._accElemDict.copy()
doNotStop = True
while(doNotStop):
doNotStop = False
kvs = accElem.getParameters()
for key,val in kvs.iteritems():
if val != None:
tmp = "{}".format(val)
if not tmp.split(","):
resArr = StringFunctions.getElementKeys(tmp)
else:
resArr = []
for x in tmp.split(","):
resArr += StringFunctions.getElementKeys(x)
if(len(resArr) == 0 and accElemDictInit.has_key(name)):
del accElemDictInit[name]
for [el,k] in resArr:
doNotStop = True
accElemInside = accElemDictCp[el]
replVal = accElemInside.getParameters()[k]
val = StringFunctions.replaceElementKeys(val,el,k,replVal)
kvs[key] = val
if(len(accElemDictCp) == len(accElemDictInit)):
print "=========== Unresolved AccElements============"
for name,accElem in accElemDictCp.iteritems():
print "name=",name," params=",accElem.getParameters()
print "=========== MADX File Problem ==============="
print "=================STOP======================="
sys.exit(1)
#-------------------------------------------
# Now calculate all parameters in key,string_value
# for accelerator elements
#--------------------------------------------
# for name,accElem in self._accElemDict.iteritems():
kvs = accElem.getParameters()
for key,val in kvs.iteritems():
val_out = None
if val != None and key!="apertype" and key!="from":
tmp = ("{}".format(val)).split(",")
out = []
for aux in tmp:
auxRepl = StringFunctions.replaceMath(aux)
res,val_out = StringFunctions.calculateString(auxRepl,localValDict)
if not res:
val_out = 0.0
print "============= MADX File problem ==============",
print "Problem with acc. element:",accElem.getName()
print "Parameter name:",key,out
print "Can not calculate string:",val
print "Set variable", aux, "to zero"
print "================ CONTINUE ===================\n"
if len(tmp) == 1:
accElem.getParameters()[key] = val_out # directly
else:
out+=[val_out]
if out:
accElem.getParameters()[key] = out
return accElem
def makeDrift(self, downstreamelem,elementRefer):
# Now we have to create a drift between elements
if elementRefer == "entry":
refer = [1.0,0.0]
if elementRefer in ["centre", "center"]:
refer = [0.5,0.5]
if elementRefer == "exit":
refer = [0.0,1.0]
if elementRefer not in ("entry","centre","center","exit"):
print "=============MADX File problem ==============",
print "Problem with sequence, refer is:",elementRefer
print "Refer has to be one of :", ("entry","centre","center","exit")
print "============ STOP =========================="
sys.exit(1)
lenUp = 0.0 # upstream
lenDown = 0.0 # downstream
if not self._sequencelist:
posUp = 0.0
lenDown = downstreamelem.getParameter("l")
else:
upstreamelem = self._sequencelist[-1]
lenUp = upstreamelem.getParameter("l")
lenDown = downstreamelem.getParameter("l")
posUp = upstreamelem.getParameter("position")
posDown = downstreamelem.getParameter("position")
driftlength = abs(posDown - posUp) - refer[0]*lenUp -refer[1]*lenDown
name = "Drift{}".format(len(self._sequencelist)//2)
type_local = "drift"
if driftlength < 0:
print "Warning: Drift between {} and {} has negative length, value = {}".format(upstreamelem.getName(), downstreamelem.getName(),driftlength)
print "Setting length to zero."
lattElem = MADX_LattElement(name, type_local)
else:
lattElem = MADX_LattElement(name, type_local)
lattElem.addParameter("l", driftlength)
return lattElem
#-------------------------------------------------------------------
def parseParameters(self,line_init, lattElem):
"""
Method. It finds all parameters and applies to the existing element.
"""
length = 0.0
strength = 0.0
# search for knl, ksl, APERTURE, ... !! :={} or = {}
line_tmp = line_init[:]
if "{" in line_init and "}" in line_init:
line_init =line_init.replace("}", "!{")
line_init="".join([x if "!" not in x else "!" for x in line_init.split("{")])
line_init= ",".join([x for x in line_init.split(",") if "!" not in x])
if "aperture" in line_tmp.lower():
tmp = [s for s in line_tmp.split(",") if "apertype" in s.lower()]
if tmp:
apertype = tmp[0]
else:
apertype = 'ellipse' # default
tmp = [i for i,s in enumerate(line_tmp.split(",")) if "aperture" in s.lower()]
aper = line_tmp.split(",")[tmp[0]:tmp[0]+2]
dim="{},{}".format(aper[0].split("{")[-1], aper[1].split("}")[0]) # super straightforward, can cause some troubles, if, for example, aperture = {a,...,b}
tokens = line_init.split(",")
for token in tokens[1:]: # name+type are already parsed, aperture and knl/ksl are excluded
subtokens = token.split("=")
keyParam,valParam = subtokens[0].rstrip(":"),subtokens[1]
if "." in valParam:
# delete dots from names of variables in references to variables
valTmp = valParam[:]
for x in re.split(r'[)(*/+-]', valTmp):
if "." in x:
tmp = "".join(x.split("."))
if sum([s.isdigit() for s in tmp]) != len(tmp):
aux = valParam.split(x)
x = "".join(x.split("."))
valParam =x.join(aux)
if "." in keyParam:
# delete dots from names of params
keyParam = "".join(keyParam.split("."))
lattElem.addParameter(keyParam,valParam)
if "{" in line_tmp and "}" in line_tmp:
if "knl" in line_tmp.lower() or "ksl" in line_tmp.lower():
kls, poles, skews = self.makeMultiPols(line_tmp)
lattElem.addParameter("poles", poles)
lattElem.addParameter("skews", skews)
lattElem.addParameter("kls", kls)
if "aper" in line_tmp.lower():
apertype = apertype.split("=")
lattElem.addParameter("aperture", dim)
lattElem.addParameter("apertype", apertype[1])
#Make sure there is always a length parameter.
if not lattElem.hasParameter("l"):
lattElem.addParameter("l","0.0")
return lattElem
#-------------------------------------------------------------------
def parseElem(self,line_init):
"""
Method. It does the first parsing of the initial string.
"""
name = ""
type = ""
length = 0.0
strength = 0.0
tokens = line_init.split(",")
subtokens = tokens[0].split(":")
name = subtokens[0]
type_local = subtokens[1]
# elem can be defined as a child node
paramsDict = {}
if type_local not in self._accElemDict.keys():
type_upd = type_local
else:
type_upd = self._accElemDict[type_local].getType()
paramsDict = self._accElemDict[type_local].getParameters()
if "marker" in type_upd.lower():
type_upd = "marker"
if "collimator" in type_upd.lower():
type_upd = "drift"
if "monitor" in type_upd.lower():
type_upd = "monitor"
if "kicker" in type_upd.lower():
type_upd = "kicker"
if "elseparator" in type_upd.lower():
type_upd = "drift"
if "instrument" in type_upd.lower():
type_upd = "marker"
if "rfcavity" in type_upd.lower(): # matrix doesnt exist
type_upd = "drift"
lattElem = MADX_LattElement(name, type_upd)
for key,val in paramsDict.iteritems():
lattElem.addParameter(key,val)
if not name:
lattElem = MADX_LattElement(name, type_local)
print "Warning: Empty lattice element type."
lattElem_upd = self.parseParameters(line_init, lattElem)
return lattElem_upd
def makeMultiPols(self,line_init):
kls = []
poles = []
skews = []
line_init =line_init.replace("}", "!{")
line_init="".join([x if "!" not in x else "!".join(x.split(",")) for x in line_init.split("{")])
tokens = [",".join(x.rstrip("!").split("!")) for x in line_init.split(",") if "!" in x and "aper" not in x]
for token in tokens:
subtokens = token.split("=")
name = subtokens[0].rstrip(":")
kls = subtokens[1].split(',')
if len(kls)==1:
kls+=['0.0']
if name == "knl":
skews = ["0" for x in kls]
if name == "ksl":
skews = ["1" for x in kls]
poles = ["{}".format(i) for i,x in enumerate(kls)]
#return kls, poles, skews
return ",".join(kls), ",".join(poles), ",".join(skews)
def makeAperture(self, downstreamelem):
# Now we have to create an aperture before and after the element with the MADX label aperture
type_local = "apertype"
name = "aperture"
lattElem = MADX_LattElement("Aperture", name)
lattElem.addParameter("l", 0.0)
dim = downstreamelem.getParameter(name)
shape_type = downstreamelem.getParameter(type_local)
if shape_type == "circle":
shape = 1
elif shape_type == "ellipse":
shape = 2
elif shape_type == "rectangle":
shape = 3
else:
print "======== Can not create elementwith type:",shape_type
print "You have to fix the _teapotFactory, aperture class and madx_parser."
print "Stop."
sys.exit(1)
lattElem.addParameter(name, dim)
lattElem.addParameter(type_local, shape)
return lattElem
def getSequenceName(self):
"""
Method. Returns name of the sequence
"""
return self._sequencename
def getSequenceList(self):
"""
Method. Returns list of elements in the sequence
"""
return self._sequencelist
def _parseNestedFileLine(self, line):
""" Returns the name of the nested file"""
#Define delimiter
dl="'"
if line.find(dl) < 0:
dl = "\""
ind0 = line.find(dl)
ind0 += 1
ind1 = line.rfind(dl)
str_res=""
if ind0 >= ind1 or ind0 < 0 or ind1 < 0:
print "Wrong line in the MADX file"
print "line Call file= defines wrong name of file format"
print "Should be : Call file = 'name of file'"
print "Line:",line
print "Stop."
sys.exit (0)
for i in range(ind0,ind1):
str_res = "".join([str_res,line[i]])
return str_res
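# Example (illustrative): for a line such as
#   call, file = 'subdir/lattice.seq';
# the text between the first and last quote characters, "subdir/lattice.seq",
# is returned as the nested file name.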
# STOP parsing a MADX file if there is a start of MADX commands
|
|
# -*- coding: utf-8 -*-
"""Docker Swarm Client API."""
from typing import List
import re
import os
import logging
import copy
import docker
import yaml
LOG = logging.getLogger('sip.ec.docker_swarm_client')
class DockerSwarmClient:
"""Docker Swarm Client Interface."""
def __init__(self):
"""Initialise of the class."""
# Create a docker client
self._client = docker.from_env()
# Store a flag to show whether we are on a manager node or a worker.
self._manager = self._client.info()['Swarm']['ControlAvailable']
# Docker low-level API
self._api_client = docker.APIClient()
###########################################################################
# Properties / attributes
###########################################################################
@property
def services(self) -> List[str]:
"""Get list of docker services.
Returns:
list, list of service ids
"""
return self.get_service_list()
@property
def containers(self) -> List[str]:
"""Get list of docker containers.
Returns:
list, list of container ids
"""
return self.get_container_list()
@property
def volumes(self) -> List[str]:
"""Get list of docker volumes.
Returns:
list, list of volume names
"""
return self.get_volume_list()
@property
def nodes(self) -> List[str]:
"""Get list of docker nodes.
Returns:
list, list of node ids
"""
return self.get_node_list()
@property
def delete_services(self):
"""Delete all services."""
self.delete_all_services()
@property
def delete_volumes(self):
"""Delete all volumes."""
self.delete_all_volumes()
###########################################################################
# Create functions
###########################################################################
def create_services(self, compose_str: str) -> list:
"""Create new docker services.
Args:
compose_str (string): Docker compose 'file' string
Returns:
list, ids of the created services
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be run on '
'swarm manager nodes')
# Initialise empty list
services_ids = []
try:
service_config = yaml.safe_load(compose_str)
# Deepcopy the service config
service_list = copy.deepcopy(service_config)
# Removing version and service from the dict
service_config.pop('version')
service_config.pop('services')
for service_name in service_list['services']:
service_exist = self._client.services.list(
filters={'name': service_name})
if not service_exist:
service_config['name'] = service_name
service_spec = self._parse_services(
service_config, service_name, service_list)
created_service = self._client.services.create(
**service_spec)
service_id = created_service.short_id
LOG.debug('Service created: %s', service_id)
services_ids.append(service_id)
else:
LOG.debug('Service already exists: %s', service_name)
except yaml.YAMLError as exc:
print(exc)
# Returning list of services created
return services_ids
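# Usage sketch (hypothetical compose string and service name, shown for illustration):
#
#   compose_str = ("version: '3'\n"
#                  "services:\n"
#                  "  example_service:\n"
#                  "    image: nginx:latest\n")
#   client = DockerSwarmClient()
#   service_ids = client.create_services(compose_str)  # e.g. ['x1y2z3']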
def create_volume(self, volume_name: str, driver_spec: str = None):
"""Create new docker volumes.
Only the manager nodes can create a volume
Args:
volume_name (string): Name for the new docker volume
driver_spec (string): Driver for the docker volume
"""
# Default values
if driver_spec:
driver = driver_spec
else:
driver = 'local'
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Volumes can only be created '
'on swarm manager nodes')
self._client.volumes.create(name=volume_name, driver=driver)
###########################################################################
# Delete functions
###########################################################################
def delete_service(self, service: str):
"""Removes/stops a docker service.
Only the manager nodes can delete a service
Args:
service (string): Service name or ID
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be deleted '
'on swarm manager nodes')
# Remove service
self._api_client.remove_service(service)
def delete_all_services(self):
"""Removes/stops a service.
Only the manager nodes can delete a service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be deleted '
'on swarm manager nodes')
service_list = self.get_service_list()
for service in service_list:
# Remove each service
self._api_client.remove_service(service)
def delete_volume(self, volume_name: str):
"""Removes/stops a docker volume.
Only the manager nodes can delete a volume
Args:
volume_name (string): Name of the volume
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Volumes can only be deleted '
'on swarm manager nodes')
# Remove volume
self._api_client.remove_volume(volume_name)
def delete_all_volumes(self):
"""Remove all the volumes.
Only the manager nodes can delete a volume
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Volumes can only be deleted '
'on swarm manager nodes')
volume_list = self.get_volume_list()
for volume in volume_list:
# Remove each volume
self._api_client.remove_volume(volume, force=True)
###########################################################################
# Get functions
###########################################################################
def get_service_list(self) -> list:
"""Get a list of docker services.
Only the manager nodes can retrieve all the services
Returns:
list, all the ids of the services in swarm
"""
# Initialising empty list
services = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve'
' all the services.')
service_list = self._client.services.list()
for s_list in service_list:
services.append(s_list.short_id)
return services
def get_service_name(self, service_id: str) -> str:
"""Get the name of the docker service.
Only the manager nodes can retrieve service name
Args:
service_id (string): Service name or ID
Returns:
string, name of the docker service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve all'
' the services details.')
service = self._client.services.get(service_id)
return service.name
def get_service_details(self, service_id: str) -> dict:
"""Get details of a service.
Only the manager nodes can retrieve service details
Args:
service_id (string): Service name or ID
Returns:
dict, details of the service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve all'
' the services details.')
service = self._client.services.get(service_id)
return service.attrs
def get_service_state(self, service_id: str) -> str:
"""Get the state of the service.
Only the manager nodes can retrieve service state
Args:
service_id (str): Service id
Returns:
str, state of the service
"""
# Get service
service = self._client.services.get(service_id)
# Get the state of the service
for service_task in service.tasks():
service_state = service_task['DesiredState']
return service_state
def get_node_list(self) -> list:
"""Get a list of nodes.
Only the manager nodes can retrieve all the nodes
Returns:
list, all the ids of the nodes in swarm
"""
# Initialising empty list
nodes = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node '
'can retrieve all the nodes.')
node_list = self._client.nodes.list()
for n_list in node_list:
nodes.append(n_list.id)
return nodes
def get_node_details(self, node_id: str) -> dict:
"""Get details of a node.
Only the manager nodes can retrieve details of a node
Args:
node_id (string): Node ID
Returns:
dict, details of the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can '
'retrieve node details.')
node = self._client.nodes.get(node_id)
return node.attrs
def get_container_list(self) -> list:
"""Get list of containers.
Returns:
list, all the ids of containers
"""
# Initialising empty list
containers = []
containers_list = self._client.containers.list()
for c_list in containers_list:
containers.append(c_list.short_id)
return containers
def get_container_details(self, container_id_or_name: str) -> dict:
"""Get details of a container.
Args:
container_id_or_name (string): docker container id or name
Returns:
dict, details of the container
"""
container = self._client.containers.get(container_id_or_name)
return container.attrs
def get_volume_list(self) -> list:
"""Get a list of docker volumes.
Only the manager nodes can retrieve all the volumes
Returns:
list, all the names of the volumes in swarm
"""
# Initialising empty list
volumes = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve'
' all the volumes.')
volume_list = self._client.volumes.list()
for v_list in volume_list:
volumes.append(v_list.name)
return volumes
def get_volume_details(self, volume_name: str) -> dict:
"""Get details of the volume.
Args:
volume_name (str): Name of the volume
Returns:
dict, details of the volume
"""
if volume_name not in self.volumes:
raise RuntimeError('No such volume found: ', volume_name)
volume = self._client.volumes.get(volume_name)
return volume.attrs
def get_actual_replica(self, service_id: str) -> int:
"""Get the actual replica level of a service.
Args:
service_id (str): docker swarm service id
Returns:
int, configured replica count of the service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve '
'replication level of the service')
service_details = self.get_service_details(service_id)
actual_replica = service_details["Spec"]["Mode"][
"Replicated"]["Replicas"]
return actual_replica
def get_replicas(self, service_id: str) -> int:
"""Get the replication level of a service.
Args:
service_id (str): docker swarm service id
Returns:
int, number of running replicas of the service
"""
# Initialising empty list
replicas = []
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve '
'replication level of the service')
service_tasks = self._client.services.get(service_id).tasks()
for task in service_tasks:
if task['Status']['State'] == "running":
replicas.append(task)
return len(replicas)
###########################################################################
# Update functions
###########################################################################
def update_labels(self, node_name: str, labels: dict):
"""Update label of a node.
Args:
node_name (string): Name of the node.
labels (dict): Label to add to the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can update '
'node details.')
# Node specification
node_spec = {'Availability': 'active',
'Name': node_name,
'Role': 'manager',
'Labels': labels}
node = self._client.nodes.get(node_name)
node.update(node_spec)
###########################################################################
# Parsing functions
###########################################################################
def _parse_services(self, service_config: dict, service_name: str,
service_list: dict) -> dict:
"""Parse the docker compose file.
Args:
service_config (dict): Service configurations from the compose file
service_name (string): Name of the services
service_list (dict): Service configuration list
Returns:
dict, service specifications extracted from the compose file
"""
for key, value in service_list['services'][service_name].items():
service_config[key] = value
if 'command' in key:
key = "args"
service_config['args'] = value
service_config.pop('command')
if 'ports' in key:
endpoint_spec = self._parse_ports(value)
service_config['endpoint_spec'] = endpoint_spec
service_config.pop('ports')
if 'volumes' in key:
volume_spec = self._parse_volumes(value)
service_config['mounts'] = volume_spec
service_config.pop('volumes')
if 'deploy' in key:
self._parse_deploy(value, service_config)
service_config.pop('deploy')
if 'networks' in key:
network_spec = self._parse_networks(service_list)
service_config['networks'] = network_spec
if 'logging' in key:
self._parse_logging(value, service_config)
service_config.pop('logging')
if 'environment' in key:
service_config['env'] = value
service_config.pop('environment')
# LOG.info('Service Config: %s', service_config)
return service_config
def _parse_deploy(self, deploy_values: dict, service_config: dict):
"""Parse deploy key.
Args:
deploy_values (dict): deploy configuration values
service_config (dict): Service configuration
"""
# Initialising empty dictionary
mode = {}
for d_value in deploy_values:
if 'restart_policy' in d_value:
restart_spec = docker.types.RestartPolicy(
**deploy_values[d_value])
service_config['restart_policy'] = restart_spec
if 'placement' in d_value:
for constraints_key, constraints_value in \
deploy_values[d_value].items():
service_config[constraints_key] = constraints_value
if 'mode' in d_value:
mode[d_value] = deploy_values[d_value]
if 'replicas' in d_value:
mode[d_value] = deploy_values[d_value]
if 'resources' in d_value:
resource_spec = self._parse_resources(
deploy_values, d_value)
service_config['resources'] = resource_spec
# Setting the types
mode_spec = docker.types.ServiceMode(**mode)
service_config['mode'] = mode_spec
###########################################################################
# Static methods
###########################################################################
@staticmethod
def _parse_ports(port_values: dict) -> dict:
"""Parse ports key.
Args:
port_values (list): ports configuration values
Returns:
docker.types.EndpointSpec, specification of the exposed ports
"""
# Initialising empty dictionary
endpoints = {}
for port_element in port_values:
ports = str(port_element).split(':')
if len(ports) == 2:
# Compose "published:target" syntax
endpoints[int(ports[0])] = int(ports[1])
else:
# A single port is published on the same port number
endpoints[int(ports[0])] = int(ports[0])
# Setting the types
endpoint_spec = docker.types.EndpointSpec(ports=endpoints)
return endpoint_spec
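# Example (illustrative): port_values = ['8080:80', '9000'] would produce an
# EndpointSpec publishing host port 8080 to container port 80 and host port 9000
# to container port 9000.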
@staticmethod
def _parse_volumes(volume_values: list) -> list:
"""Parse volumes key.
Args:
volume_values (list): volume configuration values
Returns:
list, volume specification with mount source and container path
"""
for v_values in volume_values:
for v_key, v_value in v_values.items():
if v_key == 'source':
if v_value == '.':
source = os.path.dirname(
os.path.abspath(__file__))
else:
source = v_value
if v_key == 'target':
target = v_value
volume_spec = [source + ':' + target]
return volume_spec
@staticmethod
def _parse_resources(resource_values: dict, resource_name: str) -> dict:
"""Parse resources key.
Args:
resource_values (dict): resource configurations values
resource_name (string): Resource name
Returns:
dict, resources specification
"""
# Initialising empty dictionary
resources = {}
for r_values in resource_values[resource_name]:
if 'limits' in r_values:
for r_key, r_value in \
resource_values[resource_name][r_values].items():
if 'cpu' in r_key:
cpu_value = float(r_value) * 10 ** 9
cpu_key = r_key[:3] + '_limit'
resources[cpu_key] = int(cpu_value)
if 'mem' in r_key:
mem_value = re.sub('M', '', r_value)
mem_key = r_key[:3] + '_limit'
resources[mem_key] = int(mem_value) * 1048576
resources_spec = docker.types.Resources(**resources)
return resources_spec
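# Worked example (illustrative values): deploy limits of cpus: '0.5' and memory: '512M'
# become cpu_limit = int(0.5 * 10**9) = 500000000 nano-CPUs and
# mem_limit = 512 * 1048576 = 536870912 bytes in the resulting Resources spec.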
@staticmethod
def _parse_networks(service_list: dict) -> list:
"""Parse network key.
Args:
service_list (dict): Service configurations
Returns:
list, List of networks
"""
# Initialising empty list
networks = []
for n_values in service_list['networks'].values():
for n_key, n_value in n_values.items():
if 'name' in n_key:
networks.append(n_value)
return networks
@staticmethod
def _parse_logging(log_values: dict, service_config: dict):
"""Parse log key.
Args:
log_values (dict): logging configuration values
service_config (dict): Service specification
"""
for log_key, log_value in log_values.items():
if 'driver' in log_key:
service_config['log_driver'] = log_value
if 'options' in log_key:
service_config['log_driver_options'] = log_value
|
|
import itertools
import random
import pytest
import numpy as np
import moldesign as mdt
from moldesign import units as u
from . import helpers
registered_types = {}
def typedfixture(*types, **kwargs):
"""This is a decorator that lets us associate fixtures with one or more arbitrary types.
We'll later use these types to determine which tests to run on the result."""
def fixture_wrapper(func):
for t in types:
registered_types.setdefault(t, []).append(func.__name__)
return pytest.fixture(**kwargs)(func)
return fixture_wrapper
@typedfixture('container')
def protein():
return mdt.read(helpers.get_data_path('3aid.pdb'))
@pytest.fixture
def atom(protein):
return random.choice(protein.atoms)
@typedfixture('container')
def chain(protein):
return random.choice(protein.chains)
@typedfixture('container')
def residue(protein):
return random.choice(protein.residues)
@typedfixture('container')
def atomlist(protein):
return mdt.AtomList(random.sample(protein.atoms, 10))
@typedfixture('container')
def small_molecule():
return mdt.from_smiles('c1ccccc1')
def test_self_distances_are_0(protein):
res = protein.residues[0]
assert protein.distance(protein.atoms[0]) == 0.0
assert protein.residues[0].distance(protein) == 0.0
assert protein.atoms[0].distance(protein.atoms[0]) == 0.0
assert res.distance(res) == 0.0
assert res.distance(res.chain) == 0.0
def test_distance_is_minimum_pairwise(protein):
res1 = protein.residues[0]
res2 = protein.residues[2]
assert res1.distance(res2) == _get_minimum_pairwise(res1, res2)
assert res1.distance(res2.atoms[3]) == _get_minimum_pairwise(res1,
mdt.AtomList([res2.atoms[3]]))
@pytest.mark.parametrize(['f1', 'f2'],
itertools.product(registered_types['container'],
registered_types['container']))
def test_pairwise_distance_arrays(f1, f2, request):
o1 = request.getfuncargvalue(f1)
o2 = request.getfuncargvalue(f2)
array = o1.calc_distance_array(o2)
if o1.num_atoms * o2.num_atoms > 250: # stochastically test larger matrices
pairs = ((random.randrange(0, o1.num_atoms), random.randrange(0, o2.num_atoms))
for i in xrange(250))
else:
pairs = itertools.product(xrange(o1.num_atoms), xrange(o2.num_atoms))
for iatom, jatom in pairs:
assert o1.atoms[iatom].distance(o2.atoms[jatom]) == array[iatom, jatom]
@pytest.mark.parametrize('fixturename', registered_types['container'])
def test_center_of_mass_movement(fixturename, request):
obj = request.getfuncargvalue(fixturename)
origpos = obj.positions.copy()
obj.positions -= obj.center_of_mass
np.testing.assert_allclose(obj.center_of_mass,
np.zeros(3),
atol=1e-10)
obj.translate([1.0, 2.0, 3.0] * u.angstrom)
np.testing.assert_allclose(obj.center_of_mass,
[1.0, 2.0, 3.0]*u.angstrom)
obj.rotate(90 * u.degrees, [0,0,1])
np.testing.assert_allclose(obj.center_of_mass,
[1.0, 2.0, 3.0]*u.angstrom)
obj.rotate(90 * u.degrees, [0,0,1], center=[0,0,0]*u.angstrom)
np.testing.assert_allclose(obj.center_of_mass,
[-2.0, 1.0, 3.0]*u.angstrom)
obj.positions = origpos
@pytest.mark.parametrize('fixturename', registered_types['container'])
def test_container_properties(fixturename, request):
obj = request.getfuncargvalue(fixturename)
assert obj.mass == sum([atom.mass for atom in obj.atoms])
np.testing.assert_array_equal(obj.positions.defunits(),
u.array([atom.position for atom in obj.atoms]).defunits())
assert obj.num_atoms == len(obj.atoms)
@pytest.mark.parametrize('fixturename', registered_types['container'])
def test_position_links(fixturename, request):
obj = request.getfuncargvalue(fixturename)
np.testing.assert_array_equal(obj.positions[0, :],
obj.atoms[0].position)
obj.positions[0, :] *= 2.0
np.testing.assert_array_equal(obj.positions[0, :],
obj.atoms[0].position)
def _get_minimum_pairwise(group1, group2):
mindist = np.inf * u.angstrom
for a1, a2 in itertools.product(group1.atoms, group2.atoms):
distance = (a1.position-a2.position).norm()
mindist = min(distance, mindist)
return mindist
@pytest.mark.parametrize('fixturename', ['atom', 'residue', 'atomlist', 'small_molecule'])
def test_atoms_within(fixturename, request):
obj = request.getfuncargvalue(fixturename)
if fixturename == 'atom':
myatoms = {obj}
mol = obj.molecule
else:
myatoms = set(obj.atoms)
mol = obj.atoms[0].molecule
assert len(obj.atoms_within(0.0*u.angstrom)) == 0
within5 = set(obj.atoms_within(5.0*u.angstrom))
within5_self = set(obj.atoms_within(5.0*u.angstrom, include_self=True))
assert myatoms.issubset(within5_self)
assert within5.union(myatoms) == within5_self
for atom in mol.atoms:
if atom in myatoms:
assert atom not in within5
elif atom.distance(obj) <= 5.0*u.angstrom:
assert atom in within5
else:
assert atom not in within5
@pytest.mark.parametrize('fixturename', ['atom', 'residue', 'atomlist'])
def test_residues_within(fixturename, request):
obj = request.getfuncargvalue(fixturename)
if fixturename == 'atom':
mol = obj.molecule
else:
mol = obj.atoms[0].molecule
assert len(obj.residues_within(0.0*u.angstrom)) == 0
within5 = set(obj.residues_within(5.0*u.angstrom))
within5_self = set(obj.residues_within(5.0*u.angstrom, include_self=True))
if fixturename == 'residue':
assert within5.union([obj]) == within5_self
else:
assert within5 == within5_self
for residue in mol.residues:
if residue == obj:
assert residue in within5_self
assert residue not in within5
elif residue.distance(obj) <= 5.0*u.angstrom:
assert residue in within5
else:
assert residue not in within5
def test_setlike_atomlist_methods(protein):
l1 = protein.atoms[:10]
assert isinstance(l1, mdt.AtomList)
l2 = protein.atoms[5:15]
assert l1.union(l2) == protein.atoms[:15]
assert l2.union(l1) == protein.atoms[5:15] + protein.atoms[:5]
interx = l1.intersection(l2)
assert interx == protein.atoms[5:10]
assert l2.intersection(l1) == interx
assert l1 - l2 == protein.atoms[:5]
assert l2 - l1 == protein.atoms[10:15]
assert (l1 + l2).unique() == protein.atoms[:15]
|
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Numerical gradient checking to validate backprop code.
"""
import logging
import numpy as np
from neon.datasets.synthetic import UniformRandom
from neon.experiments.experiment import Experiment
from neon.models.mlp import MLP
from neon.util.compat import range
logger = logging.getLogger(__name__)
class GradientChecker(Experiment):
"""
In this `Experiment`, a model is run on a synthetic dataset and its
analytic gradients are compared against numerical (finite-difference)
gradients to validate the backprop code within the given model.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def initialize(self, backend):
if self.initialized:
return
self.backend = backend
self.model.initialize(backend)
self.initialized = True
def transfer(self, experiment):
self.model = experiment.model
self.dataset = experiment.dataset
def save_state(self):
for ind in range(len(self.trainable_layers)):
layer = self.model.layers[self.trainable_layers[ind]]
self.weights[ind][:] = layer.weights
def load_state(self):
for ind in range(len(self.trainable_layers)):
layer = self.model.layers[self.trainable_layers[ind]]
layer.weights[:] = self.weights[ind]
def check_layer(self, layer, inputs, targets):
# Check up to this many weights.
nmax = 30
if type(layer.updates) == list:
updates = layer.updates[0].asnumpyarray().ravel()
else:
updates = layer.updates.asnumpyarray().ravel()
weights = layer.weights.asnumpyarray().ravel()
grads = np.zeros(weights.shape)
inds = np.random.choice(np.arange(weights.shape[0]),
min(weights.shape[0], nmax),
replace=False)
for ind in inds:
saved = weights[ind]
weights[ind] += self.eps
self.model.fprop(inputs)
cost1 = self.model.cost.apply_function(targets).asnumpyarray()
weights[ind] -= 2 * self.eps
self.model.fprop(inputs)
cost2 = self.model.cost.apply_function(targets).asnumpyarray()
grads[ind] = ((cost1 - cost2) / self.model.layers[-1].batch_size *
layer.learning_rule.learning_rate / (2 * self.eps))
weights[ind] = saved
grads -= updates
diff = np.linalg.norm(grads[inds]) / nmax
if diff < 0.0002:
logger.info('diff %g. layer %s OK.', diff, layer.name)
return True
logger.error('diff %g. gradient check failed on layer %s.',
diff, layer.name)
return False
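# The numerical estimate above is a central difference: for each sampled weight w,
# grad ~= (cost(w + eps) - cost(w - eps)) / (2 * eps), divided by the batch size and
# scaled by the layer learning rate so it is comparable with the backprop updates.
# For example (illustrative numbers), with eps = 1e-4, cost1 = 0.5002 and
# cost2 = 0.4998 the unscaled estimate is (0.5002 - 0.4998) / (2 * 1e-4) = 2.0.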
def check_layerb(self, layer):
# Check up to this many weights.
nmax = 30
if type(layer.updates) == list:
updates = layer.updates[0].asnumpyarray().ravel()
else:
updates = layer.updates.asnumpyarray().ravel()
weights = layer.weights.asnumpyarray().ravel()
grads = np.zeros(weights.shape)
inds = np.random.choice(np.arange(weights.shape[0]),
min(weights.shape[0], nmax),
replace=False)
for ind in inds:
saved = weights[ind]
weights[ind] += self.eps
self.model.data_layer.reset_counter()
self.model.fprop()
cost1 = self.model.cost_layer.get_cost().asnumpyarray()
weights[ind] -= 2 * self.eps
self.model.data_layer.reset_counter()
self.model.fprop()
cost2 = self.model.cost_layer.get_cost().asnumpyarray()
grads[ind] = ((cost1 - cost2) / self.model.batch_size *
layer.learning_rule.learning_rate / (2 * self.eps))
weights[ind] = saved
grads -= updates
diff = np.linalg.norm(grads[inds]) / nmax
if diff < 0.0002:
logger.info('diff %g. layer %s OK.', diff, layer.name)
return True
logger.error('diff %g. gradient check failed on layer %s.',
diff, layer.name)
return False
def run(self):
"""
Actually carry out each of the experiment steps.
"""
if not (hasattr(self.model, 'fprop') and hasattr(self.model, 'bprop')):
logger.error('Config file not compatible: model must provide fprop and bprop.')
return
self.eps = 1e-4
self.weights = []
self.trainable_layers = []
for ind in range(len(self.model.layers)):
layer = self.model.layers[ind]
if not (hasattr(layer, 'weights') and hasattr(layer, 'updates')):
continue
self.weights.append(layer.backend.copy(layer.weights))
self.trainable_layers.append(ind)
if not hasattr(layer, 'dataset'):
if isinstance(self.model, MLP):
datashape = (self.model.data_layer.nout,
self.model.cost_layer.nin)
else:
datashape = (self.model.layers[0].nin,
self.model.layers[-1].nout)
self.dataset = UniformRandom(self.model.batch_size,
self.model.batch_size,
datashape[0], datashape[1])
self.dataset.backend = self.model.backend
self.dataset.set_distributed_batch_size(self.model)
self.dataset.load()
ds = self.dataset
if isinstance(self.model, MLP):
self.model.data_layer.dataset = ds
self.model.data_layer.use_set('train')
self.model.fprop()
self.model.bprop()
self.model.update(0)
self.save_state()
self.model.data_layer.reset_counter()
self.model.fprop()
self.model.bprop()
self.model.update(0)
self.load_state()
else:
inputs = ds.get_batch(ds.get_inputs(train=True)['train'], 0)
targets = ds.get_batch(ds.get_targets(train=True)['train'], 0)
self.model.fprop(inputs)
self.model.bprop(targets, inputs)
self.model.update(0)
self.save_state()
self.model.fprop(inputs)
self.model.bprop(targets, inputs)
self.model.update(0)
self.load_state()
for ind in self.trainable_layers[::-1]:
layer = self.model.layers[ind]
if isinstance(self.model, MLP):
result = self.check_layerb(layer)
else:
result = self.check_layer(layer, inputs, targets)
if result is False:
break
|
|
#!/usr/bin/env python
import sys
import os
import inspect
import traceback
import yaml
import pycurl
import json
import csv
import logging
from optparse import OptionParser
from email import message_from_string # For headers handling
import time
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# Pyresttest internals
from binding import Context
import generators
from generators import parse_generator
from parsing import flatten_dictionaries, lowercase_keys, safe_to_bool, safe_to_json
import validators
from validators import Failure
from tests import Test, DEFAULT_TIMEOUT
from benchmarks import Benchmark, AGGREGATES, METRICS, parse_benchmark
"""
Executable class, ties everything together into the framework.
Module responsibilities:
- Read & import test files
- Parse test configs
- Provide executor methods for sets of tests and benchmarks
- Collect and report on test/benchmark results
- Perform analysis on benchmark results
"""
LOGGING_LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
logging.basicConfig(format='%(levelname)s:%(message)s')
logger = logging.getLogger('pyresttest')
class cd:
"""Context manager for changing the current working directory"""
# http://stackoverflow.com/questions/431684/how-do-i-cd-in-python/13197763#13197763
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
if self.newPath: # Don't CD to nothingness
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
if self.newPath: # Don't CD to nothingness
os.chdir(self.savedPath)
class TestConfig:
""" Configuration for a test run """
timeout = DEFAULT_TIMEOUT # timeout of tests, in seconds
print_bodies = False # Print response bodies in all cases
print_headers = False # Print response headers in all cases
retries = 0 # Retries on failures
test_parallel = False # Allow parallel execution of tests in a test set, for speed?
interactive = False
verbose = False
ssl_insecure = False
# Binding and creation of generators
variable_binds = None
generators = None # Map of generator name to generator function
def __str__(self):
return json.dumps(self, default=safe_to_json)
class TestSet:
""" Encapsulates a set of tests and test configuration for them """
tests = list()
benchmarks = list()
config = TestConfig()
def __init__(self):
self.config = TestConfig()
self.tests = list()
self.benchmarks = list()
def __str__(self):
return json.dumps(self, default=safe_to_json)
class BenchmarkResult:
""" Stores results from a benchmark for reporting use """
group = None
name = u'unnamed'
results = dict() # Benchmark output, map the metric to the result array for that metric
aggregates = list() # List of aggregates, as tuples of (metricname, aggregate, result)
failures = 0 # Track call count that failed
def __init__(self):
self.aggregates = list()
self.results = dict()
def __str__(self):
return json.dumps(self, default=safe_to_json)
class TestResponse:
""" Encapsulates everything about a test response """
test = None # Test run
response_code = None
body = None # Response body, if tracked
passed = False
response_headers = None
failures = None
def __init__(self):
self.failures = list()
def __str__(self):
return json.dumps(self, default=safe_to_json)
def unicode_body(self):
return unicode(self.body.decode('UTF-8'))
def read_test_file(path):
""" Read test file at 'path' in YAML """
# TODO allow use of safe_load_all to handle multiple test sets in a given doc
teststruct = yaml.safe_load(os.path.expandvars(read_file(path)))
return teststruct
def parse_headers(header_string):
""" Parse a header-string into individual headers
Implementation based on: http://stackoverflow.com/a/5955949/95122
"""
# First line is request line, strip it out
if not header_string:
return dict()
request, headers = header_string.split('\r\n', 1)
if not headers:
return dict()
else:
header_msg = message_from_string(headers)
# Note: HTTP headers are *case-insensitive* per RFC 2616
return dict((k.lower(), v) for k,v in header_msg.items())
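# Example (illustrative): a raw header string such as
#   "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nServer: nginx\r\n"
# is returned as {'content-type': 'application/json', 'server': 'nginx'},
# with the request line stripped and header names lowercased.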
def parse_testsets(base_url, test_structure, test_files = set(), working_directory = None, vars=None):
""" Convert a Python data structure read from validated YAML to a set of structured testsets
The data structure is assumed to be a list of dictionaries, each of which describes:
- a tests (test structure)
- a simple test (just a URL, and a minimal test is created)
- or overall test configuration for this testset
- an import (load another set of tests into this one, from a separate file)
- For imports, these are recursive, and will use the parent config if none is present
Note: test_files is used to track tests that import other tests, to avoid recursive loops
This returns a list of testsets, corresponding to imported testsets and in-line multi-document sets
"""
tests_out = list()
test_config = TestConfig()
testsets = list()
benchmarks = list()
if working_directory is None:
working_directory = os.path.abspath(os.getcwd())
if vars and isinstance(vars,dict):
test_config.variable_binds = vars
# returns a testconfig and collection of tests
for node in test_structure: # Iterate through lists of test and configuration elements
if isinstance(node,dict): # Each config element is a miniature key-value dictionary
node = lowercase_keys(node)
for key in node:
if key == u'import':
importfile = node[key] # import another file
if importfile not in test_files:
logger.debug("Importing test sets: " + importfile)
test_files.add(importfile)
import_test_structure = read_test_file(importfile)
with cd(os.path.dirname(os.path.realpath(importfile))):
import_testsets = parse_testsets(base_url, import_test_structure, test_files, vars=vars)
testsets.extend(import_testsets)
elif key == u'url': # Simple test, just a GET to a URL
mytest = Test()
val = node[key]
assert isinstance(val,str) or isinstance(val,unicode)
mytest.url = base_url + val
tests_out.append(mytest)
elif key == u'test': # Complex test with additional parameters
with cd(working_directory):
child = node[key]
mytest = Test.parse_test(base_url, child)
tests_out.append(mytest)
elif key == u'benchmark':
benchmark = parse_benchmark(base_url, node[key])
benchmarks.append(benchmark)
elif key == u'config' or key == u'configuration':
test_config = parse_configuration(node[key], base_config=test_config)
testset = TestSet()
testset.tests = tests_out
testset.config = test_config
testset.benchmarks = benchmarks
testsets.append(testset)
return testsets
def parse_configuration(node, base_config=None):
""" Parse input config to configuration information """
test_config = base_config
if not test_config:
test_config = TestConfig()
node = lowercase_keys(flatten_dictionaries(node)) # Make it usable
for key, value in node.items():
if key == u'timeout':
test_config.timeout = int(value)
elif key == u'print_bodies':
test_config.print_bodies = safe_to_bool(value)
elif key == u'retries':
test_config.retries = int(value)
elif key == u'variable_binds':
if not test_config.variable_binds:
test_config.variable_binds = dict()
test_config.variable_binds.update(flatten_dictionaries(value))
elif key == u'generators':
flat = flatten_dictionaries(value)
gen_map = dict()
for generator_name, generator_config in flat.items():
gen = parse_generator(generator_config)
gen_map[str(generator_name)] = gen
test_config.generators = gen_map
return test_config
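# Example (illustrative): a config node such as
#   {'timeout': '30', 'retries': '1', 'variable_binds': {'host': 'example.com'}}
# yields a TestConfig with timeout=30, retries=1 and
# variable_binds={'host': 'example.com'}; unknown keys are ignored.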
def read_file(path):
""" Read an input into a file, doing necessary conversions around relative path handling """
with open(path, "r") as f:
string = f.read()
f.close()
return string
def run_test(mytest, test_config = TestConfig(), context = None):
""" Put together test pieces: configure & run actual test, return results """
# Initialize a context if not supplied
my_context = context
if my_context is None:
my_context = Context()
mytest.update_context_before(my_context)
templated_test = mytest.realize(my_context)
curl = templated_test.configure_curl(timeout=test_config.timeout, context=my_context)
result = TestResponse()
result.test = templated_test
# reset the body, it holds values from previous runs otherwise
headers = StringIO()
body = StringIO()
curl.setopt(pycurl.WRITEDATA, body)
curl.setopt(pycurl.HEADERFUNCTION, headers.write)
if test_config.verbose:
curl.setopt(pycurl.VERBOSE,True)
if test_config.ssl_insecure:
curl.setopt(pycurl.SSL_VERIFYPEER,0)
curl.setopt(pycurl.SSL_VERIFYHOST,0)
result.passed = None
if test_config.interactive:
print("===================================")
print("%s" % mytest.name)
print("-----------------------------------")
print("REQUEST:")
print("%s %s" % (templated_test.method, templated_test.url))
print("HEADERS:")
print("%s" % (templated_test.headers))
if mytest.body is not None:
print("\n%s" % templated_test.body)
raw_input("Press ENTER when ready (%d): " % (mytest.delay))
if mytest.delay > 0:
print("Delaying for %ds" % mytest.delay)
time.sleep(mytest.delay)
try:
curl.perform() # Run the actual call
except Exception as e:
# Curl exception occurred (network error), do not pass go, do not collect $200
trace = traceback.format_exc()
result.failures.append(Failure(message="Curl Exception: {0}".format(e), details=trace, failure_type=validators.FAILURE_CURL_EXCEPTION))
result.passed = False
curl.close()
return result
# Retrieve values
result.body = body.getvalue()
body.close()
result.response_headers = headers.getvalue()
headers.close()
response_code = curl.getinfo(pycurl.RESPONSE_CODE)
result.response_code = response_code
logger.debug("Initial Test Result, based on expected response code: "+str(response_code in mytest.expected_status))
if response_code in mytest.expected_status:
result.passed = True
else:
# Invalid response code
result.passed = False
failure_message = "Invalid HTTP response code: response code {0} not in expected codes [{1}]".format(response_code, mytest.expected_status)
result.failures.append(Failure(message=failure_message, details=None, failure_type=validators.FAILURE_INVALID_RESPONSE))
# Parse HTTP headers
try:
result.response_headers = parse_headers(result.response_headers)
except Exception as e:
trace = traceback.format_exc()
result.failures.append(Failure(message="Header parsing exception: {0}".format(e), details=trace, failure_type=validators.FAILURE_TEST_EXCEPTION))
result.passed = False
curl.close()
return result
# print str(test_config.print_bodies) + ',' + str(not result.passed) + ' , ' + str(test_config.print_bodies or not result.passed)
head = result.response_headers
# execute validator on body
if result.passed is True:
body = result.body
if mytest.validators is not None and isinstance(mytest.validators, list):
logger.debug("executing this many validators: " + str(len(mytest.validators)))
failures = result.failures
for validator in mytest.validators:
validate_result = validator.validate(body=body, headers=head, context=my_context)
if not validate_result:
result.passed = False
# Proxy for checking if it is a Failure object, because of import issues with isinstance there
if hasattr(validate_result, 'details'):
failures.append(validate_result)
# TODO add printing of validation for interactive mode
else:
logger.debug("no validators found")
# Only do context updates if test was successful
mytest.update_context_after(result.body, my_context)
# Print response body if override is set to print all *OR* if test failed (to capture maybe a stack trace)
if test_config.print_bodies or not result.passed:
if test_config.interactive:
print("RESPONSE:")
print(result.body.decode("string-escape"))
if test_config.print_headers or not result.passed:
if test_config.interactive:
print "RESPONSE HEADERS:"
print result.response_headers
# TODO add string escape on body output
logger.debug(result)
curl.close()
return result
def run_benchmark(benchmark, test_config = TestConfig(), context = None):
""" Perform a benchmark, (re)using a given, configured CURL call to do so
The actual analysis of metrics is performed separately, to allow for testing
"""
# Context handling
my_context = context
if my_context is None:
my_context = Context()
warmup_runs = benchmark.warmup_runs
benchmark_runs = benchmark.benchmark_runs
message = '' # Message is name of benchmark... print it?
if (benchmark_runs <= 0):
raise Exception("Invalid number of benchmark runs, must be > 0 :" + benchmark_runs)
result = TestResponse()
# TODO create and use a curl-returning configuration function
# TODO create and use a post-benchmark cleanup function
# They should use is_dynamic/is_context_modifier to determine if they need to
# worry about context and re-reading/retemplating and only do it if needed
# - Also, they will need to be smart enough to handle extraction functions
# For performance reasons, we don't want to re-run templating/extraction if
# we do not need to, and do not want to save request bodies.
# Initialize variables to store output
output = BenchmarkResult()
output.name = benchmark.name
output.group = benchmark.group
metricnames = list(benchmark.metrics)
metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name
results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric
curl = pycurl.Curl()
# Benchmark warm-up to allow for caching, JIT compiling, on client
logger.info('Warmup: ' + message + ' started')
for x in xrange(0, warmup_runs):
benchmark.update_context_before(my_context)
templated = benchmark.realize(my_context)
curl = templated.configure_curl(timeout=test_config.timeout, context=my_context, curl_handle=curl)
curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) # Do not store actual response body at all.
curl.perform()
logger.info('Warmup: ' + message + ' finished')
logger.info('Benchmark: ' + message + ' starting')
for x in xrange(0, benchmark_runs): # Run the actual benchmarks
# Setup benchmark
benchmark.update_context_before(my_context)
templated = benchmark.realize(my_context)
curl = templated.configure_curl(timeout=test_config.timeout, context=my_context, curl_handle=curl)
curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) # Do not store actual response body at all.
try: # Run the curl call, if it errors, then add to failure counts for benchmark
curl.perform()
except Exception:
output.failures = output.failures + 1
curl.close()
curl = pycurl.Curl()
continue # Skip metrics collection
# Get all metrics values for this run, and store to metric lists
for i in xrange(0, len(metricnames)):
results[i].append( curl.getinfo(metricvalues[i]) )
curl.close()
logger.info('Benchmark: ' + message + ' ending')
temp_results = dict()
for i in xrange(0, len(metricnames)):
temp_results[metricnames[i]] = results[i]
output.results = temp_results
return analyze_benchmark_results(output, benchmark)
def analyze_benchmark_results(benchmark_result, benchmark):
""" Take a benchmark result containing raw benchmark results, and do aggregation by
applying functions
Aggregates come out in format of metricname, aggregate_name, result """
output = BenchmarkResult()
output.name = benchmark_result.name
output.group = benchmark_result.group
output.failures = benchmark_result.failures
# Copy raw metric arrays over where necessary
raw_results = benchmark_result.results
temp = dict()
for metric in benchmark.raw_metrics:
temp[metric] = raw_results[metric]
output.results = temp
# Compute aggregates for each metric, and add tuples to aggregate results
aggregate_results = list()
for metricname, aggregate_list in benchmark.aggregated_metrics.iteritems():
numbers = raw_results[metricname]
for aggregate_name in aggregate_list:
if numbers: # Only compute aggregates if numbers exist
aggregate_function = AGGREGATES[aggregate_name]
aggregate_results.append( (metricname, aggregate_name, aggregate_function(numbers)) )
else:
aggregate_results.append( (metricname, aggregate_name, None) )
output.aggregates = aggregate_results
return output
def metrics_to_tuples(raw_metrics):
""" Converts metric dictionary of name:values_array into list of tuples
Use case: writing out benchmark to CSV, etc
Input:
{'metric':[value1,value2...], 'metric2':[value1,value2,...]...}
Output: list, with tuple header row, then list of tuples of values
[('metric','metric',...), (metric1_value1,metric2_value1, ...) ... ]
"""
if not isinstance(raw_metrics, dict):
raise TypeError("Input must be dictionary!")
metrics = sorted(raw_metrics.keys())
arrays = [raw_metrics[metric] for metric in metrics]
num_rows = len(arrays[0]) # Assume all same size or this fails
output = list()
output.append(tuple(metrics)) # Add headers
# Create list of tuples mimicking 2D array from input
for row in xrange(0, num_rows):
new_row = tuple([arrays[col][row] for col in xrange(0, len(arrays))])
output.append(new_row)
return output
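# Example (illustrative):
#   metrics_to_tuples({'total_time': [0.21, 0.35], 'size_download': [512, 512]})
# returns [('size_download', 'total_time'), (512, 0.21), (512, 0.35)];
# metric names are sorted alphabetically to form the header row.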
def write_benchmark_json(file_out, benchmark_result, benchmark, test_config = TestConfig()):
""" Writes benchmark to file as json """
json.dump(benchmark_result, file_out, default=safe_to_json)
def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config = TestConfig()):
""" Writes benchmark to file as csv """
writer = csv.writer(file_out)
writer.writerow(('Benchmark', benchmark_result.name))
writer.writerow(('Benchmark Group', benchmark_result.group))
writer.writerow(('Failures', benchmark_result.failures))
# Write result arrays
if benchmark_result.results:
writer.writerow(('Results',''))
writer.writerows(metrics_to_tuples(benchmark_result.results))
if benchmark_result.aggregates:
writer.writerow(('Aggregates',''))
writer.writerows(benchmark_result.aggregates)
# Method to call when writing benchmark file
OUTPUT_METHODS = {u'csv' : write_benchmark_csv, u'json': write_benchmark_json}
def log_failure(failure, context=None, test_config=TestConfig()):
""" Log a failure from a test """
logger.error("Test Failure, failure type: {0}, Reason: {1}".format(failure.failure_type, failure.message))
if failure.details:
logger.error("Validator/Error details:"+str(failure.details))
def run_testsets(testsets):
""" Execute a set of tests, using given TestSet list input """
group_results = dict() # results, by group
group_failure_counts = dict()
total_failures = 0
myinteractive = False
for testset in testsets:
mytests = testset.tests
myconfig = testset.config
mybenchmarks = testset.benchmarks
context = Context()
# Bind variables & add generators if pertinent
if myconfig.variable_binds:
context.bind_variables(myconfig.variable_binds)
if myconfig.generators:
for key, value in myconfig.generators.items():
context.add_generator(key, value)
# Make sure we actually have tests to execute
if not mytests and not mybenchmarks:
# No tests in this test set, probably just imports... skip to the next test set
continue
myinteractive = True if myinteractive or myconfig.interactive else False
# Run tests, collecting statistics as needed
for test in mytests:
# Initialize the dictionaries to store test fail counts and results
if test.group not in group_results:
group_results[test.group] = list()
group_failure_counts[test.group] = 0
result = run_test(test, test_config = myconfig, context=context)
result.body = None # Remove the body, save some memory!
if not result.passed: # Print failure, increase failure counts for that test group
# Use result test URL to allow for templating
logger.error('Test Failed: '+test.name+" URL="+result.test.url+" Group="+test.group+" HTTP Status Code: "+str(result.response_code))
# Print test failure reasons
if result.failures:
for failure in result.failures:
log_failure(failure, context=context, test_config=myconfig)
# Increment test failure counts for that group (adding an entry if not present)
failures = group_failure_counts[test.group]
failures = failures + 1
group_failure_counts[test.group] = failures
else: # Test passed, print results
logger.info('Test Succeeded: '+test.name+" URL="+test.url+" Group="+test.group)
# Add results for this test group to the resultset
group_results[test.group].append(result)
# handle stop_on_failure flag
if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:
print('STOP ON FAILURE! stopping test set execution, continuing with other test sets')
break
for benchmark in mybenchmarks: # Run benchmarks, analyze, write
if not benchmark.metrics:
logger.debug('Skipping benchmark, no metrics to collect')
continue
logger.info("Benchmark Starting: "+benchmark.name+" Group: "+benchmark.group)
benchmark_result = run_benchmark(benchmark, myconfig, context=context)
print(benchmark_result)
logger.info("Benchmark Done: "+benchmark.name+" Group: "+benchmark.group)
if benchmark.output_file: # Write file
logger.debug('Writing benchmark to file in format: '+benchmark.output_format)
write_method = OUTPUT_METHODS[benchmark.output_format]
my_file = open(benchmark.output_file, 'w') # Overwrites file
logger.debug("Benchmark writing to file: " + benchmark.output_file)
write_method(my_file, benchmark_result, benchmark, test_config = myconfig)
my_file.close()
if myinteractive:
# a break for when interactive bits are complete, before summary data
print("===================================")
# Print summary results
for group in sorted(group_results.keys()):
test_count = len(group_results[group])
failures = group_failure_counts[group]
total_failures = total_failures + failures
if (failures > 0):
print(u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!')
else:
print(u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!')
return total_failures
def register_extensions(modules):
""" Import the modules and register their respective extensions """
if isinstance(modules, basestring): # Catch supplying just a string arg
modules = [modules]
for ext in modules:
# Get the package prefix and final module name
segments = ext.split('.')
module = segments.pop()
package = '.'.join(segments)
module = __import__(ext, globals(), locals(), package) # Necessary to get the root module back
# Extensions are registered by applying a register function to sets of registry name/function pairs inside an object
extension_applies = {
'VALIDATORS': validators.register_validator,
'COMPARATORS': validators.register_comparator,
'VALIDATOR_TESTS': validators.register_test,
'EXTRACTORS': validators.register_extractor,
'GENERATORS': generators.register_generator
}
has_registry = False
for registry_name, register_function in extension_applies.items():
if hasattr(module, registry_name):
registry = getattr(module, registry_name)
for key, val in registry.items():
register_function(key, val)
if registry:
has_registry = True
if not has_registry:
raise ImportError("Extension to register did not contain any registries: {0}".format(ext))
# AUTOIMPORTS, these should run just before the main method, to ensure everything else is loaded
try:
import jsonschema
register_extensions('ext.validator_jsonschema')
except ImportError as ie:
logging.debug("Failed to load jsonschema validator, make sure the jsonschema module is installed if you wish to use schema validators.")
def main(args):
"""
Execute a test against the given base url.
Keys allowed for args:
url - REQUIRED - Base URL
test - REQUIRED - Test file (yaml)
print_bodies - OPTIONAL - print response body
print_headers - OPTIONAL - print response headers
log - OPTIONAL - set logging level {debug,info,warning,error,critical} (default=warning)
interactive - OPTIONAL - mode that prints info before and after test execution and pauses for user input for each test
absolute_urls - OPTIONAL - mode that treats URLs in tests as absolute/full URLs instead of relative URLs
"""
if 'log' in args and args['log'] is not None:
logger.setLevel(LOGGING_LEVELS.get(args['log'].lower(), logging.NOTSET))
if 'import_extensions' in args and args['import_extensions']:
extensions = args['import_extensions'].split(';')
# We need to add current folder to working path to import modules
working_folder = args['cwd']
if working_folder not in sys.path:
sys.path.insert(0, working_folder)
register_extensions(extensions)
test_file = args['test']
test_structure = read_test_file(test_file)
my_vars = None
if 'vars' in args and args['vars'] is not None:
my_vars = yaml.safe_load(args['vars'])
if my_vars and not isinstance(my_vars, dict):
raise Exception("Variables must be a dictionary!")
# Set up base URL
base_url = args['url']
if 'absolute_urls' in args and args['absolute_urls']:
base_url = ''
tests = parse_testsets(base_url, test_structure, working_directory=os.path.dirname(test_file), vars=my_vars)
# Override configs from command line if config set
for t in tests:
if 'print_bodies' in args and args['print_bodies'] is not None and bool(args['print_bodies']):
t.config.print_bodies = safe_to_bool(args['print_bodies'])
if 'print_headers' in args and args['print_headers'] is not None and bool(args['print_headers']):
t.config.print_headers = safe_to_bool(args['print_headers'])
if 'interactive' in args and args['interactive'] is not None:
t.config.interactive = safe_to_bool(args['interactive'])
if 'verbose' in args and args['verbose'] is not None:
t.config.verbose = safe_to_bool(args['verbose'])
if 'ssl_insecure' in args and args['ssl_insecure'] is not None:
t.config.ssl_insecure = safe_to_bool(args['ssl_insecure'])
# Execute all testsets
failures = run_testsets(tests)
sys.exit(failures)
def command_line_run(args_in):
""" Runs everything needed to execute from the command line, so main method is callable without arg parsing """
parser = OptionParser(usage="usage: %prog base_url test_filename.yaml [options] ")
parser.add_option(u"--print-bodies", help="Print all response bodies", action="store", type="string", dest="print_bodies")
parser.add_option(u"--print-headers", help="Print all response headers", action="store", type="string", dest="print_headers")
parser.add_option(u"--log", help="Logging level", action="store", type="string")
parser.add_option(u"--interactive", help="Interactive mode", action="store", type="string")
parser.add_option(u"--url", help="Base URL to run tests against", action="store", type="string")
parser.add_option(u"--test", help="Test file to use", action="store", type="string")
parser.add_option(u'--import_extensions', help='Extensions to import, separated by semicolons', action="store", type="string")
parser.add_option(u'--vars', help='Variables to set, as a YAML dictionary', action="store", type="string")
parser.add_option(u'--verbose', help='Put cURL into verbose mode for extra debugging power', action='store_true', default=False, dest="verbose")
parser.add_option(u'--ssl-insecure', help='Disable cURL host and peer cert verification', action='store_true', default=False, dest="ssl_insecure")
parser.add_option(u'--absolute-urls', help='Enable absolute URLs in tests instead of relative paths', action="store_true", dest="absolute_urls")
(args, unparsed_args) = parser.parse_args(args_in)
args = vars(args)
# Handle url/test as named, or, failing that, positional arguments
if not args['url'] or not args['test']:
if len(unparsed_args) == 2:
args[u'url'] = unparsed_args[0]
args[u'test'] = unparsed_args[1]
elif len(unparsed_args) == 1 and args['url']:
args['test'] = unparsed_args[0]
elif len(unparsed_args) == 1 and args['test']:
args['url'] = unparsed_args[0]
else:
parser.print_help()
parser.error("wrong number of arguments, need both url and test filename, either as 1st and 2nd parameters or via --url and --test")
args['cwd'] = os.path.realpath(os.path.abspath(os.getcwd())) # So modules can be loaded from current folder
main(args)
# Allow import into another module without executing the main method
if __name__ == '__main__':
command_line_run(sys.argv[1:])
|
|
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import ctypes
import os
import re
import subprocess
import sys
import tempfile
import objdump_parser
# Some constants from validator.h
VALIDATION_ERRORS_MASK = 0x05ffc000
UNSUPPORTED_INSTRUCTION = 0x04000000
BAD_JUMP_TARGET = 0x40000000
RESTRICTED_REGISTER_MASK = 0x00001f00
RESTRICTED_REGISTER_SHIFT = 8
NC_REG_RAX = 0
NC_REG_RCX = 1
NC_REG_RDX = 2
NC_REG_RBX = 3
NC_REG_RSP = 4
NC_REG_RBP = 5
NC_REG_RSI = 6
NC_REG_RDI = 7
NC_REG_R8 = 8
NC_REG_R9 = 9
NC_REG_R10 = 10
NC_REG_R11 = 11
NC_REG_R12 = 12
NC_REG_R13 = 13
NC_REG_R14 = 14
NC_REG_R15 = 15
ALL_REGISTERS = range(NC_REG_RAX, NC_REG_R15 + 1)
NC_NO_REG = 0x19
RESTRICTED_REGISTER_INITIAL_VALUE_MASK = 0x000000ff
CALL_USER_CALLBACK_ON_EACH_INSTRUCTION = 0x00000100
# Macros from validator.h
def PACK_RESTRICTED_REGISTER_INITIAL_VALUE(register):
return register ^ NC_NO_REG
BUNDLE_SIZE = 32
REGISTER_NAMES = {
NC_REG_RAX: '%rax',
NC_REG_RCX: '%rcx',
NC_REG_RDX: '%rdx',
NC_REG_RBX: '%rbx',
NC_REG_RSP: '%rsp',
NC_REG_RBP: '%rbp',
NC_REG_RSI: '%rsi',
NC_REG_RDI: '%rdi',
NC_REG_R8: '%r8',
NC_REG_R9: '%r9',
NC_REG_R10: '%r10',
NC_REG_R11: '%r11',
NC_REG_R12: '%r12',
NC_REG_R13: '%r13',
NC_REG_R14: '%r14',
NC_REG_R15: '%r15'}
REGISTER_BY_NAME = dict(map(reversed, REGISTER_NAMES.items()))
CALLBACK_TYPE = ctypes.CFUNCTYPE(
ctypes.c_uint32, # Bool result
ctypes.POINTER(ctypes.c_uint8), # begin
ctypes.POINTER(ctypes.c_uint8), # end
ctypes.c_uint32, # validation info
ctypes.c_void_p, # callback data
)
class DisassemblerError(Exception):
pass
class Validator(object):
def __init__(self, validator_dll=None, decoder_dll=None):
"""Initialize python interface to the validator.
Should be called before any calls to ValidateChunk.
Args:
validator_dll: path to dll that provides ValidateChunkIA32 and
ValidateChunkAMD64 functions.
Returns:
None.
"""
if validator_dll is not None:
validator_dll = ctypes.cdll.LoadLibrary(validator_dll)
self.GetFullCPUIDFeatures = validator_dll.GetFullCPUIDFeatures
self.GetFullCPUIDFeatures.restype = ctypes.c_void_p
self._ValidateChunkIA32 = validator_dll.ValidateChunkIA32
self._ValidateChunkAMD64 = validator_dll.ValidateChunkAMD64
self._ValidateChunkIA32.argtypes = self._ValidateChunkAMD64.argtypes = [
ctypes.POINTER(ctypes.c_uint8), # codeblock
ctypes.c_size_t, # size
ctypes.c_uint32, # options
ctypes.c_void_p, # CPU features
CALLBACK_TYPE, # callback
ctypes.c_void_p, # callback data
]
self._ValidateChunkIA32.restype = ctypes.c_bool # Bool
self._ValidateChunkAMD64.restype = ctypes.c_bool # Bool
self._ValidateAndGetFinalRestrictedRegister = (
validator_dll.ValidateAndGetFinalRestrictedRegister)
self._ValidateAndGetFinalRestrictedRegister.argtypes = [
ctypes.POINTER(ctypes.c_uint8), # codeblock
ctypes.c_size_t, # size
ctypes.c_size_t, # actual_size
ctypes.c_uint32, # initial_restricted_register
ctypes.c_void_p, # CPU features
ctypes.POINTER(ctypes.c_uint32), # resulting_restricted_register
]
self._ValidateAndGetFinalRestrictedRegister.restype = ctypes.c_bool # Bool
if decoder_dll is not None:
decoder_dll = ctypes.cdll.LoadLibrary(decoder_dll)
self.DisassembleChunk_ = decoder_dll.DisassembleChunk
self.DisassembleChunk_.argtypes = [
ctypes.POINTER(ctypes.c_uint8), # data
ctypes.c_size_t, # size
ctypes.c_int, # bitness
]
self.DisassembleChunk_.restype = ctypes.c_char_p
def ValidateChunk(
self,
data,
bitness,
callback=None,
on_each_instruction=False,
restricted_register=None):
"""Validate chunk, calling the callback if there are errors.
The validator interface must be initialized via the constructor before calling this method.
Args:
data: raw data to validate as python string.
bitness: 32 or 64.
callback: function that takes three arguments
begin_index, end_index and info (info is a combination of flags; it is
explained in validator.h). It is invoked for every erroneous
instruction.
on_each_instruction: whether to invoke callback on each instruction (not
only on erroneous ones).
restricted_register: initial value for the restricted_register variable
(see validator_internals.html for the details)
Returns:
True if the chunk is valid, False if invalid.
"""
data_addr = ctypes.cast(data, ctypes.c_void_p).value
def LowLevelCallback(begin, end, info, callback_data):
if callback is not None:
begin_index = ctypes.cast(begin, ctypes.c_void_p).value - data_addr
end_index = ctypes.cast(end, ctypes.c_void_p).value - data_addr
callback(begin_index, end_index, info)
# UNSUPPORTED_INSTRUCTION indicates validator failure only for pnacl-mode.
# Since by default we are in non-pnacl-mode, the flag is simply cleared.
info &= ~UNSUPPORTED_INSTRUCTION
# See validator.h for details
if info & (VALIDATION_ERRORS_MASK | BAD_JUMP_TARGET) != 0:
return 0
else:
return 1
options = 0
if on_each_instruction:
options |= CALL_USER_CALLBACK_ON_EACH_INSTRUCTION
if restricted_register is not None:
assert restricted_register in ALL_REGISTERS
options |= PACK_RESTRICTED_REGISTER_INITIAL_VALUE(restricted_register)
data_ptr = ctypes.cast(data, ctypes.POINTER(ctypes.c_uint8))
validate_chunk_function = {
32: self._ValidateChunkIA32,
64: self._ValidateChunkAMD64}[bitness]
result = validate_chunk_function(
data_ptr,
len(data),
options,
self.GetFullCPUIDFeatures(),
CALLBACK_TYPE(LowLevelCallback),
None)
return bool(result)
def ValidateAndGetFinalRestrictedRegister(
self, data, actual_size, initial_rr):
assert initial_rr is None or initial_rr in ALL_REGISTERS
if initial_rr is None:
initial_rr = NC_NO_REG
data_ptr = ctypes.cast(data, ctypes.POINTER(ctypes.c_uint8))
p = ctypes.pointer(ctypes.c_uint32(0))
result = self._ValidateAndGetFinalRestrictedRegister(
data_ptr, len(data),
actual_size,
initial_rr,
self.GetFullCPUIDFeatures(),
p)
if result:
if p[0] == NC_NO_REG:
resulting_rr = None
else:
resulting_rr = p[0]
return True, resulting_rr
else:
return False, None
def DisassembleChunk(self, data, bitness):
data_ptr = ctypes.cast(data, ctypes.POINTER(ctypes.c_uint8))
result = self.DisassembleChunk_(data_ptr, len(data), bitness)
instructions = []
total_bytes = 0
for line in cStringIO.StringIO(result):
m = re.match(r'rejected at ([\da-f]+)', line)
if m is not None:
offset = int(m.group(1), 16)
raise DisassemblerError(offset, ' '.join('%02x' % ord(c) for c in data))
insn = objdump_parser.ParseLine(line)
insn = objdump_parser.CanonicalizeInstruction(insn)
instructions.append(insn)
total_bytes += len(insn.bytes)
return instructions
# TODO(shcherbina): Remove it.
# Currently I'm keeping it around just in case (might be helpful for
# troubleshooting RDFA decoder).
def DisassembleChunkWithObjdump(self, data, bitness):
"""Disassemble chunk assuming it consists of valid instructions.
Args:
data: raw data as python string.
bitness: 32 or 64
Returns:
List of objdump_parser.Instruction tuples. If data can't be disassembled
(either contains invalid instructions or ends in the middle of an instruction)
an exception is raised.
"""
# TODO(shcherbina):
# Replace this shameless plug with python interface to RDFA decoder once
# https://code.google.com/p/nativeclient/issues/detail?id=3456 is done.
arch = {32: '-Mi386', 64: '-Mx86-64'}[bitness]
tmp = tempfile.NamedTemporaryFile(mode='wb', delete=False)
try:
tmp.write(data)
tmp.close()
objdump_proc = subprocess.Popen(
['objdump',
'-mi386', arch, '--target=binary',
'--disassemble-all', '--disassemble-zeroes',
'--insn-width=15',
tmp.name],
stdout=subprocess.PIPE)
instructions = []
total_bytes = 0
for line in objdump_parser.SkipHeader(objdump_proc.stdout):
insn = objdump_parser.ParseLine(line)
insn = objdump_parser.CanonicalizeInstruction(insn)
instructions.append(insn)
total_bytes += len(insn.bytes)
assert len(data) == total_bytes
return_code = objdump_proc.wait()
assert return_code == 0, 'error running objdump'
return instructions
finally:
tmp.close()
os.remove(tmp.name)
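# A small sketch of the ValidateAndGetFinalRestrictedRegister wrapper above,
# assuming the caller supplies a path to the validator DLL (as in main() below).
# A bundle of NOPs is expected to leave no restricted register, so the helper
# should report success with a resulting restricted register of None.
def _example_final_restricted_register(validator_dll_path):
    validator = Validator(validator_dll=validator_dll_path)
    bundle = '\x90' * BUNDLE_SIZE  # one 32-byte bundle of NOPs
    ok, resulting_rr = validator.ValidateAndGetFinalRestrictedRegister(
        bundle, len(bundle), None)
    return ok, resulting_rr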
def main():
print 'Self check'
print sys.argv
validator_dll, = sys.argv[1:]
print validator_dll
validator = Validator(validator_dll)
# 'z' is the first byte of JP instruction (which does not validate in this
# case because it crosses bundle boundary)
data = '\x90' * 31 + 'z'
for bitness in 32, 64:
errors = []
def Callback(begin_index, end_index, info):
errors.append(begin_index)
print 'callback', begin_index, end_index
result = validator.ValidateChunk(
data,
bitness=bitness,
callback=Callback)
assert not result
assert errors == [31], errors
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
'''
@package ion.services.dm.utility.granule.record_dictionary
@file ion/services/dm/utility/granule/record_dictionary.py
@author Tim Giguere
@author Luke Campbell <[email protected]>
@brief https://confluence.oceanobservatories.org/display/CIDev/Record+Dictionary
'''
from pyon.container.cc import Container
from pyon.core.exception import BadRequest, NotFound
from pyon.core.object import IonObjectSerializer
from pyon.core.interceptor.encode import encode_ion
from pyon.util.arg_check import validate_equal
from pyon.util.log import log
from pyon.util.memoize import memoize_lru
from ion.util.stored_values import StoredValueManager
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.objects import Granule, StreamDefinition
from coverage_model import ParameterDictionary, ConstantType, ConstantRangeType, get_value_class, SimpleDomainSet, QuantityType, Span, SparseConstantType
from coverage_model.parameter_functions import ParameterFunctionException
from coverage_model.parameter_values import AbstractParameterValue, ConstantValue
from coverage_model.parameter_types import ParameterFunctionType
from coverage_model import PythonFunction, NumexprFunction
import numpy as np
import numexpr as ne
from copy import copy
import msgpack
import time
class RecordDictionaryTool(object):
"""
A record dictionary is a key/value store which contains records for a particular dataset. The keys are specified by
a parameter dictionary, which maps them to the fields in the dataset, the data types for the records, and metadata
about the fields. Each field in a record dictionary must contain the same number of records. A record can consist of
a scalar value (typically a NumPy scalar) or it may contain an array or dictionary of values. The data type for each
field is determined by the parameter dictionary. The length of the first assignment dictates the allowable size for
the RDT.
The record dictionary can contain an instance of the parameter dictionary itself or it may contain a reference to
one by use of a stream definition. A stream definition is a resource persisted by the resource registry and contains
the parameter dictionary. When the record dictionary is constructed the client will specify either a stream
definition identifier or a parameter dictionary.
ParameterDictionaries are inherently large and congest message traffic through RabbitMQ, so it is preferred
to use stream definitions in lieu of parameter dictionaries directly.
"""
_rd = None
_pdict = None
_shp = None
_locator = None
_stream_def = None
_dirty_shape = False
_available_fields = None
_creation_timestamp = None
_stream_config = {}
_definition = None
connection_id = ''
connection_index = ''
def __init__(self,param_dictionary=None, stream_definition_id='', locator=None, stream_definition=None):
"""
"""
if type(param_dictionary) == dict:
self._pdict = ParameterDictionary.load(param_dictionary)
elif isinstance(param_dictionary,ParameterDictionary):
self._pdict = param_dictionary
elif stream_definition_id or stream_definition:
if stream_definition:
if not isinstance(stream_definition,StreamDefinition):
raise BadRequest('Improper StreamDefinition object')
self._definition = stream_definition
stream_def_obj = stream_definition or RecordDictionaryTool.read_stream_def(stream_definition_id)
pdict = stream_def_obj.parameter_dictionary
self._available_fields = stream_def_obj.available_fields or None
self._stream_config = stream_def_obj.stream_configuration
self._pdict = ParameterDictionary.load(pdict)
self._stream_def = stream_definition_id
else:
raise BadRequest('Unable to create record dictionary with improper ParameterDictionary')
if stream_definition_id:
self._stream_def=stream_definition_id
self._shp = None
self._rd = {}
self._locator = locator
self._setup_params()
def _pval_callback(self, name, slice_):
retval = np.atleast_1d(self[name])
return retval[slice_]
@classmethod
def get_paramval(cls, ptype, domain, values):
paramval = get_value_class(ptype, domain_set=domain)
if isinstance(ptype,ParameterFunctionType):
paramval.memoized_values = values
if isinstance(ptype,SparseConstantType):
values = np.atleast_1d(values)
spans = cls.spanify(values)
paramval.storage._storage = np.array([spans],dtype='object')
else:
paramval[:] = values
paramval.storage._storage.flags.writeable = False
return paramval
def lookup_values(self):
return [i for i in self._lookup_values() if not self.context(i).document_key]
def _lookup_values(self):
lookup_values = []
for field in self.fields:
if hasattr(self.context(field), 'lookup_value'):
lookup_values.append(field)
return lookup_values
@classmethod
def spanify(cls,arr):
spans = []
lastval = None
for i,val in enumerate(arr):
if i == 0:
span = Span(None,None,0,val)
spans.append(span)
lastval = val
continue
if np.atleast_1d(lastval == val).all():
continue
spans[-1].upper_bound = i
span = Span(i,None,-i,val)
spans.append(span)
lastval = val
return spans
def fetch_lookup_values(self):
doc_keys = []
for lv in self._lookup_values():
context = self.context(lv)
if context.document_key:
document_key = context.document_key
if '$designator' in context.document_key and 'reference_designator' in self._stream_config:
document_key = document_key.replace('$designator',self._stream_config['reference_designator'])
doc_keys.append(document_key)
lookup_docs = {}
if doc_keys:
svm = StoredValueManager(Container.instance)
doc_list = svm.read_value_mult(doc_keys)
lookup_docs = dict(zip(doc_keys, doc_list))
for lv in self._lookup_values():
context = self.context(lv)
if context.document_key:
document_key = context.document_key
if '$designator' in context.document_key and 'reference_designator' in self._stream_config:
document_key = document_key.replace('$designator',self._stream_config['reference_designator'])
doc = lookup_docs[document_key]
if doc is None:
log.debug('Reference Document for %s not found', document_key)
continue
if context.lookup_value in doc:
self[lv] = [doc[context.lookup_value]] * self._shp[0] if self._shp else doc[context.lookup_value]
@classmethod
def load_from_granule(cls, g):
if g.stream_definition_id:
instance = cls(stream_definition_id=g.stream_definition_id, locator=g.locator)
elif g.stream_definition:
instance = cls(stream_definition=g.stream_definition, locator=g.locator)
else:
instance = cls(param_dictionary=g.param_dictionary, locator=g.locator)
if g.domain:
instance._shp = (g.domain[0],)
if g.creation_timestamp:
instance._creation_timestamp = g.creation_timestamp
# Do time first
time_ord = instance.to_ordinal(instance.temporal_parameter)
if g.record_dictionary[time_ord] is not None:
instance._rd[instance.temporal_parameter] = g.record_dictionary[time_ord]
for k,v in g.record_dictionary.iteritems():
key = instance.from_ordinal(k)
if v is not None:
#ptype = instance._pdict.get_context(key).param_type
#paramval = cls.get_paramval(ptype, instance.domain, v)
instance._rd[key] = v
instance.connection_id = g.connection_id
instance.connection_index = g.connection_index
return instance
def to_granule(self, data_producer_id='',provider_metadata_update={}, connection_id='', connection_index=''):
granule = Granule()
granule.record_dictionary = {}
for key,val in self._rd.iteritems():
if val is not None:
granule.record_dictionary[self.to_ordinal(key)] = self[key]
else:
granule.record_dictionary[self.to_ordinal(key)] = None
granule.param_dictionary = {} if self._stream_def else self._pdict.dump()
if self._definition:
granule.stream_definition = self._definition
else:
granule.stream_definition = None
granule.stream_definition_id = self._stream_def
granule.locator = self._locator
granule.domain = self.domain.shape
granule.data_producer_id=data_producer_id
granule.provider_metadata_update=provider_metadata_update
granule.creation_timestamp = time.time()
granule.connection_id = connection_id
granule.connection_index = connection_index
return granule
def _setup_params(self):
for param in self._pdict.keys():
self._rd[param] = None
@property
def fields(self):
if self._available_fields is not None:
return list(set(self._available_fields).intersection(self._pdict.keys()))
return self._pdict.keys()
@property
def domain(self):
dom = SimpleDomainSet(self._shp)
return dom
@property
def temporal_parameter(self):
return self._pdict.temporal_parameter_name
def fill_value(self,name):
return self._pdict.get_context(name).fill_value
def _replace_hook(self, name,vals):
if vals is None:
return None
if not isinstance(self._pdict.get_context(name).param_type, QuantityType):
return vals
if isinstance(vals, (list,tuple)):
vals = [i if i is not None else self.fill_value(name) for i in vals]
if all([i is None for i in vals]):
return None
return vals
if isinstance(vals, np.ndarray):
np.place(vals,vals==np.array(None), self.fill_value(name))
try:
if (vals == np.array(self.fill_value(name))).all():
return None
except AttributeError:
pass
return np.asanyarray(vals, dtype=self._pdict.get_context(name).param_type.value_encoding)
return np.atleast_1d(vals)
def __setitem__(self, name, vals):
return self._set(name, self._replace_hook(name,vals))
def _set(self, name, vals):
"""
Set a parameter
"""
if name not in self.fields:
raise KeyError(name)
if vals is None:
self._rd[name] = None
return
context = self._pdict.get_context(name)
if self._shp is None and isinstance(context.param_type, (SparseConstantType, ConstantType, ConstantRangeType)):
self._shp = (1,)
self._dirty_shape = True
elif self._shp is None or self._dirty_shape:
if isinstance(vals, np.ndarray):
self._shp = (vals.shape[0],) # Only support 1-d right now
elif isinstance(vals, list):
self._shp = (len(vals),)
else:
raise BadRequest('No shape was defined')
log.trace('Set shape to %s', self._shp)
if self._dirty_shape:
self._dirty_shape = False
self._reshape_const()
else:
if isinstance(vals, np.ndarray):
if not vals.shape:
raise BadRequest('Invalid shape on input (dimensionless)')
validate_equal(vals.shape[0], self._shp[0], 'Invalid shape on input (%s expecting %s)' % (vals.shape, self._shp))
elif isinstance(vals, list):
validate_equal(len(vals), self._shp[0], 'Invalid shape on input')
#paramval = self.get_paramval(context.param_type, dom, vals)
self._rd[name] = vals
def param_type(self, name):
if name in self.fields:
return self._pdict.get_context(name).param_type
raise KeyError(name)
def context(self, name):
if name in self.fields:
return self._pdict.get_context(name)
raise KeyError(name)
def _reshape_const(self):
for k in self.fields:
if isinstance(self._rd[k], ConstantValue):
self._rd[k].domain_set = self.domain
def __getitem__(self, name):
"""
Get an item by nick name from the record dictionary.
"""
if not self._shp:
return None
if self._available_fields and name not in self._available_fields:
raise KeyError(name)
ptype = self._pdict.get_context(name).param_type
if isinstance(ptype, ParameterFunctionType):
if self._rd[name] is not None:
return np.atleast_1d(self._rd[name]) # It was already set
try:
return self._get_param_func(name)
except ParameterFunctionException:
log.debug('failed to get parameter function field: %s (%s)', name, self._pdict.keys(), exc_info=True)
if self._rd[name] is not None:
return np.atleast_1d(self._rd[name])
return None
def _get_param_func(self, name):
ptype = self._pdict.get_context(name).param_type
if isinstance(ptype.function, PythonFunction):
args = self._build_arg_map(name, ptype)
# For missing parameter inputs, return None
if args is None:
return None
if not hasattr(ptype.function,'_callable'):
ptype.function._import_func()
retval = ptype.function._callable(*args)
return retval
elif isinstance(ptype.function, NumexprFunction):
args = self._build_arg_map(name, ptype, return_dict=True)
# For missing parameter inputs, return None
if args is None:
return None
retval = ne.evaluate(ptype.function.expression, local_dict=args)
return retval
else:
raise BadRequest("%s not supported parameter function type" % type(ptype.function))
def _build_arg_map(self, name, ptype, return_dict=False):
# get the arg list
arg_list = ptype.function.arg_list
# the map
arg_map = ptype.function.param_map
# get the arrays for each
array_map = {}
for k,v in arg_map.iteritems():
if isinstance(v, basestring):
array_value = self[v]
if array_value is None:
log.warning("Missing inputs for parameter function %s", name)
return None
array_map[k] = array_value
else:
array_map[k] = v
if return_dict:
return array_map
return [array_map[i] for i in arg_list]
def iteritems(self):
""" D.iteritems() -> an iterator over the (key, value) items of D """
for k,v in self._rd.iteritems():
if self._available_fields and k not in self._available_fields:
continue
if v is not None:
yield k,v
def iterkeys(self):
""" D.iterkeys() -> an iterator over the keys of D """
for k,v in self._rd.iteritems():
if v is not None:
yield k
def itervalues(self):
""" D.itervalues() -> an iterator over the values of D """
for k,v in self._rd.iteritems():
if v is not None:
yield v
def __contains__(self, key):
""" D.__contains__(k) -> True if D has a key k, else False """
if self._available_fields:
return key in self._rd and key in self._available_fields
return key in self._rd
def __delitem__(self, y):
""" x.__delitem__(y) <==> del x[y] """
self._rd[y] = None
def __iter__(self):
""" x.__iter__() <==> iter(x) """
for k in self._rd.iterkeys():
yield k
def __len__(self):
""" x.__len__() <==> len(x) """
if self._shp is None:
return 0
else:
return self._shp[0]
def __repr__(self):
""" x.__repr__() <==> repr(x) """
return self.pretty_print()
def __str__(self):
return 'Record Dictionary %s' % self._rd.keys()
__hash__ = None
def pretty_print(self):
"""
@brief Pretty Print the record dictionary for debug or log purposes.
"""
from pprint import pformat
repr_dict = {}
for field in self.fields:
if self[field] is not None:
repr_dict[field] = self[field][:]
return pformat(repr_dict)
def size(self):
'''
Truly poor way to calculate the size of a granule...
returns the size in bytes.
'''
granule = self.to_granule()
serializer = IonObjectSerializer()
flat = serializer.serialize(granule)
byte_stream = msgpack.packb(flat, default=encode_ion)
return len(byte_stream)
def to_ordinal(self, key):
params = copy(self._rd.keys())
params.sort()
try:
return params.index(key)
except ValueError:
raise KeyError(key)
def from_ordinal(self, ordinal):
params = copy(self._rd.keys())
params.sort()
return params[ordinal]
@staticmethod
def read_stream_def(stream_def_id):
pubsub_cli = PubsubManagementServiceClient()
stream_def_obj = pubsub_cli.read_stream_definition(stream_def_id)
return stream_def_obj
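# A brief round-trip sketch of the class above, assuming `pdict` is an
# already-built coverage_model ParameterDictionary whose temporal parameter is
# named 'time'; both names are assumptions for illustration only.
def _example_round_trip(pdict):
    rdt = RecordDictionaryTool(param_dictionary=pdict)
    rdt['time'] = np.arange(10)  # the first assignment fixes the RDT length
    granule = rdt.to_granule()   # a Granule suitable for publishing on a stream
    rdt_copy = RecordDictionaryTool.load_from_granule(granule)
    return np.array_equal(rdt_copy['time'], np.arange(10))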
|
|
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file will be used with PyPi in order to package and distribute the final
# product.
"""Discovers the mongo cluster and starts the connector.
"""
import logging
import logging.handlers
import oplog_manager
import optparse
import os
import pymongo
import re
import shutil
import sys
import threading
import time
import util
import imp
from locking_dict import LockingDict
try:
from pymongo import MongoClient as Connection
except ImportError:
from pymongo import Connection
try:
import simplejson as json
except ImportError:
import json
class Connector(threading.Thread):
"""Checks the cluster for shards to tail.
"""
def __init__(self, address, oplog_checkpoint, target_url, ns_set,
u_key, auth_key, no_dump, batch_size,
doc_manager=None, auth_username=None):
if doc_manager is not None:
doc_manager = imp.load_source('DocManager', doc_manager)
else:
from doc_manager import DocManager
time.sleep(1)
super(Connector, self).__init__()
#can_run is set to false when we join the thread
self.can_run = True
#The name of the file that stores the progress of the OplogThreads
self.oplog_checkpoint = oplog_checkpoint
#main address - either mongos for sharded setups or a primary otherwise
self.address = address
#The URL of the target system
self.target_url = target_url
#The set of relevant namespaces to consider
self.ns_set = ns_set
#The key that is a unique document identifier for the target system.
#Not necessarily the mongo unique key.
self.u_key = u_key
#Password for authentication
self.auth_key = auth_key
#Username for authentication
self.auth_username = auth_username
#The set of OplogThreads created
self.shard_set = {}
#Boolean chooses whether to dump the entire collection if no timestamp
# is present in the config file
self._no_dump = no_dump
#Num entries to process before updating config file with current pos
self._batch_size = batch_size
#Dict of OplogThread/timestamp pairs to record progress
self.oplog_progress = LockingDict()
try:
if target_url is None:
if doc_manager is None: # imported using from... import
self.doc_manager = DocManager(unique_key=u_key)
else: # imported using load source
self.doc_manager = doc_manager.DocManager(unique_key=u_key)
else:
if doc_manager is None:
self.doc_manager = DocManager(self.target_url,
unique_key=u_key)
else:
self.doc_manager = doc_manager.DocManager(self.target_url,
unique_key=u_key)
except SystemError:
logging.critical("MongoConnector: Bad target system URL!")
self.can_run = False
return
if self.oplog_checkpoint is not None:
if not os.path.exists(self.oplog_checkpoint):
info_str = "MongoConnector: Can't find OplogProgress file!"
logging.critical(info_str)
self.doc_manager.stop()
self.can_run = False
else:
if (not os.access(self.oplog_checkpoint, os.W_OK)
and not os.access(self.oplog_checkpoint, os.R_OK )):
logging.critical("Invalid permissions on %s! Exiting" %
(self.oplog_checkpoint))
sys.exit(1)
def join(self):
""" Joins thread, stops it from running
"""
self.can_run = False
self.doc_manager.stop()
threading.Thread.join(self)
def write_oplog_progress(self):
""" Writes oplog progress to file provided by user
"""
if self.oplog_checkpoint is None:
return None
# write to temp file
backup_file = self.oplog_checkpoint + '.backup'
os.rename(self.oplog_checkpoint, backup_file)
# for each of the threads write to file
with open(self.oplog_checkpoint, 'w') as dest:
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
for oplog, time_stamp in oplog_dict.items():
oplog_str = str(oplog)
timestamp = util.bson_ts_to_long(time_stamp)
json_str = json.dumps([oplog_str, timestamp])
try:
dest.write(json_str)
except IOError:
# Basically wipe the file, copy from backup
dest.truncate()
with open(backup_file, 'r') as backup:
shutil.copyfileobj(backup, dest)
break
os.remove(self.oplog_checkpoint + '.backup')
def read_oplog_progress(self):
"""Reads oplog progress from file provided by user.
This method is only called once before any threads are spawned.
"""
if self.oplog_checkpoint is None:
return None
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
logging.info("MongoConnector: Empty oplog progress file.")
return None
except OSError:
return None
source = open(self.oplog_checkpoint, 'r')
try:
data = json.load(source)
except ValueError: # empty file
reason = "It may be empty or corrupt."
logging.info("MongoConnector: Can't read oplog progress file. %s" %
(reason))
source.close()
return None
source.close()
count = 0
oplog_dict = self.oplog_progress.get_dict()
for count in range(0, len(data), 2):
oplog_str = data[count]
time_stamp = data[count + 1]
oplog_dict[oplog_str] = util.long_to_bson_ts(time_stamp)
#stored as bson_ts
def run(self):
"""Discovers the mongo cluster and creates a thread for each primary.
"""
main_conn = Connection(self.address)
if self.auth_key is not None:
main_conn['admin'].authenticate(self.auth_username, self.auth_key)
self.read_oplog_progress()
conn_type = None
try:
main_conn.admin.command("isdbgrid")
except pymongo.errors.OperationFailure:
conn_type = "REPLSET"
if conn_type == "REPLSET":
#non sharded configuration
oplog_coll = main_conn['local']['oplog.rs']
prim_admin = main_conn.admin
repl_set = prim_admin.command("replSetGetStatus")['set']
oplog = oplog_manager.OplogThread(main_conn,
(main_conn.host + ":" + str(main_conn.port)),
oplog_coll,
False, self.doc_manager,
self.oplog_progress,
self.ns_set, self.auth_key,
self.auth_username,
self._no_dump,
self._batch_size,
repl_set=repl_set)
self.shard_set[0] = oplog
logging.info('MongoConnector: Starting connection thread %s' %
main_conn)
oplog.start()
while self.can_run:
if not self.shard_set[0].running:
logging.error("MongoConnector: OplogThread"
" %s unexpectedly stopped! Shutting down" %
(str(self.shard_set[0])))
self.oplog_thread_join()
self.doc_manager.stop()
return
self.write_oplog_progress()
time.sleep(1)
else: # sharded cluster
while self.can_run is True:
for shard_doc in main_conn['config']['shards'].find():
shard_id = shard_doc['_id']
if shard_id in self.shard_set:
if not self.shard_set[shard_id].running:
logging.error("MongoConnector: OplogThread"
" %s unexpectedly stopped! Shutting down" %
(str(self.shard_set[shard_id])))
self.oplog_thread_join()
self.doc_manager.stop()
return
self.write_oplog_progress()
time.sleep(1)
continue
try:
repl_set, hosts = shard_doc['host'].split('/')
except ValueError:
cause = "The system only uses replica sets!"
logging.error("MongoConnector: %s", cause)
self.oplog_thread_join()
self.doc_manager.stop()
return
shard_conn = Connection(hosts, replicaset=repl_set)
oplog_coll = shard_conn['local']['oplog.rs']
oplog = oplog_manager.OplogThread(shard_conn, self.address,
oplog_coll, True,
self.doc_manager,
self.oplog_progress,
self.ns_set,
self.auth_key,
self.auth_username,
self._no_dump,
self._batch_size)
self.shard_set[shard_id] = oplog
msg = "Starting connection thread"
logging.info("MongoConnector: %s %s" % (msg, shard_conn))
oplog.start()
self.oplog_thread_join()
def oplog_thread_join(self):
"""Stops all the OplogThreads
"""
logging.info('MongoConnector: Stopping all OplogThreads')
for thread in self.shard_set.values():
thread.join()
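# A short sketch of embedding the Connector above without the CLI in main()
# below; the address and checkpoint file are hypothetical, and the remaining
# arguments mirror the optparse defaults (default doc manager, all namespaces).
def _example_embedded_connector():
    connector = Connector(
        'localhost:27017',  # assumption: a local primary or mongos
        'config.txt',       # assumption: an existing oplog progress file
        None,               # no target URL -> default DocManager
        [],                 # empty namespace set -> consider all namespaces
        '_id',              # unique key for the target system
        None,               # no auth key
        False,              # no_dump disabled -> full dump when no timestamp
        -1)                 # batch_size: only update config after a full pass
    connector.start()
    return connector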
def main():
""" Starts the mongo connector (assuming CLI)
"""
parser = optparse.OptionParser()
#-m is for the main address, which is a host:port pair, ideally of the
#mongos. For non sharded clusters, it can be the primary.
parser.add_option("-m", "--main", action="store", type="string",
dest="main_addr", default="localhost:27217",
help="""Specify the main address, which is a"""
""" host:port pair. For sharded clusters, this"""
""" should be the mongos address. For individual"""
""" replica sets, supply the address of the"""
""" primary. For example, `-m localhost:27217`"""
""" would be a valid argument to `-m`. Don't use"""
""" quotes around the address""")
#-o is to specify the oplog-config file. This file is used by the system
#to store the last timestamp read on a specific oplog. This allows for
#quick recovery from failure.
parser.add_option("-o", "--oplog-ts", action="store", type="string",
dest="oplog_config", default="config.txt",
help="""Specify the name of the file that stores the"""
"""oplog progress timestamps. """
"""This file is used by the system to store the last"""
"""timestamp read on a specific oplog. This allows"""
""" for quick recovery from failure. By default this"""
""" is `config.txt`, which starts off empty. An empty"""
""" file causes the system to go through all the mongo"""
""" oplog and sync all the documents. Whenever the """
"""cluster is restarted, it is essential that the """
"""oplog-timestamp config file be emptied - otherwise"""
""" the connector will miss some documents and behave"""
"""incorrectly.""")
#--no-dump specifies whether we should read an entire collection from
#scratch if no timestamp is found in the oplog_config.
parser.add_option("--no-dump", action="store_true", default=False, help=
"If specified, this flag will ensure that "
"mongo_connector won't read the entire contents of a "
"namespace iff --oplog-ts points to an empty file.")
#--batch-size specifies num docs to read from oplog before updating the
#--oplog-ts config file with current oplog position
parser.add_option("--batch-size", action="store", default=-1, type="int",
help="Specify an int to update the --oplog-ts "
"config file with latest position of oplog every "
"N documents. By default, the oplog config isn't "
"updated until we've read through the entire oplog. "
"You may want more frequent updates if you are at risk "
"of falling behind the earliest timestamp in the oplog")
#-t is to specify the URL to the target system being used.
parser.add_option("-t", "--target-url", action="store", type="string",
dest="url", default=None,
help="""Specify the URL to the target system being """
"""used. For example, if you were using Solr out of """
"""the box, you could use '-t """
""" http://localhost:8080/solr' with the """
""" SolrDocManager to establish a proper connection."""
""" Don't use quotes around address."""
"""If target system doesn't need URL, don't specify""")
#-n is to specify the namespaces we want to consider. The default
#considers all the namespaces
parser.add_option("-n", "--namespace-set", action="store", type="string",
dest="ns_set", default=None, help=
"""Used to specify the namespaces we want to """
""" consider. For example, if we wished to store all """
""" documents from the test.test and alpha.foo """
""" namespaces, we could use `-n test.test,alpha.foo`."""
""" The default is to consider all the namespaces, """
""" excluding the system and config databases, and """
""" also ignoring the "system.indexes" collection in """
"""any database.""")
#-u is to specify the mongoDB field that will serve as the unique key
#for the target system,
parser.add_option("-u", "--unique-key", action="store", type="string",
dest="u_key", default="_id", help=
"""Used to specify the mongoDB field that will serve"""
"""as the unique key for the target system"""
"""The default is "_id", which can be noted by """
""" '-u _id'""")
#-f is to specify the authentication key file. This file is used by mongos
#to authenticate connections to the shards, and we'll use it in the oplog
#threads.
parser.add_option("-f", "--password-file", action="store", type="string",
dest="auth_file", default=None, help=
""" Used to store the password for authentication."""
""" Use this option if you wish to specify a"""
""" username and password but don't want to"""
""" type in the password. The contents of this"""
""" file should be the password for the admin user.""")
#-p is to specify the password used for authentication.
parser.add_option("-p", "--password", action="store", type="string",
dest="password", default=None, help=
""" Used to specify the password."""
""" This is used by mongos to authenticate"""
""" connections to the shards, and in the"""
""" oplog threads. If authentication is not used, then"""
""" this field can be left empty as the default """)
#-a is to specify the username for authentication.
parser.add_option("-a", "--admin-username", action="store", type="string",
dest="admin_name", default="__system", help=
"""Used to specify the username of an admin user to"""
"""authenticate with. To use authentication, the user"""
"""must specify both an admin username and a keyFile."""
"""The default username is '__system'""")
#-d is to specify the doc manager file.
parser.add_option("-d", "--docManager", action="store", type="string",
dest="doc_manager", default=None, help=
"""Used to specify the doc manager file that"""
""" is going to be used. You should send the"""
""" path of the file you want to be used."""
""" By default, it will use the """
""" doc_manager_simulator.py file. It is"""
""" recommended that all doc manager files be"""
""" kept in the doc_managers folder in"""
""" mongo-connector. For more information"""
""" about making your own doc manager,"""
""" see Doc Manager section.""")
#-s is to enable syslog logging.
parser.add_option("-s", "--enable-syslog", action="store_true",
dest="enable_syslog", default=False, help=
"""Used to enable logging to syslog."""
""" Use -l to specify syslog host.""")
#--syslog-host is to specify the syslog host.
parser.add_option("--syslog-host", action="store", type="string",
dest="syslog_host", default="localhost:514", help=
"""Used to specify the syslog host."""
""" The default is 'localhost:514'""")
#--syslog-facility is to specify the syslog facility.
parser.add_option("--syslog-facility", action="store", type="string",
dest="syslog_facility", default="user", help=
"""Used to specify the syslog facility."""
""" The default is 'user'""")
(options, args) = parser.parse_args()
logger = logging.getLogger()
loglevel = logging.INFO
logger.setLevel(loglevel)
if options.enable_syslog:
syslog_info = options.syslog_host.split(":")
syslog_host = logging.handlers.SysLogHandler(address=(syslog_info[0],
int(syslog_info[1])),facility=options.syslog_facility)
syslog_host.setLevel(loglevel)
logger.addHandler(syslog_host)
else:
log_out = logging.StreamHandler()
log_out.setLevel(loglevel)
log_out.setFormatter(logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(log_out)
logger.info('Beginning Mongo Connector')
if options.doc_manager is None:
logger.info('No doc manager specified, using simulator.')
if options.ns_set is None:
ns_set = []
else:
ns_set = options.ns_set.split(',')
key = None
if options.auth_file is not None:
try:
key = open(options.auth_file).read()
key = re.sub(r'\s', '', key)
except IOError:
logger.error('Could not parse password authentication file!')
sys.exit(1)
if options.password is not None:
key = options.password
if key is None and options.admin_name != "__system":
logger.error("Admin username specified without password!")
sys.exit(1)
connector = Connector(
options.main_addr, options.oplog_config, options.url,
ns_set, options.u_key, key, options.no_dump, options.batch_size,
options.doc_manager,
auth_username=options.admin_name)
connector.start()
while True:
try:
time.sleep(3)
if not connector.is_alive():
break
except KeyboardInterrupt:
logging.info("Caught keyboard interrupt, exiting!")
connector.join()
break
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytz
import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isnull, date_range,
MultiIndex, Index, Timestamp, NaT, IntervalIndex)
from pandas.compat import range
from pandas._libs.tslib import iNaT
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
try:
import scipy
_is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
except:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData(TestData):
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(NaT)
expected = Series([NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isnull(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isnull(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isnull(td1[0])
td1[1] = iNaT
assert isnull(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isnull(td1[1])
td1[2] = NaT
assert isnull(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isnull(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isnull(result).sum() == 7
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
        assert np.array_equal(ser.isnull(),
                              Series([False, False, False, True, False]).values)
        ser = Series(["hi", "", nan])
        assert np.array_equal(ser.isnull(),
                              Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
        assert np.array_equal(ser.notnull(),
                              Series([True, True, True, False, True]).values)
        ser = Series(["hi", "", nan])
        assert np.array_equal(ser.notnull(),
                              Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
        assert np.isnan(x[0]) and np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
        # neither monotonic increasing nor decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
assert result.name == self.ts.name
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData(TestData):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_unlimited(self):
        # these tests are for issue #16282; the default limit=None is unlimited
s = Series([np.nan, 1., 3., np.nan, np.nan, np.nan, 11., np.nan])
expected = Series([1., 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([np.nan, 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([1., 1., 3., 5., 7., 9., 11., np.nan])
result = s.interpolate(method='linear',
limit_direction='backward')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to the beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with pytest.raises(ValueError):
s.interpolate(method='polynomial')
with pytest.raises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', min_version='0.15',
app='setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
assert (s.interpolate(method='spline', order=3, s=0)[5] !=
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
def test_spline_error(self):
# see gh-10633
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with pytest.raises(ValueError):
s.interpolate(method='spline')
with pytest.raises(ValueError):
s.interpolate(method='spline', order=0)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
        # test for non-uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).sort_values()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()
result = ts.reindex(new_index).interpolate(method='time')
tm.assert_numpy_array_equal(result.values, exp.values)
|
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heapq import heappush, heappop
from itertools import cycle
import six
from six.moves import xrange, zip
from threading import Condition
import sys
from cassandra.cluster import ResultSet
import logging
log = logging.getLogger(__name__)
def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False):
"""
Executes a sequence of (statement, parameters) tuples concurrently. Each
``parameters`` item must be a sequence or :const:`None`.
The `concurrency` parameter controls how many statements will be executed
concurrently. When :attr:`.Cluster.protocol_version` is set to 1 or 2,
it is recommended that this be kept below 100 times the number of
core connections per host times the number of connected hosts (see
:meth:`.Cluster.set_core_connections_per_host`). If that amount is exceeded,
the event loop thread may attempt to block on new connection creation,
substantially impacting throughput. If :attr:`~.Cluster.protocol_version`
is 3 or higher, you can safely experiment with higher levels of concurrency.
If `raise_on_first_error` is left as :const:`True`, execution will stop
after the first failed statement and the corresponding exception will be
raised.
`results_generator` controls how the results are returned.
If :const:`False`, the results are returned only after all requests have completed.
    If :const:`True`, a generator is returned. Using a generator keeps the
    memory footprint bounded when the result set is large -- results are yielded
    as they return instead of materializing the entire list at once. The trade-off
    for the lower memory footprint is marginal CPU overhead (more thread
    coordination and sorting of out-of-order results on the fly).
A sequence of ``(success, result_or_exc)`` tuples is returned in the same
order that the statements were passed in. If ``success`` is :const:`False`,
there was an error executing the statement, and ``result_or_exc`` will be
an :class:`Exception`. If ``success`` is :const:`True`, ``result_or_exc``
will be the query result.
Example usage::
select_statement = session.prepare("SELECT * FROM users WHERE id=?")
statements_and_params = []
for user_id in user_ids:
params = (user_id, )
statements_and_params.append((select_statement, params))
results = execute_concurrent(
session, statements_and_params, raise_on_first_error=False)
for (success, result) in results:
if not success:
handle_error(result) # result will be an Exception
else:
process_user(result[0]) # result will be a list of rows
"""
if concurrency <= 0:
raise ValueError("concurrency must be greater than 0")
if not statements_and_parameters:
return []
executor = ConcurrentExecutorGenResults(session, statements_and_parameters) if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters)
return executor.execute(concurrency, raise_on_first_error)
class _ConcurrentExecutor(object):
def __init__(self, session, statements_and_params):
self.session = session
self._enum_statements = enumerate(iter(statements_and_params))
self._condition = Condition()
self._fail_fast = False
self._results_queue = []
self._current = 0
self._exec_count = 0
def execute(self, concurrency, fail_fast):
self._fail_fast = fail_fast
self._results_queue = []
self._current = 0
self._exec_count = 0
with self._condition:
for n in xrange(concurrency):
if not self._execute_next():
break
return self._results()
def _execute_next(self):
# lock must be held
try:
(idx, (statement, params)) = next(self._enum_statements)
self._exec_count += 1
self._execute(idx, statement, params)
return True
except StopIteration:
pass
def _execute(self, idx, statement, params):
try:
future = self.session.execute_async(statement, params, timeout=None)
args = (future, idx)
future.add_callbacks(
callback=self._on_success, callback_args=args,
errback=self._on_error, errback_args=args)
except Exception as exc:
# exc_info with fail_fast to preserve stack trace info when raising on the client thread
# (matches previous behavior -- not sure why we wouldn't want stack trace in the other case)
e = sys.exc_info() if self._fail_fast and six.PY2 else exc
self._put_result(e, idx, False)
def _on_success(self, result, future, idx):
future.clear_callbacks()
self._put_result(ResultSet(future, result), idx, True)
def _on_error(self, result, future, idx):
self._put_result(result, idx, False)
@staticmethod
def _raise(exc):
if six.PY2 and isinstance(exc, tuple):
(exc_type, value, traceback) = exc
six.reraise(exc_type, value, traceback)
else:
raise exc
class ConcurrentExecutorGenResults(_ConcurrentExecutor):
def _put_result(self, result, idx, success):
with self._condition:
heappush(self._results_queue, (idx, (success, result)))
self._execute_next()
self._condition.notify()
def _results(self):
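        # Completions can arrive out of order: _put_result pushes (idx, (success, result))
        # onto a min-heap, and this generator waits on the condition variable until the
        # next expected index reaches the top of the heap, so results are yielded in the
        # order the statements were submitted.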
with self._condition:
while self._current < self._exec_count:
while not self._results_queue or self._results_queue[0][0] != self._current:
self._condition.wait()
while self._results_queue and self._results_queue[0][0] == self._current:
_, res = heappop(self._results_queue)
try:
self._condition.release()
if self._fail_fast and not res[0]:
self._raise(res[1])
yield res
finally:
self._condition.acquire()
self._current += 1
class ConcurrentExecutorListResults(_ConcurrentExecutor):
_exception = None
def execute(self, concurrency, fail_fast):
self._exception = None
return super(ConcurrentExecutorListResults, self).execute(concurrency, fail_fast)
def _put_result(self, result, idx, success):
self._results_queue.append((idx, (success, result)))
with self._condition:
self._current += 1
if not success and self._fail_fast:
if not self._exception:
self._exception = result
self._condition.notify()
elif not self._execute_next() and self._current == self._exec_count:
self._condition.notify()
def _results(self):
with self._condition:
while self._current < self._exec_count:
self._condition.wait()
if self._exception and self._fail_fast:
self._raise(self._exception)
if self._exception and self._fail_fast: # raise the exception even if there was no wait
self._raise(self._exception)
return [r[1] for r in sorted(self._results_queue)]
def execute_concurrent_with_args(session, statement, parameters, *args, **kwargs):
"""
Like :meth:`~cassandra.concurrent.execute_concurrent()`, but takes a single
statement and a sequence of parameters. Each item in ``parameters``
should be a sequence or :const:`None`.
Example usage::
statement = session.prepare("INSERT INTO mytable (a, b) VALUES (1, ?)")
parameters = [(x,) for x in range(1000)]
execute_concurrent_with_args(session, statement, parameters, concurrency=50)
"""
return execute_concurrent(session, zip(cycle((statement,)), parameters), *args, **kwargs)
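# A minimal usage sketch, not part of the driver API above: consuming
# execute_concurrent() with results_generator=True. `session` and
# `statements_and_params` are assumed to exist as in the docstring example;
# the helper name below is hypothetical.
def _example_stream_results(session, statements_and_params):
    # With results_generator=True, (success, result_or_exc) pairs are yielded
    # in submission order as they complete, keeping memory usage bounded.
    results = execute_concurrent(session, statements_and_params,
                                 concurrency=50, raise_on_first_error=False,
                                 results_generator=True)
    for success, result_or_exc in results:
        if not success:
            log.warning("statement failed: %s", result_or_exc)
        else:
            for row in result_or_exc:
                pass  # process each row as it is received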
|
|
# Copyright (c) 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from spdx import annotation
from spdx import checksum
from spdx import creationinfo
from spdx import document
from spdx import file
from spdx import package
from spdx import relationship
from spdx import review
from spdx import snippet
from spdx import utils
from spdx import version
from spdx.document import ExternalDocumentRef
from spdx.parsers.builderexceptions import CardinalityError
from spdx.parsers.builderexceptions import OrderError
from spdx.parsers.builderexceptions import SPDXValueError
from spdx.parsers import validations
def checksum_from_sha1(value):
"""
Return an spdx.checksum.Algorithm instance representing the SHA1
    checksum, or None if the value does not match CHECKSUM_RE.
"""
# More constrained regex at lexer level
CHECKSUM_RE = re.compile("SHA1:\\s*([\\S]+)", re.UNICODE)
match = CHECKSUM_RE.match(value)
if match:
return checksum.Algorithm(identifier="SHA1", value=match.group(1))
else:
return None
def str_from_text(text):
"""
Return content of a free form text block as a string.
"""
REGEX = re.compile("<text>((.|\n)+)</text>", re.UNICODE)
match = REGEX.match(text)
if match:
return match.group(1)
else:
return None
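# Hedged illustration (not part of the parser) of the two helpers above,
# assuming well-formed input:
#   checksum_from_sha1("SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12")
#       returns a checksum.Algorithm with identifier "SHA1" and the hex digest as value.
#   str_from_text("<text>Sample free form text.</text>")
#       returns "Sample free form text."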
class DocBuilder(object):
"""
Set the fields of the top level document model.
"""
VERS_STR_REGEX = re.compile(r"SPDX-(\d+)\.(\d+)", re.UNICODE)
def __init__(self):
# FIXME: this state does not make sense
self.reset_document()
def set_doc_version(self, doc, value):
"""
Set the document version.
Raise SPDXValueError if malformed value.
Raise CardinalityError if already defined.
"""
if not self.doc_version_set:
self.doc_version_set = True
m = self.VERS_STR_REGEX.match(value)
if m is None:
raise SPDXValueError("Document::Version")
else:
doc.version = version.Version(
major=int(m.group(1)), minor=int(m.group(2))
)
return True
else:
raise CardinalityError("Document::Version")
def set_doc_data_lics(self, doc, lics):
"""
Set the document data license.
        Raise SPDXValueError if the value is malformed.
Raise CardinalityError if already defined.
"""
if not self.doc_data_lics_set:
self.doc_data_lics_set = True
if validations.validate_data_lics(lics):
doc.data_license = document.License.from_identifier(lics)
return True
else:
raise SPDXValueError("Document::DataLicense")
else:
raise CardinalityError("Document::DataLicense")
def set_doc_name(self, doc, name):
"""
Set the document name.
Raise CardinalityError if already defined.
"""
if not self.doc_name_set:
doc.name = name
self.doc_name_set = True
return True
else:
raise CardinalityError("Document::Name")
def set_doc_spdx_id(self, doc, doc_spdx_id_line):
"""
Set the document SPDX Identifier.
        Raise SPDXValueError if the value is malformed.
Raise CardinalityError if already defined.
"""
if not self.doc_spdx_id_set:
if doc_spdx_id_line == "SPDXRef-DOCUMENT":
doc.spdx_id = doc_spdx_id_line
self.doc_spdx_id_set = True
return True
else:
raise SPDXValueError("Document::SPDXID")
else:
raise CardinalityError("Document::SPDXID")
def set_doc_comment(self, doc, comment):
"""
Set document comment.
Raise CardinalityError if comment already set.
Raise SPDXValueError if comment is not free form text.
"""
if not self.doc_comment_set:
self.doc_comment_set = True
if validations.validate_doc_comment(comment):
doc.comment = str_from_text(comment)
return True
else:
raise SPDXValueError("Document::Comment")
else:
raise CardinalityError("Document::Comment")
def set_doc_namespace(self, doc, namespace):
"""
Set the document namespace.
Raise SPDXValueError if malformed value.
Raise CardinalityError if already defined.
"""
if not self.doc_namespace_set:
self.doc_namespace_set = True
if validations.validate_doc_namespace(namespace):
doc.namespace = namespace
return True
else:
raise SPDXValueError("Document::Namespace")
else:
raise CardinalityError("Document::Comment")
def reset_document(self):
"""
Reset the state to allow building new documents
"""
# FIXME: this state does not make sense
self.doc_version_set = False
self.doc_comment_set = False
self.doc_namespace_set = False
self.doc_data_lics_set = False
self.doc_name_set = False
self.doc_spdx_id_set = False
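# A minimal usage sketch (illustrative only, not taken from the library docs):
# driving DocBuilder against an spdx.document.Document instance. Each set_*
# method accepts a value once per document; a repeated call raises
# CardinalityError and a malformed value raises SPDXValueError. The literal
# values below are examples only.
#
#   builder = DocBuilder()
#   builder.set_doc_version(doc, "SPDX-2.1")
#   builder.set_doc_name(doc, "Example-Document")
#   builder.set_doc_spdx_id(doc, "SPDXRef-DOCUMENT")
#   builder.set_doc_namespace(doc, "https://example.com/spdx/example-document")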
class ExternalDocumentRefBuilder(object):
def set_ext_doc_id(self, doc, ext_doc_id):
"""
Set the `external_document_id` attribute of the `ExternalDocumentRef` object.
"""
doc.add_ext_document_reference(
ExternalDocumentRef(external_document_id=ext_doc_id)
)
def set_spdx_doc_uri(self, doc, spdx_doc_uri):
"""
Set the `spdx_document_uri` attribute of the `ExternalDocumentRef` object.
"""
if validations.validate_doc_namespace(spdx_doc_uri):
doc.ext_document_references[-1].spdx_document_uri = spdx_doc_uri
else:
raise SPDXValueError("Document::ExternalDocumentRef")
def set_chksum(self, doc, chksum):
"""
Set the `check_sum` attribute of the `ExternalDocumentRef` object.
"""
doc.ext_document_references[-1].check_sum = checksum_from_sha1(chksum)
def add_ext_doc_refs(self, doc, ext_doc_id, spdx_doc_uri, chksum):
self.set_ext_doc_id(doc, ext_doc_id)
self.set_spdx_doc_uri(doc, spdx_doc_uri)
self.set_chksum(doc, chksum)
class EntityBuilder(object):
tool_re = re.compile(r"Tool:\s*(.+)", re.UNICODE)
person_re = re.compile(r"Person:\s*(([^(])+)(\((.*)\))?", re.UNICODE)
org_re = re.compile(r"Organization:\s*(([^(])+)(\((.*)\))?", re.UNICODE)
PERSON_NAME_GROUP = 1
PERSON_EMAIL_GROUP = 4
ORG_NAME_GROUP = 1
ORG_EMAIL_GROUP = 4
TOOL_NAME_GROUP = 1
def build_tool(self, doc, entity):
"""
Build a tool object out of a string representation.
Return built tool.
        Raise SPDXValueError if the tool name cannot be extracted or is malformed.
"""
match = self.tool_re.match(entity)
if match and validations.validate_tool_name(match.group(self.TOOL_NAME_GROUP)):
name = match.group(self.TOOL_NAME_GROUP)
return creationinfo.Tool(name)
else:
raise SPDXValueError("Failed to extract tool name")
def build_org(self, doc, entity):
"""
        Build an organization object out of a string representation.
        Return the built organization.
        Raise SPDXValueError if the organization name cannot be extracted.
"""
match = self.org_re.match(entity)
if match and validations.validate_org_name(match.group(self.ORG_NAME_GROUP)):
name = match.group(self.ORG_NAME_GROUP).strip()
email = match.group(self.ORG_EMAIL_GROUP)
if (email is not None) and (len(email) != 0):
return creationinfo.Organization(name=name, email=email.strip())
else:
return creationinfo.Organization(name=name, email=None)
else:
raise SPDXValueError("Failed to extract Organization name")
def build_person(self, doc, entity):
"""
        Build a person object out of a string representation.
        Return the built person. Raise SPDXValueError if the name cannot be extracted.
"""
match = self.person_re.match(entity)
if match and validations.validate_person_name(
match.group(self.PERSON_NAME_GROUP)
):
name = match.group(self.PERSON_NAME_GROUP).strip()
email = match.group(self.PERSON_EMAIL_GROUP)
if (email is not None) and (len(email) != 0):
return creationinfo.Person(name=name, email=email.strip())
else:
return creationinfo.Person(name=name, email=None)
else:
raise SPDXValueError("Failed to extract person name")
class CreationInfoBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_creation_info()
def add_creator(self, doc, creator):
"""
Add a creator to the document's creation info.
Return true if creator is valid.
Creator must be built by an EntityBuilder.
Raise SPDXValueError if not a creator type.
"""
if validations.validate_creator(creator):
doc.creation_info.add_creator(creator)
return True
else:
raise SPDXValueError("CreationInfo::Creator")
def set_created_date(self, doc, created):
"""
Set created date.
Raise CardinalityError if created date already set.
Raise SPDXValueError if created is not a date.
"""
if not self.created_date_set:
self.created_date_set = True
date = utils.datetime_from_iso_format(created)
if date is not None:
doc.creation_info.created = date
return True
else:
raise SPDXValueError("CreationInfo::Date")
else:
raise CardinalityError("CreationInfo::Created")
def set_creation_comment(self, doc, comment):
"""
Set creation comment.
Raise CardinalityError if comment already set.
Raise SPDXValueError if not free form text.
"""
if not self.creation_comment_set:
self.creation_comment_set = True
if validations.validate_creation_comment(comment):
doc.creation_info.comment = str_from_text(comment)
return True
else:
raise SPDXValueError("CreationInfo::Comment")
else:
raise CardinalityError("CreationInfo::Comment")
def set_lics_list_ver(self, doc, value):
"""
Set the license list version.
Raise CardinalityError if already set.
Raise SPDXValueError if incorrect value.
"""
if not self.lics_list_ver_set:
self.lics_list_ver_set = True
vers = version.Version.from_str(value)
if vers is not None:
doc.creation_info.license_list_version = vers
return True
else:
raise SPDXValueError("CreationInfo::LicenseListVersion")
else:
raise CardinalityError("CreationInfo::LicenseListVersion")
def reset_creation_info(self):
"""
Reset builder state to allow building new creation info.
"""
# FIXME: this state does not make sense
self.created_date_set = False
self.creation_comment_set = False
self.lics_list_ver_set = False
class ReviewBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_reviews()
def reset_reviews(self):
"""
Reset the builder's state to allow building new reviews.
"""
# FIXME: this state does not make sense
self.review_date_set = False
self.review_comment_set = False
def add_reviewer(self, doc, reviewer):
"""
        Add a reviewer to the SPDX Document.
        Reviewer is an entity created by an EntityBuilder.
Raise SPDXValueError if not a valid reviewer type.
"""
# Each reviewer marks the start of a new review object.
# FIXME: this state does not make sense
self.reset_reviews()
if validations.validate_reviewer(reviewer):
doc.add_review(review.Review(reviewer=reviewer))
return True
else:
raise SPDXValueError("Review::Reviewer")
def add_review_date(self, doc, reviewed):
"""
Set the review date.
Raise CardinalityError if already set.
Raise OrderError if no reviewer defined before.
Raise SPDXValueError if invalid reviewed value.
"""
if len(doc.reviews) != 0:
if not self.review_date_set:
self.review_date_set = True
date = utils.datetime_from_iso_format(reviewed)
if date is not None:
doc.reviews[-1].review_date = date
return True
else:
raise SPDXValueError("Review::ReviewDate")
else:
raise CardinalityError("Review::ReviewDate")
else:
raise OrderError("Review::ReviewDate")
def add_review_comment(self, doc, comment):
"""
Set the review comment.
Raise CardinalityError if already set.
Raise OrderError if no reviewer defined before.
Raise SPDXValueError if comment is not free form text.
"""
if len(doc.reviews) != 0:
if not self.review_comment_set:
self.review_comment_set = True
if validations.validate_review_comment(comment):
doc.reviews[-1].comment = str_from_text(comment)
return True
else:
raise SPDXValueError("ReviewComment::Comment")
else:
raise CardinalityError("ReviewComment")
else:
raise OrderError("ReviewComment")
class AnnotationBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_annotations()
def reset_annotations(self):
"""
Reset the builder's state to allow building new annotations.
"""
# FIXME: this state does not make sense
self.annotation_date_set = False
self.annotation_comment_set = False
self.annotation_type_set = False
self.annotation_spdx_id_set = False
def add_annotator(self, doc, annotator):
"""
Add an annotator to the SPDX Document.
Annotator is an entity created by an EntityBuilder.
Raise SPDXValueError if not a valid annotator type.
"""
# Each annotator marks the start of a new annotation object.
# FIXME: this state does not make sense
self.reset_annotations()
if validations.validate_annotator(annotator):
doc.add_annotation(annotation.Annotation(annotator=annotator))
return True
else:
raise SPDXValueError("Annotation::Annotator")
def add_annotation_date(self, doc, annotation_date):
"""
Set the annotation date.
Raise CardinalityError if already set.
Raise OrderError if no annotator defined before.
Raise SPDXValueError if invalid value.
"""
if len(doc.annotations) != 0:
if not self.annotation_date_set:
self.annotation_date_set = True
date = utils.datetime_from_iso_format(annotation_date)
if date is not None:
doc.annotations[-1].annotation_date = date
return True
else:
raise SPDXValueError("Annotation::AnnotationDate")
else:
raise CardinalityError("Annotation::AnnotationDate")
else:
raise OrderError("Annotation::AnnotationDate")
def add_annotation_comment(self, doc, comment):
"""
Set the annotation comment.
Raise CardinalityError if already set.
Raise OrderError if no annotator defined before.
Raise SPDXValueError if comment is not free form text.
"""
if len(doc.annotations) != 0:
if not self.annotation_comment_set:
self.annotation_comment_set = True
if validations.validate_annotation_comment(comment):
doc.annotations[-1].comment = str_from_text(comment)
return True
else:
raise SPDXValueError("AnnotationComment::Comment")
else:
raise CardinalityError("AnnotationComment::Comment")
else:
raise OrderError("AnnotationComment::Comment")
def add_annotation_type(self, doc, annotation_type):
"""
Set the annotation type.
Raise CardinalityError if already set.
Raise OrderError if no annotator defined before.
Raise SPDXValueError if invalid value.
"""
if len(doc.annotations) != 0:
if not self.annotation_type_set:
self.annotation_type_set = True
if validations.validate_annotation_type(annotation_type):
doc.annotations[-1].annotation_type = annotation_type
return True
else:
raise SPDXValueError("Annotation::AnnotationType")
else:
raise CardinalityError("Annotation::AnnotationType")
else:
raise OrderError("Annotation::AnnotationType")
def set_annotation_spdx_id(self, doc, spdx_id):
"""
Set the annotation SPDX Identifier.
Raise CardinalityError if already set.
Raise OrderError if no annotator defined before.
"""
if len(doc.annotations) != 0:
if not self.annotation_spdx_id_set:
self.annotation_spdx_id_set = True
doc.annotations[-1].spdx_id = spdx_id
return True
else:
raise CardinalityError("Annotation::SPDXREF")
else:
raise OrderError("Annotation::SPDXREF")
class RelationshipBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_relationship()
def reset_relationship(self):
"""
Reset the builder's state to allow building new relationships.
"""
# FIXME: this state does not make sense
self.relationship_comment_set = False
def add_relationship(self, doc, relationship_term):
"""
Raise SPDXValueError if type is unknown.
"""
self.reset_relationship()
doc.add_relationships(relationship.Relationship(relationship_term))
return True
def add_relationship_comment(self, doc, comment):
"""
Set the annotation comment.
Raise CardinalityError if already set.
Raise OrderError if no relationship defined before it.
Raise SPDXValueError if comment is not free form text.
"""
if len(doc.relationships) != 0:
if not self.relationship_comment_set:
self.relationship_comment_set = True
if validations.validate_relationship_comment(comment):
doc.relationships[-1].comment = str_from_text(comment)
return True
else:
raise SPDXValueError("RelationshipComment::Comment")
else:
raise CardinalityError("RelationshipComment::Comment")
else:
raise OrderError("RelationshipComment::Comment")
class PackageBuilder(object):
VERIF_CODE_REGEX = re.compile(r"([0-9a-f]+)\s*(\(\s*(.+)\))?", re.UNICODE)
VERIF_CODE_CODE_GRP = 1
VERIF_CODE_EXC_FILES_GRP = 3
def __init__(self):
# FIXME: this state does not make sense
self.reset_package()
def reset_package(self):
"""Resets the builder's state in order to build new packages."""
# FIXME: this state does not make sense
self.package_set = False
self.package_spdx_id_set = False
self.package_vers_set = False
self.package_file_name_set = False
self.package_supplier_set = False
self.package_originator_set = False
self.package_down_location_set = False
self.package_files_analyzed_set = False
self.package_home_set = False
self.package_verif_set = False
self.package_chk_sum_set = False
self.package_source_info_set = False
self.package_conc_lics_set = False
self.package_license_declared_set = False
self.package_license_comment_set = False
self.package_cr_text_set = False
self.package_summary_set = False
self.package_desc_set = False
self.package_comment_set = False
# self.package_attribution_text_set = False
self.pkg_ext_comment_set = False
def create_package(self, doc, name):
"""
Create a package for the SPDX Document.
name - any string.
Raise CardinalityError if package already defined.
"""
self.package_set = True
doc.add_package(package.Package(name=name))
return True
def set_pkg_spdx_id(self, doc, spdx_id):
"""
Set the Package SPDX Identifier.
Raise SPDXValueError if malformed value.
Raise CardinalityError if already defined.
"""
self.assert_package_exists()
if not self.package_spdx_id_set:
if validations.validate_pkg_spdx_id(spdx_id):
doc.packages[-1].spdx_id = spdx_id
self.package_spdx_id_set = True
return True
else:
raise SPDXValueError("Package::SPDXID")
else:
raise CardinalityError("Package::SPDXID")
def set_pkg_vers(self, doc, version):
"""
Set package version, if not already set.
version - Any string.
Raise CardinalityError if already has a version.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_vers_set:
self.package_vers_set = True
doc.packages[-1].version = version
return True
else:
raise CardinalityError("Package::Version")
def set_pkg_file_name(self, doc, name):
"""
Set the package file name, if not already set.
name - Any string.
Raise CardinalityError if already has a file_name.
        Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_file_name_set:
self.package_file_name_set = True
doc.packages[-1].file_name = name
return True
else:
raise CardinalityError("Package::FileName")
def set_pkg_supplier(self, doc, entity):
"""
Set the package supplier, if not already set.
entity - Organization, Person or NoAssert.
Raise CardinalityError if already has a supplier.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_supplier_set:
self.package_supplier_set = True
if validations.validate_pkg_supplier(entity):
doc.packages[-1].supplier = entity
return True
else:
raise SPDXValueError("Package::Supplier")
else:
raise CardinalityError("Package::Supplier")
def set_pkg_originator(self, doc, entity):
"""
Set the package originator, if not already set.
entity - Organization, Person or NoAssert.
Raise CardinalityError if already has an originator.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_originator_set:
self.package_originator_set = True
if validations.validate_pkg_originator(entity):
doc.packages[-1].originator = entity
return True
else:
raise SPDXValueError("Package::Originator")
else:
raise CardinalityError("Package::Originator")
def set_pkg_down_location(self, doc, location):
"""
Set the package download location, if not already set.
location - A string
Raise CardinalityError if already defined.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_down_location_set:
self.package_down_location_set = True
doc.packages[-1].download_location = location
return True
else:
raise CardinalityError("Package::DownloadLocation")
def set_pkg_files_analyzed(self, doc, files_analyzed):
"""
Set the package files analyzed, if not already set.
Raise SPDXValueError if malformed value, CardinalityError if
already defined.
"""
self.assert_package_exists()
if not self.package_files_analyzed_set:
if files_analyzed is not None:
if validations.validate_pkg_files_analyzed(files_analyzed):
self.package_files_analyzed_set = True
if isinstance(files_analyzed, str):
files_analyzed = files_analyzed.lower() == "true"
doc.packages[-1].files_analyzed = files_analyzed
# convert to boolean;
# validate_pkg_files_analyzed already checked if
# files_analyzed is in ['True', 'true', 'False', 'false']
return True
else:
raise SPDXValueError("Package::FilesAnalyzed")
else:
raise CardinalityError("Package::FilesAnalyzed")
def set_pkg_home(self, doc, location):
"""Set the package homepage location if not already set.
location - A string or None or NoAssert.
Raise CardinalityError if already defined.
Raise OrderError if no package previously defined.
Raise SPDXValueError if location has incorrect value.
"""
self.assert_package_exists()
if not self.package_home_set:
self.package_home_set = True
if validations.validate_pkg_homepage(location):
doc.packages[-1].homepage = location
return True
else:
raise SPDXValueError("Package::HomePage")
else:
raise CardinalityError("Package::HomePage")
def set_pkg_verif_code(self, doc, code):
"""
Set the package verification code, if not already set.
code - A string.
Raise CardinalityError if already defined.
Raise OrderError if no package previously defined.
        Raise SPDXValueError if the code does not match the verification code format.
"""
self.assert_package_exists()
if not self.package_verif_set:
self.package_verif_set = True
match = self.VERIF_CODE_REGEX.match(code)
if match:
doc.packages[-1].verif_code = match.group(self.VERIF_CODE_CODE_GRP)
if match.group(self.VERIF_CODE_EXC_FILES_GRP) is not None:
doc.packages[-1].verif_exc_files = match.group(
self.VERIF_CODE_EXC_FILES_GRP
).split(",")
return True
else:
raise SPDXValueError("Package::VerificationCode")
else:
raise CardinalityError("Package::VerificationCode")
def set_pkg_chk_sum(self, doc, chk_sum):
"""
Set the package check sum, if not already set.
chk_sum - A string
Raise CardinalityError if already defined.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_chk_sum_set:
self.package_chk_sum_set = True
doc.packages[-1].check_sum = checksum_from_sha1(chk_sum)
return True
else:
raise CardinalityError("Package::CheckSum")
def set_pkg_source_info(self, doc, text):
"""
Set the package's source information, if not already set.
text - Free form text.
Raise CardinalityError if already defined.
Raise OrderError if no package previously defined.
SPDXValueError if text is not free form text.
"""
self.assert_package_exists()
if not self.package_source_info_set:
self.package_source_info_set = True
if validations.validate_pkg_src_info(text):
doc.packages[-1].source_info = str_from_text(text)
return True
else:
raise SPDXValueError("Pacckage::SourceInfo")
else:
raise CardinalityError("Package::SourceInfo")
def set_pkg_licenses_concluded(self, doc, licenses):
"""
Set the package's concluded licenses.
licenses - License info.
Raise CardinalityError if already defined.
Raise OrderError if no package previously defined.
Raise SPDXValueError if data malformed.
"""
self.assert_package_exists()
if not self.package_conc_lics_set:
self.package_conc_lics_set = True
if validations.validate_lics_conc(licenses):
doc.packages[-1].conc_lics = licenses
return True
else:
raise SPDXValueError("Package::ConcludedLicenses")
else:
raise CardinalityError("Package::ConcludedLicenses")
def set_pkg_license_from_file(self, doc, lic):
"""
Add a license from a file to the package.
Raise SPDXValueError if data malformed.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if validations.validate_lics_from_file(lic):
doc.packages[-1].licenses_from_files.append(lic)
return True
else:
raise SPDXValueError("Package::LicensesFromFile")
def set_pkg_license_declared(self, doc, lic):
"""
Set the package's declared license.
Raise SPDXValueError if data malformed.
Raise OrderError if no package previously defined.
Raise CardinalityError if already set.
"""
self.assert_package_exists()
if not self.package_license_declared_set:
self.package_license_declared_set = True
if validations.validate_lics_conc(lic):
doc.packages[-1].license_declared = lic
return True
else:
raise SPDXValueError("Package::LicenseDeclared")
else:
raise CardinalityError("Package::LicenseDeclared")
def set_pkg_license_comment(self, doc, text):
"""
Set the package's license comment.
Raise OrderError if no package previously defined.
Raise CardinalityError if already set.
Raise SPDXValueError if text is not free form text.
"""
self.assert_package_exists()
if not self.package_license_comment_set:
self.package_license_comment_set = True
if validations.validate_pkg_lics_comment(text):
doc.packages[-1].license_comment = str_from_text(text)
return True
else:
raise SPDXValueError("Package::LicenseComment")
else:
raise CardinalityError("Package::LicenseComment")
def set_pkg_attribution_text(self, doc, text):
"""
        Set the package's attribution text.
Raise SPDXValueError if text is not free form text.
"""
self.assert_package_exists()
if validations.validate_pkg_attribution_text(text):
doc.packages[-1].attribution_text = str_from_text(text)
return True
else:
raise SPDXValueError("Package::AttributionText")
def set_pkg_cr_text(self, doc, text):
"""
Set the package's copyright text.
Raise OrderError if no package previously defined.
Raise CardinalityError if already set.
        Raise SPDXValueError if text is not one of [None, NOASSERT, TEXT].
"""
self.assert_package_exists()
if not self.package_cr_text_set:
self.package_cr_text_set = True
if validations.validate_pkg_cr_text(text):
if isinstance(text, str):
doc.packages[-1].cr_text = str_from_text(text)
else:
doc.packages[-1].cr_text = text # None or NoAssert
else:
raise SPDXValueError("Package::CopyrightText")
else:
raise CardinalityError("Package::CopyrightText")
def set_pkg_summary(self, doc, text):
"""
Set the package summary.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if summary already set.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_summary_set:
self.package_summary_set = True
if validations.validate_pkg_summary(text):
doc.packages[-1].summary = str_from_text(text)
else:
raise SPDXValueError("Package::Summary")
else:
raise CardinalityError("Package::Summary")
def set_pkg_desc(self, doc, text):
"""
Set the package's description.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if description already set.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_desc_set:
self.package_desc_set = True
if validations.validate_pkg_desc(text):
doc.packages[-1].description = str_from_text(text)
else:
raise SPDXValueError("Package::Description")
else:
raise CardinalityError("Package::Description")
def set_pkg_comment(self, doc, text):
"""
Set the package's comment.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if comment already set.
Raise OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_comment_set:
self.package_comment_set = True
if validations.validate_pkg_comment(text):
doc.packages[-1].comment = str_from_text(text)
else:
raise SPDXValueError("Package::Comment")
else:
raise CardinalityError("Package::Comment")
def set_pkg_ext_ref_category(self, doc, category):
"""
Set the `category` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if validations.validate_pkg_ext_ref_category(category):
if (
len(doc.packages[-1].pkg_ext_refs)
and doc.packages[-1].pkg_ext_refs[-1].category is None
):
doc.packages[-1].pkg_ext_refs[-1].category = category
else:
doc.packages[-1].add_pkg_ext_refs(
package.ExternalPackageRef(category=category)
)
else:
raise SPDXValueError("ExternalRef::Category")
def set_pkg_ext_ref_type(self, doc, pkg_ext_ref_type):
"""
Set the `pkg_ext_ref_type` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if validations.validate_pkg_ext_ref_type(pkg_ext_ref_type):
if (
len(doc.packages[-1].pkg_ext_refs)
and doc.packages[-1].pkg_ext_refs[-1].pkg_ext_ref_type is None
):
doc.packages[-1].pkg_ext_refs[-1].pkg_ext_ref_type = pkg_ext_ref_type
else:
doc.packages[-1].add_pkg_ext_refs(
package.ExternalPackageRef(pkg_ext_ref_type=pkg_ext_ref_type)
)
else:
raise SPDXValueError("ExternalRef::Type")
def set_pkg_ext_ref_locator(self, doc, locator):
"""
Set the `locator` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if (
len(doc.packages[-1].pkg_ext_refs)
and doc.packages[-1].pkg_ext_refs[-1].locator is None
):
doc.packages[-1].pkg_ext_refs[-1].locator = locator
else:
doc.packages[-1].add_pkg_ext_refs(package.ExternalPackageRef(locator=locator))
def add_pkg_ext_ref_comment(self, doc, comment):
"""
Set the `comment` attribute of the `ExternalPackageRef` object.
"""
self.assert_package_exists()
if not len(doc.packages[-1].pkg_ext_refs):
raise OrderError("Package::ExternalRef")
else:
if validations.validate_pkg_ext_ref_comment(comment):
doc.packages[-1].pkg_ext_refs[-1].comment = str_from_text(comment)
else:
raise SPDXValueError("ExternalRef::Comment")
def add_pkg_ext_refs(self, doc, category, pkg_ext_ref_type, locator):
self.set_pkg_ext_ref_category(doc, category)
self.set_pkg_ext_ref_type(doc, pkg_ext_ref_type)
self.set_pkg_ext_ref_locator(doc, locator)
def assert_package_exists(self):
if not self.package_set:
raise OrderError("Package")
class FileBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_file_stat()
def set_file_name(self, doc, name):
"""
Raise OrderError if no package defined.
"""
if self.has_package(doc):
doc.packages[-1].files.append(file.File(name))
# A file name marks the start of a new file instance.
# The builder must be reset
# FIXME: this state does not make sense
self.reset_file_stat()
return True
else:
raise OrderError("File::Name")
def set_file_spdx_id(self, doc, spdx_id):
"""
Set the file SPDX Identifier.
Raise OrderError if no package or no file defined.
Raise SPDXValueError if malformed value.
Raise CardinalityError if more than one spdx_id set.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_spdx_id_set:
self.file_spdx_id_set = True
if validations.validate_file_spdx_id(spdx_id):
self.file(doc).spdx_id = spdx_id
return True
else:
raise SPDXValueError("File::SPDXID")
else:
raise CardinalityError("File::SPDXID")
else:
raise OrderError("File::SPDXID")
def set_file_comment(self, doc, text):
"""
Raise OrderError if no package or no file defined.
Raise CardinalityError if more than one comment set.
Raise SPDXValueError if text is not free form text.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_comment_set:
self.file_comment_set = True
if validations.validate_file_comment(text):
self.file(doc).comment = str_from_text(text)
return True
else:
raise SPDXValueError("File::Comment")
else:
raise CardinalityError("File::Comment")
else:
raise OrderError("File::Comment")
def set_file_attribution_text(self, doc, text):
"""
        Set the file's attribution text.
Raise SPDXValueError if text is not free form text.
"""
if self.has_package(doc) and self.has_file(doc):
if validations.validate_file_attribution_text(text):
                self.file(doc).attribution_text = str_from_text(text)
return True
else:
raise SPDXValueError("File::AttributionText")
def set_file_type(self, doc, type_value):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if more than one type set.
Raise SPDXValueError if type is unknown.
"""
type_dict = {
"SOURCE": file.FileType.SOURCE,
"BINARY": file.FileType.BINARY,
"ARCHIVE": file.FileType.ARCHIVE,
"OTHER": file.FileType.OTHER,
}
if self.has_package(doc) and self.has_file(doc):
if not self.file_type_set:
self.file_type_set = True
                if type_value in type_dict:
self.file(doc).type = type_dict[type_value]
return True
else:
raise SPDXValueError("File::Type")
else:
raise CardinalityError("File::Type")
else:
raise OrderError("File::Type")
def set_file_chksum(self, doc, chksum):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if more than one chksum set.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_chksum_set:
self.file_chksum_set = True
self.file(doc).chk_sum = checksum_from_sha1(chksum)
return True
else:
raise CardinalityError("File::CheckSum")
else:
raise OrderError("File::CheckSum")
def set_concluded_license(self, doc, lic):
"""
Raise OrderError if no package or file defined.
Raise CardinalityError if already set.
Raise SPDXValueError if malformed.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_conc_lics_set:
self.file_conc_lics_set = True
if validations.validate_lics_conc(lic):
self.file(doc).conc_lics = lic
return True
else:
raise SPDXValueError("File::ConcludedLicense")
else:
raise CardinalityError("File::ConcludedLicense")
else:
raise OrderError("File::ConcludedLicense")
def set_file_license_in_file(self, doc, lic):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if malformed value.
"""
if self.has_package(doc) and self.has_file(doc):
if validations.validate_file_lics_in_file(lic):
self.file(doc).add_lics(lic)
return True
else:
raise SPDXValueError("File::LicenseInFile")
else:
raise OrderError("File::LicenseInFile")
def set_file_license_comment(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if text is not free form text.
Raise CardinalityError if more than one per file.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_license_comment_set:
self.file_license_comment_set = True
if validations.validate_file_lics_comment(text):
self.file(doc).license_comment = str_from_text(text)
else:
raise SPDXValueError("File::LicenseComment")
else:
raise CardinalityError("File::LicenseComment")
else:
raise OrderError("File::LicenseComment")
def set_file_copyright(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if not free form text or NONE or NO_ASSERT.
Raise CardinalityError if more than one.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_copytext_set:
self.file_copytext_set = True
if validations.validate_file_cpyright(text):
if isinstance(text, str):
self.file(doc).copyright = str_from_text(text)
else:
self.file(doc).copyright = text # None or NoAssert
return True
else:
raise SPDXValueError("File::CopyRight")
else:
raise CardinalityError("File::CopyRight")
else:
raise OrderError("File::CopyRight")
def set_file_notice(self, doc, text):
"""
Raise OrderError if no package or file defined.
Raise SPDXValueError if not free form text.
Raise CardinalityError if more than one.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_notice_set:
self.file_notice_set = True
if validations.validate_file_notice(text):
self.file(doc).notice = str_from_text(text)
else:
raise SPDXValueError("File::Notice")
else:
raise CardinalityError("File::Notice")
else:
raise OrderError("File::Notice")
def add_file_contribution(self, doc, value):
"""
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_contrib(value)
else:
raise OrderError("File::Contributor")
def add_file_dep(self, doc, value):
"""
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_depend(value)
else:
raise OrderError("File::Dependency")
def set_file_atrificat_of_project(self, doc, symbol, value):
"""
        Set a file name, URI, or home artifact.
Raise OrderError if no package or file defined.
"""
if self.has_package(doc) and self.has_file(doc):
self.file(doc).add_artifact(symbol, value)
else:
raise OrderError("File::Artificat")
def file(self, doc):
"""
Return the last file in the document's package's file list.
"""
return doc.packages[-1].files[-1]
def has_file(self, doc):
"""
Return true if the document's package has at least one file.
Does not test if the document has a package.
"""
return len(doc.packages[-1].files) != 0
def has_package(self, doc):
"""
Return true if the document has a package.
"""
return len(doc.packages) != 0
def reset_file_stat(self):
"""
Reset the builder's state to enable building new files.
"""
# FIXME: this state does not make sense
self.file_spdx_id_set = False
self.file_comment_set = False
self.file_type_set = False
self.file_chksum_set = False
self.file_conc_lics_set = False
self.file_license_comment_set = False
self.file_notice_set = False
self.file_copytext_set = False
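    # Usage sketch (illustrative only, not called by the parser): a file record
    # is opened with set_file_name and then filled in field by field, with each
    # setter enforcing ordering (OrderError) and cardinality (CardinalityError).
    # `builder` and `doc` stand for a FileBuilder-derived instance and the
    # document being populated; the literal values are assumed to satisfy the
    # corresponding validations, with free-form text wrapped in <text>...</text>
    # as expected by str_from_text.
    #
    #   builder.set_file_name(doc, "./src/main.c")
    #   builder.set_file_spdx_id(doc, "SPDXRef-File-main")
    #   builder.set_file_type(doc, "SOURCE")
    #   builder.set_file_chksum(doc, "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12")
    #   builder.set_file_copyright(doc, "<text>Copyright 2018 Example Corp.</text>")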
class LicenseBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_extr_lics()
def extr_lic(self, doc):
"""
Retrieve last license in extracted license list.
"""
return doc.extracted_licenses[-1]
def has_extr_lic(self, doc):
return len(doc.extracted_licenses) != 0
def set_lic_id(self, doc, lic_id):
"""
Add a new extracted license to the document.
Raise SPDXValueError if data format is incorrect.
"""
# FIXME: this state does not make sense
self.reset_extr_lics()
if validations.validate_extracted_lic_id(lic_id):
doc.add_extr_lic(document.ExtractedLicense(lic_id))
return True
else:
raise SPDXValueError("ExtractedLicense::id")
def set_lic_text(self, doc, text):
"""
Set license extracted text.
Raise SPDXValueError if text is not free form text.
Raise OrderError if no license ID defined.
"""
if self.has_extr_lic(doc):
if not self.extr_text_set:
self.extr_text_set = True
if validations.validate_is_free_form_text(text):
self.extr_lic(doc).text = str_from_text(text)
return True
else:
raise SPDXValueError("ExtractedLicense::text")
else:
raise CardinalityError("ExtractedLicense::text")
else:
raise OrderError("ExtractedLicense::text")
def set_lic_name(self, doc, name):
"""
Set license name.
        Raise SPDXValueError if name is not str or utils.NoAssert.
Raise OrderError if no license id defined.
"""
if self.has_extr_lic(doc):
if not self.extr_lic_name_set:
self.extr_lic_name_set = True
if validations.validate_extr_lic_name(name):
self.extr_lic(doc).full_name = name
return True
else:
raise SPDXValueError("ExtractedLicense::Name")
else:
raise CardinalityError("ExtractedLicense::Name")
else:
raise OrderError("ExtractedLicense::Name")
def set_lic_comment(self, doc, comment):
"""
Set license comment.
Raise SPDXValueError if comment is not free form text.
Raise OrderError if no license ID defined.
"""
if self.has_extr_lic(doc):
if not self.extr_lic_comment_set:
self.extr_lic_comment_set = True
if validations.validate_is_free_form_text(comment):
self.extr_lic(doc).comment = str_from_text(comment)
return True
else:
raise SPDXValueError("ExtractedLicense::comment")
else:
raise CardinalityError("ExtractedLicense::comment")
else:
raise OrderError("ExtractedLicense::comment")
def add_lic_xref(self, doc, ref):
"""
Add a license cross reference.
Raise OrderError if no License ID defined.
"""
if self.has_extr_lic(doc):
self.extr_lic(doc).add_xref(ref)
return True
else:
raise OrderError("ExtractedLicense::CrossRef")
def reset_extr_lics(self):
# FIXME: this state does not make sense
self.extr_text_set = False
self.extr_lic_name_set = False
self.extr_lic_comment_set = False
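    # Usage sketch (illustrative only, not called by the parser): set_lic_id
    # starts a new extracted license (and resets the per-license flags), after
    # which the text, name, and cross references can be attached. The id and
    # text below are assumed to satisfy the corresponding validations
    # ("LicenseRef-" prefix, <text>...</text> wrapping).
    #
    #   builder.set_lic_id(doc, "LicenseRef-Example-1")
    #   builder.set_lic_text(doc, "<text>Example license text.</text>")
    #   builder.set_lic_name(doc, "Example License 1")
    #   builder.add_lic_xref(doc, "http://example.com/licenses/example-1")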
class SnippetBuilder(object):
def __init__(self):
# FIXME: this state does not make sense
self.reset_snippet()
def create_snippet(self, doc, spdx_id):
"""
Create a snippet for the SPDX Document.
        spdx_id - Uniquely identifies the snippet so that it can be referenced
            by other elements in the SPDX document.
Raise SPDXValueError if the data is a malformed value.
"""
self.reset_snippet()
spdx_id = spdx_id.split("#")[-1]
if validations.validate_snippet_spdx_id(spdx_id):
doc.add_snippet(snippet.Snippet(spdx_id=spdx_id))
self.snippet_spdx_id_set = True
return True
else:
raise SPDXValueError("Snippet::SnippetSPDXID")
def set_snippet_name(self, doc, name):
"""
Set name of the snippet.
Raise OrderError if no snippet previously defined.
Raise CardinalityError if the name is already set.
"""
self.assert_snippet_exists()
if not self.snippet_name_set:
self.snippet_name_set = True
doc.snippet[-1].name = name
return True
else:
raise CardinalityError("SnippetName")
def set_snippet_comment(self, doc, comment):
"""
Set general comments about the snippet.
Raise OrderError if no snippet previously defined.
Raise SPDXValueError if the data is a malformed value.
Raise CardinalityError if comment already set.
"""
self.assert_snippet_exists()
if not self.snippet_comment_set:
self.snippet_comment_set = True
if validations.validate_snip_comment(comment):
doc.snippet[-1].comment = str_from_text(comment)
return True
else:
raise SPDXValueError("Snippet::SnippetComment")
else:
raise CardinalityError("Snippet::SnippetComment")
def set_snippet_attribution_text(self, doc, text):
"""
        Set the snippet's attribution text.
Raise SPDXValueError if text is not free form text.
"""
self.assert_snippet_exists()
if validations.validate_snippet_attribution_text(text):
doc.snippet[-1].attribution_text = str_from_text(text)
return True
else:
raise SPDXValueError("Snippet::AttributionText")
    def set_snippet_copyright(self, doc, text):
        """
        Set the snippet's copyright text.
Raise OrderError if no snippet previously defined.
Raise CardinalityError if already set.
Raise SPDXValueError if text is not one of [None, NOASSERT, TEXT].
"""
self.assert_snippet_exists()
if not self.snippet_copyright_set:
self.snippet_copyright_set = True
if validations.validate_snippet_copyright(text):
if isinstance(text, str):
doc.snippet[-1].copyright = str_from_text(text)
else:
doc.snippet[-1].copyright = text # None or NoAssert
else:
raise SPDXValueError("Snippet::SnippetCopyrightText")
else:
raise CardinalityError("Snippet::SnippetCopyrightText")
def set_snippet_lic_comment(self, doc, text):
"""
Set the snippet's license comment.
Raise OrderError if no snippet previously defined.
Raise CardinalityError if already set.
Raise SPDXValueError if the data is a malformed value.
"""
self.assert_snippet_exists()
if not self.snippet_lic_comment_set:
self.snippet_lic_comment_set = True
if validations.validate_snip_lic_comment(text):
doc.snippet[-1].license_comment = str_from_text(text)
return True
else:
raise SPDXValueError("Snippet::SnippetLicenseComments")
else:
raise CardinalityError("Snippet::SnippetLicenseComments")
def set_snip_from_file_spdxid(self, doc, snip_from_file_spdxid):
"""
Set the snippet's 'Snippet from File SPDX Identifier'.
Raise OrderError if no snippet previously defined.
Raise CardinalityError if already set.
Raise SPDXValueError if the data is a malformed value.
"""
self.assert_snippet_exists()
snip_from_file_spdxid = snip_from_file_spdxid.split("#")[-1]
if not self.snip_file_spdxid_set:
self.snip_file_spdxid_set = True
if validations.validate_snip_file_spdxid(snip_from_file_spdxid):
doc.snippet[-1].snip_from_file_spdxid = snip_from_file_spdxid
return True
else:
raise SPDXValueError("Snippet::SnippetFromFileSPDXID")
else:
raise CardinalityError("Snippet::SnippetFromFileSPDXID")
def set_snip_concluded_license(self, doc, conc_lics):
"""
Raise OrderError if no snippet previously defined.
Raise CardinalityError if already set.
Raise SPDXValueError if the data is a malformed value.
"""
self.assert_snippet_exists()
if not self.snippet_conc_lics_set:
self.snippet_conc_lics_set = True
if validations.validate_lics_conc(conc_lics):
doc.snippet[-1].conc_lics = conc_lics
return True
else:
raise SPDXValueError("Snippet::SnippetLicenseConcluded")
else:
raise CardinalityError("Snippet::SnippetLicenseConcluded")
def set_snippet_lics_info(self, doc, lics_info):
"""
Raise OrderError if no snippet previously defined.
Raise SPDXValueError if the data is a malformed value.
"""
self.assert_snippet_exists()
if validations.validate_snip_lics_info(lics_info):
doc.snippet[-1].add_lics(lics_info)
return True
else:
raise SPDXValueError("Snippet::LicenseInfoInSnippet")
def reset_snippet(self):
# FIXME: this state does not make sense
self.snippet_spdx_id_set = False
self.snippet_name_set = False
self.snippet_comment_set = False
self.snippet_copyright_set = False
self.snippet_lic_comment_set = False
self.snip_file_spdxid_set = False
self.snippet_conc_lics_set = False
def assert_snippet_exists(self):
if not self.snippet_spdx_id_set:
raise OrderError("Snippet")
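    # Usage sketch (illustrative only, not called by the parser): create_snippet
    # must run first, since it sets snippet_spdx_id_set, which
    # assert_snippet_exists checks; the remaining setters may then be called
    # once each. The identifiers and text are assumed to satisfy the
    # corresponding validations.
    #
    #   builder.create_snippet(doc, "SPDXRef-Snippet-1")
    #   builder.set_snip_from_file_spdxid(doc, "SPDXRef-File-main")
    #   builder.set_snippet_name(doc, "example snippet")
    #   builder.set_snippet_comment(doc, "<text>Extracted from main.c</text>")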
class Builder(
DocBuilder,
CreationInfoBuilder,
EntityBuilder,
ReviewBuilder,
PackageBuilder,
FileBuilder,
LicenseBuilder,
SnippetBuilder,
ExternalDocumentRefBuilder,
AnnotationBuilder,
RelationshipBuilder,
):
"""
SPDX document builder.
"""
def __init__(self):
super(Builder, self).__init__()
# FIXME: this state does not make sense
self.reset()
def reset(self):
"""
Reset builder's state for building new documents.
        Must be called before reusing the builder for a different document.
"""
# FIXME: this state does not make sense
self.reset_creation_info()
self.reset_document()
self.reset_package()
self.reset_file_stat()
self.reset_reviews()
self.reset_annotations()
self.reset_extr_lics()
self.reset_snippet()
self.reset_relationship()
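# Usage sketch (hypothetical helper, not part of the library): Builder mixes in
# every section builder above, so a single instance can drive a whole parse, and
# reset() must be called before reusing it for another document so that the
# per-section cardinality flags start clean. The license id/text literals are
# illustrative and assumed to satisfy the corresponding validations.
def _example_reuse_builder(builder, docs):
    for doc in docs:
        # Clear per-document builder state before populating this document.
        builder.reset()
        builder.set_lic_id(doc, "LicenseRef-Example-1")
        builder.set_lic_text(doc, "<text>Example license text.</text>")
    return docs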