# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from solum.openstack.common import fileutils
from solum.openstack.common.gettextutils import _ # noqa
from solum.openstack.common import local
from solum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("SOLUM_LOCK_PATH"),
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
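# A minimal usage sketch for the context manager above. The resource name,
# prefix and lock_path below are hypothetical; real callers normally take
# them from configuration.
def _example_external_lock_usage():
    with lock('database-migration', lock_file_prefix='example-',
              external=True, lock_path='/tmp/example-locks'):
        # Only one process (and one thread per process) enters this block at
        # a time; the file lock is dropped when the block exits, and also if
        # the process crashes while holding it.
        pass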
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["SOLUM_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Erik Zaadi <[email protected]>
# Avishay Traeger <[email protected]>
"""
Unified Volume driver for IBM XIV and DS8K Storage Systems.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.volume import driver
from cinder.volume.drivers.san import san
xiv_ds8k_opts = [
cfg.StrOpt(
'xiv_ds8k_proxy',
default='xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy',
help='Proxy driver that connects to the IBM Storage Array'),
cfg.StrOpt(
'xiv_ds8k_connection_type',
default='iscsi',
choices=['fibre_channel', 'iscsi'],
help='Connection type to the IBM Storage Array'),
cfg.StrOpt(
'xiv_chap',
default='disabled',
choices=['disabled', 'enabled'],
help='CHAP authentication mode, effective only for iscsi'
' (disabled|enabled)'),
cfg.StrOpt(
'management_ips',
default='',
help='List of Management IP addresses (separated by commas)'),
]
CONF = cfg.CONF
CONF.register_opts(xiv_ds8k_opts)
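# Illustrative only: with the options registered above, a backend section in
# cinder.conf might look like the following (the section name, driver path and
# credential values here are assumptions for the example, not requirements):
#
#     [xiv_backend_1]
#     volume_driver = cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver
#     xiv_ds8k_proxy = xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy
#     xiv_ds8k_connection_type = iscsi
#     xiv_chap = disabled
#     san_ip = 10.0.0.10
#     san_login = admin
#     san_password = secret
#     san_clustername = example_pool
#     management_ips = 10.0.0.11,10.0.0.12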
LOG = logging.getLogger(__name__)
class XIVDS8KDriver(san.SanDriver,
driver.ManageableVD,
driver.ExtendVD,
driver.SnapshotVD,
driver.MigrateVD,
driver.ReplicaVD,
driver.ConsistencyGroupVD,
driver.CloneableImageVD,
driver.TransferVD):
"""Unified IBM XIV and DS8K volume driver."""
def __init__(self, *args, **kwargs):
"""Initialize the driver."""
super(XIVDS8KDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(xiv_ds8k_opts)
proxy = importutils.import_class(self.configuration.xiv_ds8k_proxy)
# NOTE: All Array specific configurations are prefixed with:
# "xiv_ds8k_array_"
# These additional flags should be specified in the cinder.conf
# preferably in each backend configuration.
self.xiv_ds8k_proxy = proxy(
{
"xiv_ds8k_user": self.configuration.san_login,
"xiv_ds8k_pass": self.configuration.san_password,
"xiv_ds8k_address": self.configuration.san_ip,
"xiv_ds8k_vol_pool": self.configuration.san_clustername,
"xiv_ds8k_connection_type":
self.configuration.xiv_ds8k_connection_type,
"xiv_chap": self.configuration.xiv_chap,
"management_ips": self.configuration.management_ips
},
LOG,
exception,
driver=self)
def do_setup(self, context):
"""Setup and verify IBM XIV and DS8K Storage connection."""
self.xiv_ds8k_proxy.setup(context)
def ensure_export(self, context, volume):
"""Ensure an export."""
return self.xiv_ds8k_proxy.ensure_export(context, volume)
def create_export(self, context, volume, connector):
"""Create an export."""
return self.xiv_ds8k_proxy.create_export(context, volume)
def create_volume(self, volume):
"""Create a volume on the IBM XIV and DS8K Storage system."""
return self.xiv_ds8k_proxy.create_volume(volume)
def delete_volume(self, volume):
"""Delete a volume on the IBM XIV and DS8K Storage system."""
self.xiv_ds8k_proxy.delete_volume(volume)
def remove_export(self, context, volume):
"""Disconnect a volume from an attached instance."""
return self.xiv_ds8k_proxy.remove_export(context, volume)
def initialize_connection(self, volume, connector):
"""Map the created volume."""
return self.xiv_ds8k_proxy.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate a connection to a volume."""
return self.xiv_ds8k_proxy.terminate_connection(volume, connector)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
return self.xiv_ds8k_proxy.create_volume_from_snapshot(
volume,
snapshot)
def create_snapshot(self, snapshot):
"""Create a snapshot."""
return self.xiv_ds8k_proxy.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
return self.xiv_ds8k_proxy.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
return self.xiv_ds8k_proxy.get_volume_stats(refresh)
def create_cloned_volume(self, tgt_volume, src_volume):
"""Create Cloned Volume."""
return self.xiv_ds8k_proxy.create_cloned_volume(tgt_volume, src_volume)
def extend_volume(self, volume, new_size):
"""Extend Created Volume."""
self.xiv_ds8k_proxy.extend_volume(volume, new_size)
def migrate_volume(self, context, volume, host):
"""Migrate the volume to the specified host."""
return self.xiv_ds8k_proxy.migrate_volume(context, volume, host)
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
In the case of XIV, the existing_ref consists of a single field named
'existing_ref' representing the name of the volume on the storage.
There are two ways to do this:
        1. Rename the backend storage object so that it matches
           volume['name'], which is how drivers traditionally map between a
           cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
"""
return self.xiv_ds8k_proxy.manage_volume(volume, existing_ref)
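    # Illustrative only: since the reference is a single field, a manage
    # request for this driver might pass an existing_ref such as
    #     {'existing_ref': 'name_of_volume_on_the_array'}
    # where the value is a hypothetical volume name on the storage back end.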
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing."""
return self.xiv_ds8k_proxy.manage_volume_get_size(volume, existing_ref)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management."""
return self.xiv_ds8k_proxy.unmanage_volume(volume)
def reenable_replication(self, context, volume):
"""Re-enable volume replication. """
return self.xiv_ds8k_proxy.reenable_replication(context, volume)
def get_replication_status(self, context, volume):
"""Return replication status."""
return self.xiv_ds8k_proxy.get_replication_status(context, volume)
def promote_replica(self, context, volume):
"""Promote the replica to be the primary volume."""
return self.xiv_ds8k_proxy.promote_replica(context, volume)
def create_replica_test_volume(self, volume, src_vref):
"""Creates a test replica clone of the specified replicated volume."""
return self.xiv_ds8k_proxy.create_replica_test_volume(volume, src_vref)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.xiv_ds8k_proxy.retype(ctxt, volume, new_type, diff, host)
def create_consistencygroup(self, context, group):
"""Creates a consistency group."""
return self.xiv_ds8k_proxy.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return self.xiv_ds8k_proxy.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a consistency group snapshot."""
return self.xiv_ds8k_proxy.create_cgsnapshot(
context, cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a consistency group snapshot."""
return self.xiv_ds8k_proxy.delete_cgsnapshot(
context, cgsnapshot, snapshots)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
"""Adds or removes volume(s) to/from an existing consistency group."""
return self.xiv_ds8k_proxy.update_consistencygroup(
context, group, add_volumes, remove_volumes)
def create_consistencygroup_from_src(
self, context, group, volumes, cgsnapshot, snapshots,
source_cg=None, source_vols=None):
"""Creates a consistencygroup from source."""
return self.xiv_ds8k_proxy.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot, snapshots,
source_cg, source_vols)
def replication_disable(self, context, volume):
"""Disable replication on the specified volume."""
return self.xiv_ds8k_proxy.replication_disable(context, volume)
def replication_enable(self, context, volume):
"""Enable replication on a replication capable volume."""
return self.xiv_ds8k_proxy.replication_enable(context, volume)
def list_replication_targets(self, context, volume):
"""Provide a means to obtain replication targets for a volume."""
return self.xiv_ds8k_proxy.list_replication_targets(context, volume)
def replication_failover(self, context, volume, secondary):
"""Force failover to a secondary replication target. """
return self.xiv_ds8k_proxy.replication_failover(
context, volume, secondary)
#!/usr/bin/env python
"""
Usage example employing Lasagne for digit recognition using the MNIST dataset.
This example is deliberately structured as a long flat file, focusing on how
to use Lasagne, instead of focusing on writing maximally modular and reusable
code. It is used as the foundation for the introductory Lasagne tutorial:
http://lasagne.readthedocs.org/en/latest/user/tutorial.html
More in-depth examples and reproductions of paper results are maintained in
a separate repository: https://github.com/Lasagne/Recipes
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
# ################## Download and prepare the MNIST dataset ##################
# This is just some way of getting the MNIST dataset from an online location
# and loading it into numpy arrays. It doesn't involve Lasagne at all.
def load_dataset():
# We first define a download function, supporting both Python 2 and 3.
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
print("Downloading %s" % filename)
urlretrieve(source + filename, filename)
# We then define functions for loading MNIST images and labels.
# For convenience, they also download the requested files if needed.
import gzip
def load_mnist_images(filename):
if not os.path.exists(filename):
download(filename)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(-1, 1, 28, 28)
        # The inputs come as bytes, so we convert them to float32 in the range [0,1].
        # (Actually to the range [0, 255/256], for compatibility with the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(filename):
if not os.path.exists(filename):
download(filename)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
        # The labels are vectors of integers now, which is exactly what we want.
return data
# We can now download and read the training and test set images and labels.
X_train = load_mnist_images('train-images-idx3-ubyte.gz')
y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
return X_train, y_train, X_val, y_val, X_test, y_test
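# For reference, the arrays returned above are: X_train (50000, 1, 28, 28) and
# X_val/X_test (10000, 1, 28, 28), all float32 in [0, 1), plus uint8 label
# vectors of matching lengths (50000, 10000 and 10000 respectively).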
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_mlp(input_var=None):
# This creates an MLP of two hidden layers of 800 units each, followed by
# a softmax output layer of 10 units. It applies 20% dropout to the input
# data and 50% dropout to the hidden layers.
# Input layer, specifying the expected input shape of the network
# (unspecified batchsize, 1 channel, 28 rows and 28 columns) and
# linking it to the given Theano variable `input_var`, if any:
l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
# Apply 20% dropout to the input data:
l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)
# Add a fully-connected layer of 800 units, using the linear rectifier, and
# initializing weights with Glorot's scheme (which is the default anyway):
l_hid1 = lasagne.layers.DenseLayer(
l_in_drop, num_units=800,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# We'll now add dropout of 50%:
l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)
# Another 800-unit layer:
l_hid2 = lasagne.layers.DenseLayer(
l_hid1_drop, num_units=800,
nonlinearity=lasagne.nonlinearities.rectify)
# 50% dropout again:
l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)
# Finally, we'll add the fully-connected output layer, of 10 softmax units:
l_out = lasagne.layers.DenseLayer(
l_hid2_drop, num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
# Each layer is linked to its incoming layer(s), so we only need to pass
# the output layer to give access to a network in Lasagne:
return l_out
def build_custom_mlp(input_var=None, depth=2, width=800, drop_input=.2,
drop_hidden=.5):
# By default, this creates the same network as `build_mlp`, but it can be
# customized with respect to the number and size of hidden layers. This
# mostly showcases how creating a network in Python code can be a lot more
# flexible than a configuration file. Note that to make the code easier,
# all the layers are just called `network` -- there is no need to give them
# different names if all we return is the last one we created anyway; we
# just used different names above for clarity.
# Input layer and dropout (with shortcut `dropout` for `DropoutLayer`):
network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
if drop_input:
network = lasagne.layers.dropout(network, p=drop_input)
# Hidden layers and dropout:
nonlin = lasagne.nonlinearities.rectify
for _ in range(depth):
network = lasagne.layers.DenseLayer(network, width, nonlinearity=nonlin)
if drop_hidden:
network = lasagne.layers.dropout(network, p=drop_hidden)
# Output layer:
softmax = lasagne.nonlinearities.softmax
network = lasagne.layers.DenseLayer(network, 10, nonlinearity=softmax)
return network
def build_cnn(input_var=None):
# As a third model, we'll create a CNN of two convolution + pooling stages
# and a fully-connected hidden layer in front of the output layer.
# Input layer, as usual:
network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
# This time we do not apply input dropout, as it tends to work less well
# for convolutional layers.
# Convolutional layer with 32 kernels of size 5x5. Strided and padded
# convolutions are supported as well; see the docstring.
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# Expert note: Lasagne provides alternative convolutional layers that
# override Theano's choice of which implementation to use; for details
# please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.
# Max-pooling layer of factor 2 in both dimensions:
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# Another convolution with 32 5x5 kernels, and another 2x2 pooling:
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify)
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# A fully-connected layer of 256 units with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify)
# And, finally, the 10-unit output layer with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return network
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
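# A small self-contained sketch of the iterator above; the toy shapes are
# arbitrary and any aligned inputs/targets arrays behave the same way.
def _demo_iterate_minibatches():
    inputs = np.zeros((10, 1, 28, 28), dtype=np.float32)
    targets = np.arange(10, dtype=np.uint8)
    for batch_inputs, batch_targets in iterate_minibatches(inputs, targets, 4,
                                                           shuffle=True):
        # With 10 examples and batchsize 4 this yields two full batches; the
        # trailing 2 examples are dropped, matching the training loop below.
        print(batch_inputs.shape, batch_targets.shape)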
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model='mlp', num_epochs=500):
# Load the dataset
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model (depending on first command line parameter)
print("Building model and compiling functions...")
if model == 'mlp':
network = build_mlp(input_var)
elif model.startswith('custom_mlp:'):
depth, width, drop_in, drop_hid = model.split(':', 1)[1].split(',')
network = build_custom_mlp(input_var, int(depth), int(width),
float(drop_in), float(drop_hid))
elif model == 'cnn':
network = build_cnn(input_var)
else:
print("Unrecognized model type %r." % model)
return
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=0.01, momentum=0.9)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
if ('--help' in sys.argv) or ('-h' in sys.argv):
print("Trains a neural network on MNIST using Lasagne.")
print("Usage: %s [MODEL [EPOCHS]]" % sys.argv[0])
print()
print("MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),")
print(" 'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID' for an MLP")
print(" with DEPTH hidden layers of WIDTH units, DROP_IN")
print(" input dropout and DROP_HID hidden dropout,")
print(" 'cnn' for a simple Convolutional Neural Network (CNN).")
print("EPOCHS: number of training epochs to perform (default: 500)")
else:
kwargs = {}
if len(sys.argv) > 1:
kwargs['model'] = sys.argv[1]
if len(sys.argv) > 2:
kwargs['num_epochs'] = int(sys.argv[2])
main(**kwargs)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Discrete Cosine Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math as _math
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export
def _validate_dct_arguments(input_tensor, dct_type, n, axis, norm):
"""Checks that DCT/IDCT arguments are compatible and well formed."""
if axis != -1:
raise NotImplementedError("axis must be -1. Got: %s" % axis)
if n is not None and n < 1:
raise ValueError("n should be a positive integer or None")
if dct_type not in (1, 2, 3):
raise ValueError("Only Types I, II and III (I)DCT are supported.")
if dct_type == 1:
if norm == "ortho":
raise ValueError("Normalization is not supported for the Type-I DCT.")
if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2:
raise ValueError(
"Type-I DCT requires the dimension to be greater than one.")
if norm not in (None, "ortho"):
raise ValueError(
"Unknown normalization. Expected None or 'ortho', got: %s" % norm)
# TODO(rjryan): Implement `axis` parameter.
@tf_export("signal.dct", v1=["signal.dct", "spectral.dct"])
def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
"""Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.
Currently only Types I, II and III are supported.
Type I is implemented using a length `2N` padded `tf.signal.rfft`.
Type II is implemented using a length `2N` padded `tf.signal.rfft`, as
described here: [Type 2 DCT using 2N FFT padded (Makhoul)](https://dsp.stackexchange.com/a/10606).
Type III is a fairly straightforward inverse of Type II
(i.e. using a length `2N` padded `tf.signal.irfft`).
@compatibility(scipy)
Equivalent to [scipy.fftpack.dct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html)
for Type-I, Type-II and Type-III DCT.
@end_compatibility
Args:
input: A `[..., samples]` `float32` `Tensor` containing the signals to
take the DCT of.
type: The DCT type to perform. Must be 1, 2 or 3.
n: The length of the transform. If length is less than sequence length,
only the first n elements of the sequence are considered for the DCT.
If n is greater than the sequence length, zeros are padded and then
the DCT is computed as usual.
axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
norm: The normalization to apply. `None` for no normalization or `'ortho'`
for orthonormal normalization.
name: An optional name for the operation.
Returns:
A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.
Raises:
ValueError: If `type` is not `1`, `2` or `3`, `axis` is
not `-1`, `n` is not `None` or greater than 0,
or `norm` is not `None` or `'ortho'`.
ValueError: If `type` is `1` and `norm` is `ortho`.
[dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
"""
_validate_dct_arguments(input, type, n, axis, norm)
with _ops.name_scope(name, "dct", [input]):
# We use the RFFT to compute the DCT and TensorFlow only supports float32
# for FFTs at the moment.
input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)
seq_len = (
tensor_shape.dimension_value(input.shape[-1]) or
_array_ops.shape(input)[-1])
if n is not None:
if n <= seq_len:
input = input[..., 0:n]
else:
rank = len(input.shape)
padding = [[0, 0] for i in range(rank)]
padding[rank - 1][1] = n - seq_len
padding = _ops.convert_to_tensor(padding, dtype=_dtypes.int32)
input = _array_ops.pad(input, paddings=padding)
axis_dim = (tensor_shape.dimension_value(input.shape[-1])
or _array_ops.shape(input)[-1])
axis_dim_float = _math_ops.cast(axis_dim, _dtypes.float32)
if type == 1:
dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1)
dct1 = _math_ops.real(fft_ops.rfft(dct1_input))
return dct1
if type == 2:
scale = 2.0 * _math_ops.exp(
_math_ops.complex(
0.0, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
axis_dim_float))
# TODO(rjryan): Benchmark performance and memory usage of the various
# approaches to computing a DCT via the RFFT.
dct2 = _math_ops.real(
fft_ops.rfft(
input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)
if norm == "ortho":
n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
n2 = n1 * _math_ops.sqrt(2.0)
# Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
weights = _array_ops.pad(
_array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
constant_values=n2)
dct2 *= weights
return dct2
elif type == 3:
if norm == "ortho":
n1 = _math_ops.sqrt(axis_dim_float)
n2 = n1 * _math_ops.sqrt(0.5)
# Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
weights = _array_ops.pad(
_array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
constant_values=n2)
input *= weights
else:
input *= axis_dim_float
scale = 2.0 * _math_ops.exp(
_math_ops.complex(
0.0,
_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
axis_dim_float))
dct3 = _math_ops.real(
fft_ops.irfft(
scale * _math_ops.complex(input, 0.0),
fft_length=[2 * axis_dim]))[..., :axis_dim]
return dct3
# TODO(rjryan): Implement `n` and `axis` parameters.
@tf_export("signal.idct", v1=["signal.idct", "spectral.idct"])
def idct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
"""Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`.
Currently only Types I, II and III are supported. Type III is the inverse of
Type II, and vice versa.
Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is
not `'ortho'`. That is:
`signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`.
When `norm='ortho'`, we have:
`signal == idct(dct(signal, norm='ortho'), norm='ortho')`.
@compatibility(scipy)
Equivalent to [scipy.fftpack.idct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html)
for Type-I, Type-II and Type-III DCT.
@end_compatibility
Args:
input: A `[..., samples]` `float32` `Tensor` containing the signals to take
the DCT of.
type: The IDCT type to perform. Must be 1, 2 or 3.
n: For future expansion. The length of the transform. Must be `None`.
axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
norm: The normalization to apply. `None` for no normalization or `'ortho'`
for orthonormal normalization.
name: An optional name for the operation.
Returns:
A `[..., samples]` `float32` `Tensor` containing the IDCT of `input`.
Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None`, `axis` is
not `-1`, or `norm` is not `None` or `'ortho'`.
[idct]:
https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms
"""
_validate_dct_arguments(input, type, n, axis, norm)
inverse_type = {1: 1, 2: 3, 3: 2}[type]
return dct(input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)
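# A minimal round-trip sketch of the normalization identities quoted in the
# docstrings above, written against the public `tf.signal` aliases of these
# ops (the input values are arbitrary; this is illustrative, not a test):
#
#     import tensorflow as tf
#
#     signal = tf.random.normal([8])
#     n = signal.shape[-1]
#     # With norm=None, DCT-II followed by DCT-III needs a 1 / (2 * n) rescale:
#     restored = tf.signal.idct(tf.signal.dct(signal)) * 0.5 / n
#     # With norm='ortho', the DCT/IDCT pair inverts itself directly:
#     restored_ortho = tf.signal.idct(
#         tf.signal.dct(signal, norm='ortho'), norm='ortho')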
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from django.core.exceptions import ValidationError
from framework import forms, status
from framework.auth import cas
from framework.auth.core import get_user, generate_verification_key
from framework.auth.decorators import block_bing_preview, collect_auth, must_be_logged_in
from framework.auth.forms import PasswordForm, SetEmailAndPasswordForm
from framework.auth.signals import user_registered
from framework.auth.utils import validate_email, validate_recaptcha
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions import session
from framework.transactions.handlers import no_auto_transaction
from osf.models import AbstractNode, OSFUser, PreprintService
from website import mails, language, settings
from website.notifications.utils import check_if_all_global_subscriptions_are_none
from website.profile import utils as profile_utils
from website.project.decorators import (must_have_permission, must_be_valid_project, must_not_be_registration,
must_be_contributor_or_public, must_be_contributor)
from website.project.model import has_anonymous_link
from website.project.signals import unreg_contributor_added, contributor_added
from website.util import sanitize
from website.util import web_url_for, is_json_request
from website.util.permissions import expand_permissions, ADMIN
from website.util.time import get_timestamp, throttle_period_expired
from website.exceptions import NodeStateError
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_node_contributors_abbrev(auth, node, **kwargs):
anonymous = has_anonymous_link(node, auth)
formatter = 'surname'
max_count = kwargs.get('max_count', 3)
if 'user_ids' in kwargs:
users = [
OSFUser.load(user_id) for user_id in kwargs['user_ids']
if node.contributor_set.filter(user__guid__guid=user_id).exists()
]
else:
users = node.visible_contributors
if anonymous or not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
contributors = []
n_contributors = len(users)
others_count = ''
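    # Worked example: with five visible contributors and the default max_count
    # of 3, the loop below yields separators ',', ',' and ' &' for the first
    # three entries and sets others_count to '2', so the template can render
    # something like "A, B, C & 2 others".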
for index, user in enumerate(users[:max_count]):
if index == max_count - 1 and len(users) > max_count:
separator = ' &'
others_count = str(n_contributors - 3)
elif index == len(users) - 1:
separator = ''
elif index == len(users) - 2:
separator = ' &'
else:
separator = ','
contributor = user.get_summary(formatter)
contributor['user_id'] = user._primary_key
contributor['separator'] = separator
contributors.append(contributor)
return {
'contributors': contributors,
'others_count': others_count,
}
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_contributors(auth, node, **kwargs):
    # A 'limit' query parameter can be passed to receive only a specified number of contributors from this route
if request.args.get('limit'):
try:
limit = int(request.args['limit'])
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "limit": {}'.format(request.args['limit'])
))
else:
limit = None
anonymous = has_anonymous_link(node, auth)
if anonymous or not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
# Limit is either an int or None:
# if int, contribs list is sliced to specified length
# if None, contribs list is not sliced
contribs = profile_utils.serialize_contributors(
node.visible_contributors[0:limit],
node=node,
)
# Will either return just contributor list or contributor list + 'more' element
if limit:
return {
'contributors': contribs,
'more': max(0, len(node.visible_contributors) - limit)
}
else:
return {'contributors': contribs}
@must_be_logged_in
@must_be_valid_project
def get_contributors_from_parent(auth, node, **kwargs):
parent = node.parent_node
if not parent:
raise HTTPError(http.BAD_REQUEST)
if not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
contribs = [
profile_utils.add_contributor_json(contrib, node=node)
for contrib in parent.contributors if contrib not in node.contributors
]
return {'contributors': contribs}
def deserialize_contributors(node, user_dicts, auth, validate=False):
"""View helper that returns a list of User objects from a list of
serialized users (dicts). The users in the list may be registered or
unregistered users.
    e.g. ``[{'id': 'abc123', 'registered': True, 'fullname': ...},
            {'id': None, 'registered': False, 'fullname': ...},
            {'id': '123ab', 'registered': False, 'fullname': ...}]``
If a dict represents an unregistered user without an ID, creates a new
unregistered User record.
:param Node node: The node to add contributors to
:param list(dict) user_dicts: List of serialized users in the format above.
:param Auth auth:
:param bool validate: Whether to validate and sanitize fields (if necessary)
"""
# Add the registered contributors
contribs = []
for contrib_dict in user_dicts:
fullname = contrib_dict['fullname']
visible = contrib_dict['visible']
email = contrib_dict.get('email')
if validate is True:
# Validate and sanitize inputs as needed. Email will raise error if invalid.
# TODO Edge case bug: validation and saving are performed in same loop, so all in list
# up to the invalid entry will be saved. (communicate to the user what needs to be retried)
fullname = sanitize.strip_html(fullname)
if not fullname:
raise ValidationError('Full name field cannot be empty')
if email:
validate_email(email) # Will raise a ValidationError if email invalid
if contrib_dict['id']:
contributor = OSFUser.load(contrib_dict['id'])
else:
try:
contributor = OSFUser.create_unregistered(
fullname=fullname,
email=email)
contributor.save()
except ValidationError:
## FIXME: This suppresses an exception if ID not found & new validation fails; get_user will return None
contributor = get_user(email=email)
# Add unclaimed record if necessary
if not contributor.is_registered:
contributor.add_unclaimed_record(node=node, referrer=auth.user,
given_name=fullname,
email=email)
contributor.save()
contribs.append({
'user': contributor,
'visible': visible,
'permissions': expand_permissions(contrib_dict.get('permission'))
})
return contribs
@unreg_contributor_added.connect
def finalize_invitation(node, contributor, auth, email_template='default'):
try:
record = contributor.get_unclaimed_record(node._primary_key)
except ValueError:
pass
else:
if record['email']:
send_claim_email(record['email'], contributor, node, notify=True, email_template=email_template)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_contributors_post(auth, node, **kwargs):
""" Add contributors to a node. """
    user_dicts = request.json.get('users')
    node_ids = request.json.get('node_ids')
    # Validate the payload before using node_ids so a missing value raises a
    # 400 rather than a TypeError.
    if user_dicts is None or node_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    if node._id in node_ids:
        node_ids.remove(node._id)
# Prepare input data for `Node::add_contributors`
try:
contribs = deserialize_contributors(node, user_dicts, auth=auth, validate=True)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
try:
node.add_contributors(contributors=contribs, auth=auth)
except NodeStateError as e:
return {'status': 400, 'message': e.args[0]}, 400
node.save()
# Disconnect listener to avoid multiple invite emails
unreg_contributor_added.disconnect(finalize_invitation)
for child_id in node_ids:
child = AbstractNode.load(child_id)
# Only email unreg users once
try:
child_contribs = deserialize_contributors(
child, user_dicts, auth=auth, validate=True
)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
child.add_contributors(contributors=child_contribs, auth=auth)
child.save()
# Reconnect listeners
unreg_contributor_added.connect(finalize_invitation)
return {
'status': 'success',
'contributors': profile_utils.serialize_contributors(
node.visible_contributors,
node=node,
)
}, 201
@no_auto_transaction
@must_be_valid_project # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_manage_contributors(auth, node, **kwargs):
"""Reorder and remove contributors.
:param Auth auth: Consolidated authorization
:param-json list contributors: Ordered list of contributors represented as
dictionaries of the form:
{'id': <id>, 'permission': <One of 'read', 'write', 'admin'>}
:raises: HTTPError(400) if contributors to be removed are not in list
or if no admin users would remain after changes were applied
"""
contributors = request.json.get('contributors')
# Update permissions and order
try:
node.manage_contributors(contributors, auth=auth, save=True)
except (ValueError, NodeStateError) as error:
raise HTTPError(http.BAD_REQUEST, data={'message_long': error.args[0]})
# If user has removed herself from project, alert; redirect to
# node summary if node is public, else to user's dashboard page
if not node.is_contributor(auth.user):
status.push_status_message(
'You have removed yourself as a contributor from this project',
kind='success',
trust=False
)
if node.is_public:
return {'redirectUrl': node.url}
return {'redirectUrl': web_url_for('dashboard')}
# Else if user has revoked her admin permissions, alert and stay on
# current page
if not node.has_permission(auth.user, ADMIN):
status.push_status_message(
'You have removed your administrative privileges for this project',
kind='success',
trust=False
)
# Else stay on current page
return {}
@must_be_valid_project # returns project
@must_be_contributor
@must_not_be_registration
def project_remove_contributor(auth, **kwargs):
"""Remove a contributor from a list of nodes.
:param Auth auth: Consolidated authorization
:raises: HTTPError(400) if contributors to be removed are not in list
or if no admin users would remain after changes were applied
"""
contributor_id = request.get_json()['contributorID']
node_ids = request.get_json()['nodeIDs']
contributor = OSFUser.load(contributor_id)
if contributor is None:
raise HTTPError(http.BAD_REQUEST, data={'message_long': 'Contributor not found.'})
redirect_url = {}
parent_id = node_ids[0]
for node_id in node_ids:
# Update permissions and order
node = AbstractNode.load(node_id)
# Forbidden unless user is removing herself
if not node.has_permission(auth.user, 'admin'):
if auth.user != contributor:
raise HTTPError(http.FORBIDDEN)
if node.visible_contributors.count() == 1 \
and node.visible_contributors[0] == contributor:
raise HTTPError(http.FORBIDDEN, data={
'message_long': 'Must have at least one bibliographic contributor'
})
nodes_removed = node.remove_contributor(contributor, auth=auth)
# remove_contributor returns false if there is not one admin or visible contributor left after the move.
if not nodes_removed:
raise HTTPError(http.BAD_REQUEST, data={
'message_long': 'Could not remove contributor.'})
# On parent node, if user has removed herself from project, alert; redirect to
# node summary if node is public, else to user's dashboard page
if not node.is_contributor(auth.user) and node_id == parent_id:
status.push_status_message(
'You have removed yourself as a contributor from this project',
kind='success',
trust=False
)
if node.is_public:
redirect_url = {'redirectUrl': node.url}
else:
redirect_url = {'redirectUrl': web_url_for('dashboard')}
return redirect_url
def send_claim_registered_email(claimer, unclaimed_user, node, throttle=24 * 3600):
"""
    A registered user claiming the unclaimed user account as a contributor to a project.
Send an email for claiming the account to the referrer and notify the claimer.
:param claimer: the claimer
:param unclaimed_user: the user account to claim
:param node: the project node where the user account is claimed
:param throttle: the time period in seconds before another claim for the account can be made
:return:
:raise: http.BAD_REQUEST
"""
unclaimed_record = unclaimed_user.get_unclaimed_record(node._primary_key)
# check throttle
timestamp = unclaimed_record.get('last_sent')
if not throttle_period_expired(timestamp, throttle):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='User account can only be claimed with an existing user once every 24 hours'
))
# roll the valid token for each email, thus user cannot change email and approve a different email address
verification_key = generate_verification_key(verification_type='claim')
unclaimed_record['token'] = verification_key['token']
unclaimed_record['expires'] = verification_key['expires']
unclaimed_record['claimer_email'] = claimer.username
unclaimed_user.save()
referrer = OSFUser.load(unclaimed_record['referrer_id'])
claim_url = web_url_for(
'claim_user_registered',
uid=unclaimed_user._primary_key,
pid=node._primary_key,
token=unclaimed_record['token'],
_external=True,
)
# Send mail to referrer, telling them to forward verification link to claimer
mails.send_mail(
referrer.username,
mails.FORWARD_INVITE_REGISTERED,
user=unclaimed_user,
referrer=referrer,
node=node,
claim_url=claim_url,
fullname=unclaimed_record['name'],
)
unclaimed_record['last_sent'] = get_timestamp()
unclaimed_user.save()
# Send mail to claimer, telling them to wait for referrer
mails.send_mail(
claimer.username,
mails.PENDING_VERIFICATION_REGISTERED,
fullname=claimer.fullname,
referrer=referrer,
node=node,
)
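# Illustrative sketch of the throttling idea used above: a claim email is only
# re-sent once the configured period has elapsed since 'last_sent'. This is a
# hypothetical standalone helper, not the project's throttle_period_expired
# implementation:
#
#     import time
#
#     def _example_throttle_expired(last_sent, throttle_seconds):
#         if not last_sent:
#             return True
#         return (time.time() - last_sent) > throttle_seconds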
def send_claim_email(email, unclaimed_user, node, notify=True, throttle=24 * 3600, email_template='default'):
"""
    Unregistered user claiming a user account as a contributor to a project. Send an email for claiming the account.
Either sends to the given email or the referrer's email, depending on the email address provided.
:param str email: The address given in the claim user form
:param User unclaimed_user: The User record to claim.
:param Node node: The node where the user claimed their account.
:param bool notify: If True and an email is sent to the referrer, an email
will also be sent to the invited user about their pending verification.
:param int throttle: Time period (in seconds) after the referrer is
emailed during which the referrer will not be emailed again.
:param str email_template: the email template to use
:return
:raise http.BAD_REQUEST
"""
claimer_email = email.lower().strip()
unclaimed_record = unclaimed_user.get_unclaimed_record(node._primary_key)
referrer = OSFUser.load(unclaimed_record['referrer_id'])
claim_url = unclaimed_user.get_claim_url(node._primary_key, external=True)
# Option 1:
# When adding the contributor, the referrer provides both name and email.
# The given email is the same provided by user, just send to that email.
preprint_provider = None
if unclaimed_record.get('email') == claimer_email:
# check email template for branded preprints
if email_template == 'preprint':
email_template, preprint_provider = find_preprint_provider(node)
if not email_template or not preprint_provider:
return
mail_tpl = getattr(mails, 'INVITE_PREPRINT')(email_template, preprint_provider)
else:
mail_tpl = getattr(mails, 'INVITE_DEFAULT'.format(email_template.upper()))
to_addr = claimer_email
unclaimed_record['claimer_email'] = claimer_email
unclaimed_user.save()
# Option 2:
# TODO: [new improvement ticket] this option is disabled from preprint but still available on the project page
    # When adding the contributor, the referrer only provides the name.
# The account is later claimed by some one who provides the email.
# Send email to the referrer and ask her/him to forward the email to the user.
else:
# check throttle
timestamp = unclaimed_record.get('last_sent')
if not throttle_period_expired(timestamp, throttle):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='User account can only be claimed with an existing user once every 24 hours'
))
# roll the valid token for each email, thus user cannot change email and approve a different email address
verification_key = generate_verification_key(verification_type='claim')
unclaimed_record['last_sent'] = get_timestamp()
unclaimed_record['token'] = verification_key['token']
unclaimed_record['expires'] = verification_key['expires']
unclaimed_record['claimer_email'] = claimer_email
unclaimed_user.save()
claim_url = unclaimed_user.get_claim_url(node._primary_key, external=True)
# send an email to the invited user without `claim_url`
if notify:
pending_mail = mails.PENDING_VERIFICATION
mails.send_mail(
claimer_email,
pending_mail,
user=unclaimed_user,
referrer=referrer,
fullname=unclaimed_record['name'],
node=node
)
mail_tpl = mails.FORWARD_INVITE
to_addr = referrer.username
# Send an email to the claimer (Option 1) or to the referrer (Option 2) with `claim_url`
mails.send_mail(
to_addr,
mail_tpl,
user=unclaimed_user,
referrer=referrer,
node=node,
claim_url=claim_url,
email=claimer_email,
fullname=unclaimed_record['name'],
branded_service=preprint_provider
)
return to_addr
@contributor_added.connect
def notify_added_contributor(node, contributor, auth=None, throttle=None, email_template='default'):
if email_template == 'false':
return
throttle = throttle or settings.CONTRIBUTOR_ADDED_EMAIL_THROTTLE
# Email users for projects, or for components where they are not contributors on the parent node.
if contributor.is_registered and \
(not node.parent_node or (node.parent_node and not node.parent_node.is_contributor(contributor))):
preprint_provider = None
if email_template == 'preprint':
email_template, preprint_provider = find_preprint_provider(node)
if not email_template or not preprint_provider:
return
email_template = getattr(mails, 'CONTRIBUTOR_ADDED_PREPRINT')(email_template, preprint_provider)
elif node.is_preprint:
email_template = getattr(mails, 'CONTRIBUTOR_ADDED_PREPRINT_NODE_FROM_OSF'.format(email_template.upper()))
else:
email_template = getattr(mails, 'CONTRIBUTOR_ADDED_DEFAULT'.format(email_template.upper()))
contributor_record = contributor.contributor_added_email_records.get(node._id, {})
if contributor_record:
timestamp = contributor_record.get('last_sent', None)
if timestamp:
if not throttle_period_expired(timestamp, throttle):
return
else:
contributor.contributor_added_email_records[node._id] = {}
mails.send_mail(
contributor.username,
email_template,
user=contributor,
node=node,
referrer_name=auth.user.fullname if auth else '',
all_global_subscriptions_none=check_if_all_global_subscriptions_are_none(contributor),
branded_service=preprint_provider
)
contributor.contributor_added_email_records[node._id]['last_sent'] = get_timestamp()
contributor.save()
elif not contributor.is_registered:
unreg_contributor_added.send(node, contributor=contributor, auth=auth, email_template=email_template)
def find_preprint_provider(node):
"""
Given a node, find the preprint and the service provider.
    :param node: the node to which a contributor or preprint author is added
:return: tuple containing the type of email template (osf or branded) and the preprint provider
"""
try:
preprint = PreprintService.objects.get(node=node)
provider = preprint.provider
email_template = 'osf' if provider._id == 'osf' else 'branded'
return email_template, provider
except PreprintService.DoesNotExist:
return None, None
def verify_claim_token(user, token, pid):
"""View helper that checks that a claim token for a given user and node ID
is valid. If not valid, throws an error with custom error messages.
"""
# if token is invalid, throw an error
if not user.verify_claim_token(token=token, project_id=pid):
if user.is_registered:
error_data = {
'message_short': 'User has already been claimed.',
'message_long': 'Please <a href="/login/">log in</a> to continue.'}
raise HTTPError(400, data=error_data)
else:
return False
return True
@block_bing_preview
@collect_auth
@must_be_valid_project
def claim_user_registered(auth, node, **kwargs):
"""
View that prompts user to enter their password in order to claim being a contributor on a project.
A user must be logged in.
"""
current_user = auth.user
sign_out_url = web_url_for('auth_register', logout=True, next=request.url)
if not current_user:
return redirect(sign_out_url)
    # The logged-in user should not already be a contributor to the project
if node.is_contributor(current_user):
logout_url = web_url_for('auth_logout', redirect_url=request.url)
data = {
'message_short': 'Already a contributor',
'message_long': ('The logged-in user is already a contributor to this '
'project. Would you like to <a href="{}">log out</a>?').format(logout_url)
}
raise HTTPError(http.BAD_REQUEST, data=data)
uid, pid, token = kwargs['uid'], kwargs['pid'], kwargs['token']
unreg_user = OSFUser.load(uid)
if not verify_claim_token(unreg_user, token, pid=node._primary_key):
error_data = {
'message_short': 'Invalid url.',
'message_long': 'The token in the URL is invalid or has expired.'
}
raise HTTPError(http.BAD_REQUEST, data=error_data)
# Store the unreg_user data on the session in case the user registers
# a new account
session.data['unreg_user'] = {
'uid': uid, 'pid': pid, 'token': token
}
session.save()
form = PasswordForm(request.form)
if request.method == 'POST':
if form.validate():
if current_user.check_password(form.password.data):
node.replace_contributor(old=unreg_user, new=current_user)
node.save()
status.push_status_message(
'You are now a contributor to this project.',
kind='success',
trust=False
)
return redirect(node.url)
else:
status.push_status_message(language.LOGIN_FAILED, kind='warning', trust=False)
else:
forms.push_errors_to_status(form.errors)
if is_json_request():
form_ret = forms.utils.jsonify(form)
user_ret = profile_utils.serialize_user(current_user, full=False)
else:
form_ret = form
user_ret = current_user
return {
'form': form_ret,
'user': user_ret,
'signOutUrl': sign_out_url
}
@user_registered.connect
def replace_unclaimed_user_with_registered(user):
"""Listens for the user_registered signal. If unreg_user is stored in the
session, then the current user is trying to claim themselves as a contributor.
Replaces the old, unregistered contributor with the newly registered
account.
"""
unreg_user_info = session.data.get('unreg_user')
if unreg_user_info:
unreg_user = OSFUser.load(unreg_user_info['uid'])
pid = unreg_user_info['pid']
node = AbstractNode.load(pid)
node.replace_contributor(old=unreg_user, new=user)
node.save()
status.push_status_message(
'Successfully claimed contributor.', kind='success', trust=False)
@block_bing_preview
@collect_auth
def claim_user_form(auth, **kwargs):
"""
View for rendering the set password page for a claimed user.
Must have ``token`` as a querystring argument.
Renders the set password form, validates it, and sets the user's password.
HTTP Method: GET, POST
"""
uid, pid = kwargs['uid'], kwargs['pid']
token = request.form.get('token') or request.args.get('token')
user = OSFUser.load(uid)
    # If the unregistered user is not in the database, or the URL bears an invalid token, raise an HTTP 400 error
if not user or not verify_claim_token(user, token, pid):
error_data = {
'message_short': 'Invalid url.',
            'message_long': 'Claimed user does not exist, or the token in the URL is invalid or has expired.'
}
raise HTTPError(http.BAD_REQUEST, data=error_data)
# If user is logged in, redirect to 're-enter password' page
if auth.logged_in:
return redirect(web_url_for('claim_user_registered',
uid=uid, pid=pid, token=token))
unclaimed_record = user.unclaimed_records[pid]
user.fullname = unclaimed_record['name']
user.update_guessed_names()
# The email can be the original referrer email if no claimer email has been specified.
claimer_email = unclaimed_record.get('claimer_email') or unclaimed_record.get('email')
# If there is a registered user with this email, redirect to 're-enter password' page
try:
user_from_email = OSFUser.objects.get(emails__address=claimer_email.lower().strip()) if claimer_email else None
except OSFUser.DoesNotExist:
user_from_email = None
if user_from_email and user_from_email.is_registered:
return redirect(web_url_for('claim_user_registered', uid=uid, pid=pid, token=token))
form = SetEmailAndPasswordForm(request.form, token=token)
if request.method == 'POST':
if not form.validate():
forms.push_errors_to_status(form.errors)
elif settings.RECAPTCHA_SITE_KEY and not validate_recaptcha(request.form.get('g-recaptcha-response'), remote_ip=request.remote_addr):
status.push_status_message('Invalid captcha supplied.', kind='error')
else:
username, password = claimer_email, form.password.data
if not username:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='No email associated with this account. Please claim this '
'account on the project to which you were invited.'
))
user.register(username=username, password=password)
# Clear unclaimed records
user.unclaimed_records = {}
user.verification_key = generate_verification_key()
user.save()
# Authenticate user and redirect to project page
status.push_status_message(language.CLAIMED_CONTRIBUTOR, kind='success', trust=True)
# Redirect to CAS and authenticate the user with a verification key.
return redirect(cas.get_login_url(
web_url_for('resolve_guid', guid=pid, _absolute=True),
username=user.username,
verification_key=user.verification_key
))
return {
'firstname': user.given_name,
'email': claimer_email if claimer_email else '',
'fullname': user.fullname,
'form': forms.utils.jsonify(form) if is_json_request() else form,
}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def invite_contributor_post(node, **kwargs):
"""API view for inviting an unregistered user. Performs validation, but does not actually invite the user.
    Expects JSON arguments with 'fullname' (required) and 'email' (optional).
"""
fullname = request.json.get('fullname').strip()
email = request.json.get('email')
# Validate and sanitize inputs as needed. Email will raise error if invalid.
fullname = sanitize.strip_html(fullname)
if email:
email = email.lower().strip()
try:
validate_email(email)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
if not fullname:
return {'status': 400, 'message': 'Full name field cannot be empty'}, 400
# Check if email is in the database
user = get_user(email=email)
if user:
if user.is_registered:
msg = 'User is already in database. Please go back and try your search again.'
return {'status': 400, 'message': msg}, 400
elif node.is_contributor(user):
msg = 'User with this email address is already a contributor to this project.'
return {'status': 400, 'message': msg}, 400
elif not user.is_confirmed:
serialized = profile_utils.serialize_unregistered(fullname, email)
else:
serialized = profile_utils.add_contributor_json(user)
# use correct display name
serialized['fullname'] = fullname
serialized['email'] = email
else:
# Create a placeholder
serialized = profile_utils.serialize_unregistered(fullname, email)
return {'status': 'success', 'contributor': serialized}
@must_be_contributor_or_public
def claim_user_post(node, **kwargs):
"""
View for claiming a user from the X-editable form on a project page.
:param node: the project node
:return:
"""
request_data = request.json
# The unclaimed user
unclaimed_user = OSFUser.load(request_data['pk'])
unclaimed_data = unclaimed_user.get_unclaimed_record(node._primary_key)
    # Claimer is not logged in and submits her/his email through X-editable, stored in `request_data['value']`
if 'value' in request_data:
email = request_data['value'].lower().strip()
claimer = get_user(email=email)
# registered user
if claimer and claimer.is_registered:
send_claim_registered_email(claimer, unclaimed_user, node)
# unregistered user
else:
send_claim_email(email, unclaimed_user, node, notify=True)
# Claimer is logged in with confirmed identity stored in `request_data['claimerId']`
elif 'claimerId' in request_data:
claimer_id = request_data['claimerId']
claimer = OSFUser.load(claimer_id)
send_claim_registered_email(claimer, unclaimed_user, node)
email = claimer.username
else:
raise HTTPError(http.BAD_REQUEST)
return {
'status': 'success',
'email': email,
'fullname': unclaimed_data['name']
}
|
|
"""
Evennia menu system.
Contribution - Griatch 2011
This module offers the ability for admins to let their game be fully
or partly menu-driven. Menu choices can be numbered or use arbitrary
keys. There are also some formatting options, such as putting options
in one or more columns.
The menu system consists of a MenuTree object populated by MenuNode
objects. Nodes are linked together with automatically created commands
so the player may select and traverse the menu. Each node can display
text and show options, but also execute arbitrary code to act on the
system and the calling object when they are selected.
There is also a simple Yes/No function supplied. This will create a
one-off Yes/No question and execute given code depending on which
choice was made.
To test, make sure to follow the instructions in
game/gamesrc/commands/examples/cmdset.py (copy the template up one level
and change settings to point to the relevant cmdsets within). If you
already have such a module, you can of course use that. Next you
import and add the CmdMenuTest command to the end of the default cmdset in
this custom module.
The test command is also a good example of how to use this module in code.
"""
from ev import syscmdkeys
from ev import Command, CmdSet, utils
from ev import default_cmds
# imported only to make it available during execution of code blocks
import ev
CMD_NOMATCH = syscmdkeys.CMD_NOMATCH
CMD_NOINPUT = syscmdkeys.CMD_NOINPUT
#
# Commands used by the Menu system
#
class CmdMenuNode(Command):
"""
Parent for menu selection commands.
"""
key = "selection"
aliases = []
locks = "cmd:all()"
help_category = "Menu"
menutree = None
code = None
def func(self):
"Execute a selection"
if self.code:
try:
exec(self.code)
            except Exception as e:
self.caller.msg("%s\n{rThere was an error with this selection.{n" % e)
else:
self.caller.msg("{rThis option is not available.{n")
class CmdMenuLook(default_cmds.CmdLook):
"""
ooc look
Usage:
look
This is a Menu version of the look command. It will normally show
    the options available; otherwise it works like the normal look
    command.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
help_cateogory = "General"
def func(self):
"implement the menu look command"
if self.caller.db._menu_data:
# if we have menu data, try to use that.
lookstring = self.caller.db._menu_data.get("look", None)
if lookstring:
self.caller.msg(lookstring)
return
# otherwise we use normal look
super(CmdMenuLook, self).func()
class CmdMenuHelp(default_cmds.CmdHelp):
"""
help
Usage:
help
Get help specific to the menu, if available. If not,
works like the normal help command.
"""
key = "help"
aliases = "h"
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"implement the menu help command"
if self.caller.db._menu_data:
# if we have menu data, try to use that.
lookstring = self.caller.db._menu_data.get("help", None)
if lookstring:
self.caller.msg(lookstring)
return
# otherwise we use normal help
super(CmdMenuHelp, self).func()
class MenuCmdSet(CmdSet):
"""
Cmdset for the menu. Will replace all other commands.
This always has a few basic commands available.
Note that you must always supply a way to exit the
cmdset manually!
"""
key = "menucmdset"
priority = 1
mergetype = "Replace"
def at_cmdset_creation(self):
"populate cmdset"
pass
#
# Menu Node system
#
class MenuTree(object):
"""
The menu tree object holds the full menu structure consisting of
MenuNodes. Each node is identified by a unique key. The tree
allows for traversal of nodes as well as entering and exiting the
tree as needed. For safety, being in a menu will not survive a
server reboot.
    A menutree has two special node keys, given by the 'startnode' and
    'endnode' arguments. The startnode is where the user will start
    upon first entering the menu. The endnode need not actually
    exist; the moment it is linked to and that link is used, the menu
    will be exited and cleanups run. The default keys for these are
'START' and 'END' respectively.
"""
def __init__(self, caller, nodes=None,
startnode="START", endnode="END", exec_end="look"):
"""
We specify startnode/endnode so that the system knows where to
enter and where to exit the menu tree. If nodes is given, it
        should be a list of valid node objects to add to the tree.
exec_end - if not None, will execute the given command string
directly after the menu system has been exited.
"""
self.tree = {}
self.startnode = startnode
self.endnode = endnode
self.exec_end = exec_end
self.caller = caller
if nodes and utils.is_iter(nodes):
for node in nodes:
self.add(node)
def start(self):
"""
Initialize the menu
"""
self.goto(self.startnode)
def add(self, menunode):
"""
Add a menu node object to the tree. Each node itself keeps
track of which nodes it is connected to.
"""
menunode.init(self)
self.tree[menunode.key] = menunode
def goto(self, key):
"""
Go to a key in the tree. This sets up the cmdsets on the
caller so that they match the choices in that node.
"""
if key == self.endnode:
            # if we were given the END node key, we clean up immediately.
self.caller.cmdset.delete("menucmdset")
del self.caller.db._menu_data
if self.exec_end is not None:
self.caller.execute_cmd(self.exec_end)
return
# not exiting, look for a valid code.
node = self.tree.get(key, None)
if node:
if node.code:
# Execute eventual code active on this
# node. self.caller is available at this point.
try:
exec(node.code)
except Exception:
self.caller.msg("{rCode could not be executed for node %s. Continuing anyway.{n" % key)
# clean old menu cmdset and replace with the new one
self.caller.cmdset.delete("menucmdset")
self.caller.cmdset.add(node.cmdset)
# set the menu flag data for the default commands
self.caller.db._menu_data = {"help": node.helptext,
"look": str(node.text)}
# display the node
self.caller.msg(node.text)
else:
self.caller.msg("{rMenu node '%s' does not exist - maybe it's not created yet..{n" % key)
class MenuNode(object):
"""
This represents a node in a menu tree. The node will display its
textual content and offer menu links to other nodes (the relevant
commands are created automatically)
"""
def __init__(self, key, text="", links=None, linktexts=None,
keywords=None, cols=1, helptext=None,
selectcmds=None, code="", nodefaultcmds=False, separator=""):
"""
key - the unique identifier of this node.
text - is the text that will be displayed at top when viewing this
node.
links - a list of keys for unique menunodes this is connected to.
                The actual keys will not be printed - keywords will be used
(or a number)
linktexts - an optional list of texts to describe the links. Must
match link list if defined. Entries can be None to not
generate any extra text for a particular link.
keywords - an optional list of unique keys for choosing links. Must
match links list. If not given, index numbers will be used.
                   Also individual list entries can be None and will be replaced
                   by indices. If CMD_NOMATCH or CMD_NOINPUT, no text will be
generated to indicate the option exists.
cols - how many columns to use for displaying options.
helptext - if defined, this is shown when using the help command
instead of the normal help index.
selectcmds- a list of custom cmdclasses for handling each option.
Must match links list, but some entries may be set to None
to use default menu cmds. The given command's key will be
used for the menu list entry unless it's CMD_NOMATCH or
                    CMD_NOINPUT, in which case no text will be generated. These
commands have access to self.menutree and so can be used to
select nodes.
code - functional code. This will be executed just before this
node is loaded (i.e. as soon after it's been selected from
another node). self.caller is available to call from this
code block, as well as ev.
nodefaultcmds - if true, don't offer the default help and look commands
in the node
        separator - this string will be put on the line between menu nodes.
"""
self.key = key
self.cmdset = None
self.links = links
self.linktexts = linktexts
self.keywords = keywords
self.cols = cols
self.selectcmds = selectcmds
self.code = code
self.nodefaultcmds = nodefaultcmds
self.separator = separator
        # validate the input
        if not self.links:
            self.links = []
        Nlinks = len(self.links)
        if not self.linktexts or (len(self.linktexts) != Nlinks):
            self.linktexts = [None for i in range(Nlinks)]
        if not self.keywords or (len(self.keywords) != Nlinks):
            self.keywords = [None for i in range(Nlinks)]
        if not self.selectcmds or (len(self.selectcmds) != Nlinks):
            self.selectcmds = [None for i in range(Nlinks)]
# Format default text for the menu-help command
if not helptext:
helptext = "Select one of the valid options ("
for i in range(Nlinks):
if self.keywords[i]:
if self.keywords[i] not in (CMD_NOMATCH, CMD_NOINPUT):
helptext += "%s, " % self.keywords[i]
else:
helptext += "%s, " % (i + 1)
helptext = helptext.rstrip(", ") + ")"
self.helptext = helptext
# Format text display
string = ""
if text:
string += "%s\n" % text
        # format the choices into as many columns as specified
choices = []
for ilink, link in enumerate(self.links):
choice = ""
if self.keywords[ilink]:
if self.keywords[ilink] not in (CMD_NOMATCH, CMD_NOINPUT):
choice += "{g%s{n" % self.keywords[ilink]
else:
choice += "{g %i{n" % (ilink + 1)
if self.linktexts[ilink]:
choice += " - %s" % self.linktexts[ilink]
choices.append(choice)
cols = [[] for i in range(min(len(choices), cols))]
while True:
for i in range(len(cols)):
if not choices:
cols[i].append("")
else:
cols[i].append(choices.pop(0))
if not choices:
break
ftable = utils.format_table(cols)
for row in ftable:
string += "\n" + "".join(row)
# store text
self.text = self.separator + "\n" + string.rstrip()
def init(self, menutree):
"""
Called by menu tree. Initializes the commands needed by
the menutree structure.
"""
# Create the relevant cmdset
self.cmdset = MenuCmdSet()
if not self.nodefaultcmds:
# add default menu commands
self.cmdset.add(CmdMenuLook())
self.cmdset.add(CmdMenuHelp())
for i, link in enumerate(self.links):
if self.selectcmds[i]:
cmd = self.selectcmds[i]()
else:
cmd = CmdMenuNode()
cmd.key = str(i + 1)
# this is the operable command, it moves us to the next node.
cmd.code = "self.menutree.goto('%s')" % link
# also custom commands get access to the menutree.
cmd.menutree = menutree
if self.keywords[i] and cmd.key not in (CMD_NOMATCH, CMD_NOINPUT):
cmd.aliases = [self.keywords[i]]
self.cmdset.add(cmd)
def __str__(self):
"Returns the string representation."
return self.text
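#
# A minimal usage sketch (not part of the original contribution): build a
# trivial two-node menu and start it for `caller`. Call this from e.g. a
# Command's func() with self.caller; node keys and texts are illustrative.
#
def example_two_node_menu(caller):
    "Hedged example only - start a small two-node menu for caller."
    start = MenuNode("START", text="Pick an option.",
                     links=["info", "END"],
                     linktexts=["Show some info", "Quit"])
    info = MenuNode("info", text="This is the info node.",
                    links=["START", "END"],
                    linktexts=["Back to start", "Quit"])
    MenuTree(caller, nodes=(start, info)).start()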
#
# A simple yes/no question. Call this from a command to give object
# a cmdset where they may say yes or no to a question. Does not
# make use of the node system since there is only one level of choice.
#
def prompt_yesno(caller, question="", yescode="", nocode="", default="N"):
"""
This sets up a simple yes/no questionnaire. Question will
be asked, followed by a Y/[N] prompt where the [x] signifies
the default selection.
"""
# creating and defining commands
cmdyes = CmdMenuNode()
cmdyes.key = "yes"
cmdyes.aliases = ["y"]
# this will be executed in the context of the yes command (so
# self.caller will be available)
cmdyes.code = yescode + "\nself.caller.cmdset.delete('menucmdset')\ndel self.caller.db._menu_data"
cmdno = CmdMenuNode()
cmdno.key = "no"
cmdno.aliases = ["n"]
# this will be executed in the context of the no command
cmdno.code = nocode + "\nself.caller.cmdset.delete('menucmdset')\ndel self.caller.db._menu_data"
errorcmd = CmdMenuNode()
errorcmd.key = CMD_NOMATCH
errorcmd.code = "self.caller.msg('Please choose either Yes or No.')"
defaultcmd = CmdMenuNode()
defaultcmd.key = CMD_NOINPUT
defaultcmd.code = "self.caller.execute_cmd('%s')" % default
# creating cmdset (this will already have look/help commands)
yesnocmdset = MenuCmdSet()
yesnocmdset.add(cmdyes)
yesnocmdset.add(cmdno)
yesnocmdset.add(errorcmd)
yesnocmdset.add(defaultcmd)
    # assigning menu data flags to the caller.
caller.db._menu_data = {"help": "Please select Yes or No.",
"look": "Please select Yes or No."}
# assign cmdset and ask question
caller.cmdset.add(yesnocmdset)
if default == "Y":
prompt = "[Y]/N"
else:
prompt = "Y/[N]"
prompt = "%s %s: " % (question, prompt)
caller.msg(prompt)
#
# Menu command test
#
class CmdMenuTest(Command):
"""
testing menu module
Usage:
menu
menu yesno
This will test the menu system. The normal operation will produce
a small menu tree you can move around in. The 'yesno' option will
instead show a one-time yes/no question.
"""
key = "menu"
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"Testing the menu system"
if not self.args or self.args != "yesno":
# testing the full menu-tree system
node0 = MenuNode("START", text="Start node. Select one of the links below. Here the links are ordered in one column.",
links=["node1", "node2", "END"], linktexts=["Goto first node", "Goto second node", "Quit"])
node1 = MenuNode("node1", text="First node. This node shows letters instead of numbers for the choices.",
links=["END", "START"], linktexts=["Quit", "Back to start"], keywords=["q","b"])
node2 = MenuNode("node2", text="Second node. This node lists choices in two columns.",
links=["node3", "START"], linktexts=["Set an attribute", "Back to start"], cols=2)
node3 = MenuNode("node3", text="Attribute 'menutest' set on you. You can examine it (only works if you are allowed to use the examine command) or remove it. You can also quit and examine it manually.",
links=["node4", "node5", "node2", "END"], linktexts=["Remove attribute", "Examine attribute",
"Back to second node", "Quit menu"], cols=2,
code="self.caller.db.menutest='Testing!'")
node4 = MenuNode("node4", text="Attribute 'menutest' removed again.",
links=["node2"], linktexts=["Back to second node."], cols=2,
code="del self.caller.db.menutest")
node5 = MenuNode("node5", links=["node4", "node2"], linktexts=["Remove attribute", "Back to second node."], cols=2,
code="self.caller.msg('%s/%s = %s' % (self.caller.key, 'menutest', self.caller.db.menutest))")
menu = MenuTree(self.caller, nodes=(node0, node1, node2, node3, node4, node5))
menu.start()
else:
"Testing the yesno question"
prompt_yesno(self.caller, question="Please answer yes or no - Are you the master of this mud or not?",
yescode="self.caller.msg('{gGood for you!{n')",
nocode="self.caller.msg('{GNow you are just being modest ...{n')",
default="N")
|
|
"""
Predictions widget
"""
from collections import OrderedDict, namedtuple
import numpy
from PyQt4 import QtCore, QtGui
import Orange
from Orange.data import ContinuousVariable, DiscreteVariable
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
# Input slot for the Predictors channel
PredictorSlot = namedtuple(
"PredictorSlot",
["predictor", # The `Model` instance
"name", # Predictor name
"results"] # Computed prediction results or None.
)
def pname(predictor):
"""Return a predictor name."""
if hasattr(predictor, "name"):
return predictor.name
else:
return type(predictor).__name__
class OWPredictions(widget.OWWidget):
name = "Predictions"
icon = "icons/Predictions.svg"
priority = 200
description = "Displays predictions of models for a particular data set."
inputs = [("Data", Orange.data.Table, "set_data"),
("Predictors", Orange.classification.Model,
"set_predictor", widget.Multiple)]
outputs = [("Predictions", Orange.data.Table),
("Evaluation Results", Orange.evaluation.Results)]
show_attrs = Setting(True)
show_predictions = Setting(True)
show_probabilities = Setting(True)
def __init__(self):
super().__init__()
box = gui.widgetBox(self.controlArea, "Info")
self.infolabel = gui.widgetLabel(
box, "No data on input\nPredictors: 0\nTask: N/A")
self.infolabel.setMinimumWidth(150)
box = gui.widgetBox(self.controlArea, "Output")
        self.checkbox_attrs = gui.checkBox(
            box, self, "show_attrs", "Original data", callback=self.commit)
self.checkbox_class = gui.checkBox(
box, self, "show_predictions", "Predictions", callback=self.commit)
self.checkbox_prob = gui.checkBox(
box, self, "show_probabilities", "Probabilities",
callback=self.commit)
QtGui.qApp.processEvents()
QtCore.QTimer.singleShot(0, self.fix_size)
#: input data
self.data = None
#: A dict mapping input ids to PredictorSlot
self.predictors = OrderedDict()
#: A class variable (prediction target)
self.class_var = None
def fix_size(self):
self.adjustSize()
self.setFixedSize(self.size())
def set_data(self, data):
self.data = data
self.invalidate_predictions()
def set_predictor(self, predictor=None, id=None):
if id in self.predictors:
self.predictors[id] = self.predictors[id]._replace(
predictor=predictor, name=pname(predictor), results=None)
else:
self.predictors[id] = \
PredictorSlot(predictor, pname(predictor), None)
if predictor is not None:
self.class_var = predictor.domain.class_var
def handleNewSignals(self):
for inputid, pred in list(self.predictors.items()):
if pred.predictor is None:
del self.predictors[inputid]
elif pred.results is None:
if self.data is not None:
results = self.predict(pred.predictor, self.data)
self.predictors[inputid] = pred._replace(results=results)
if not self.predictors:
self.class_var = None
# Check for prediction target consistency
target_vars = set([p.predictor.domain.class_var
for p in self.predictors.values()])
if len(target_vars) > 1:
self.warning(0, "Inconsistent class variables")
else:
self.warning(0)
# Update the Info box text.
info = []
if self.data is not None:
info.append("Data: {} instances.".format(len(self.data)))
else:
info.append("Data: N/A")
if self.predictors:
info.append("Predictors: {}".format(len(self.predictors)))
else:
info.append("Predictors: N/A")
if self.class_var is not None:
if self.is_discrete(self.class_var):
info.append("Task: Classification")
self.checkbox_class.setEnabled(True)
self.checkbox_prob.setEnabled(True)
else:
info.append("Task: Regression")
self.checkbox_class.setEnabled(False)
self.checkbox_prob.setEnabled(False)
else:
info.append("Task: N/A")
self.infolabel.setText("\n".join(info))
self.commit()
def invalidate_predictions(self):
for inputid, pred in list(self.predictors.items()):
self.predictors[inputid] = pred._replace(results=None)
def commit(self):
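        """Assemble and send the widget outputs.
        Builds a data table with the original attributes (optional) plus the
        predicted values and/or class probabilities for every predictor, and,
        when the data's class variable matches the predictors', an evaluation
        Results object.
        """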
if self.data is None or not self.predictors:
self.send("Predictions", None)
self.send("Evaluation Results", None)
return
predictor = next(iter(self.predictors.values())).predictor
class_var = predictor.domain.class_var
classification = self.is_discrete(class_var)
newattrs = []
newcolumns = []
slots = list(self.predictors.values())
if classification:
if self.show_predictions:
mc = [DiscreteVariable(name=p.name, values=class_var.values)
for p in slots]
newattrs.extend(mc)
newcolumns.extend(p.results[0].reshape((-1, 1))
for p in slots)
if self.show_probabilities:
for p in slots:
m = [ContinuousVariable(name="%s(%s)" % (p.name, value))
for value in class_var.values]
newattrs.extend(m)
newcolumns.extend(p.results[1] for p in slots)
else:
# regression
mc = [ContinuousVariable(name=p.name)
for p in self.predictors.values()]
newattrs.extend(mc)
newcolumns.extend(p.results[0].reshape((-1, 1))
for p in slots)
if self.show_attrs:
X = [self.data.X]
attrs = list(self.data.domain.attributes) + newattrs
else:
X = []
attrs = newattrs
domain = Orange.data.Domain(attrs, self.data.domain.class_var,
metas=self.data.domain.metas)
if newcolumns:
X.extend(numpy.atleast_2d(cols) for cols in newcolumns)
if X:
X = numpy.hstack(tuple(X))
else:
X = numpy.zeros((len(self.data), 0))
predictions = Orange.data.Table.from_numpy(
domain, X, self.data.Y, metas=self.data.metas)
predictions.name = self.data.name
results = None
if self.data.domain.class_var == class_var:
N = len(self.data)
results = Orange.evaluation.Results(self.data, store_data=True)
results.folds = None
results.row_indices = numpy.arange(N)
results.actual = self.data.Y.ravel()
results.predicted = numpy.vstack(
tuple(p.results[0] for p in slots))
if classification:
results.probabilities = numpy.array(
[p.results[1] for p in slots])
results.learner_names = [pname(p.predictor) for p in slots]
self.send("Predictions", predictions)
self.send("Evaluation Results", results)
@classmethod
def predict(cls, predictor, data):
if isinstance(predictor.domain.class_var, DiscreteVariable):
return cls.predict_discrete(predictor, data)
elif isinstance(predictor.domain.class_var, ContinuousVariable):
return cls.predict_continuous(predictor, data)
@staticmethod
def predict_discrete(predictor, data):
return predictor(data, Orange.classification.Model.ValueProbs)
@staticmethod
def predict_continuous(predictor, data):
values = predictor(data, Orange.classification.Model.Value)
return values, [None] * len(data)
@staticmethod
def is_discrete(var):
return isinstance(var, Orange.data.DiscreteVariable)
if __name__ == "__main__":
app = QtGui.QApplication([])
w = OWPredictions()
data = Orange.data.Table("iris")
svm_clf = Orange.classification.SVMLearner(probability=True)(data)
lr_clf = Orange.classification.LogisticRegressionLearner()(data)
w.set_data(data)
w.set_predictor(svm_clf, 0)
w.set_predictor(lr_clf, 1)
w.handleNewSignals()
w.show()
    app.exec_()
w.saveSettings()
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import sys
if sys.version_info < (3,):
from httplib import (
HTTPSConnection,
HTTPConnection,
HTTP_PORT,
HTTPS_PORT,
)
else:
from http.client import (
HTTPSConnection,
HTTPConnection,
HTTP_PORT,
HTTPS_PORT,
)
from azure.http import HTTPError, HTTPResponse
from azure import _USER_AGENT_STRING
class _HTTPClient(object):
'''
    Takes a request, sends it to the cloud service, and returns the response.
'''
def __init__(self, service_instance, cert_file=None, account_name=None,
account_key=None, service_namespace=None, issuer=None,
protocol='https'):
'''
service_instance: service client instance.
cert_file:
certificate file name/location. This is only used in hosted
service management.
account_name: the storage account.
account_key:
the storage account access key for storage services or servicebus
access key for service bus service.
service_namespace: the service namespace for service bus.
issuer: the issuer for service bus service.
'''
self.service_instance = service_instance
self.status = None
self.respheader = None
self.message = None
self.cert_file = cert_file
self.account_name = account_name
self.account_key = account_key
self.service_namespace = service_namespace
self.issuer = issuer
self.protocol = protocol
self.proxy_host = None
self.proxy_port = None
self.proxy_user = None
self.proxy_password = None
self.use_httplib = self.should_use_httplib()
def should_use_httplib(self):
if sys.platform.lower().startswith('win') and self.cert_file:
# On Windows, auto-detect between Windows Store Certificate
# (winhttp) and OpenSSL .pem certificate file (httplib).
#
# We used to only support certificates installed in the Windows
# Certificate Store.
# cert_file example: CURRENT_USER\my\CertificateName
#
# We now support using an OpenSSL .pem certificate file,
# for a consistent experience across all platforms.
# cert_file example: account\certificate.pem
#
# When using OpenSSL .pem certificate file on Windows, make sure
# you are on CPython 2.7.4 or later.
# If it's not an existing file on disk, then treat it as a path in
# the Windows Certificate Store, which means we can't use httplib.
if not os.path.isfile(self.cert_file):
return False
return True
def set_proxy(self, host, port, user, password):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self.proxy_host = host
self.proxy_port = port
self.proxy_user = user
self.proxy_password = password
def get_connection(self, request):
''' Create connection for the request. '''
protocol = request.protocol_override \
if request.protocol_override else self.protocol
target_host = request.host
target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT
if not self.use_httplib:
import azure.http.winhttp
connection = azure.http.winhttp._HTTPConnection(
target_host, cert_file=self.cert_file, protocol=protocol)
proxy_host = self.proxy_host
proxy_port = self.proxy_port
else:
if ':' in target_host:
target_host, _, target_port = target_host.rpartition(':')
if self.proxy_host:
proxy_host = target_host
proxy_port = target_port
host = self.proxy_host
port = self.proxy_port
else:
host = target_host
port = target_port
if protocol == 'http':
connection = HTTPConnection(host, int(port))
else:
connection = HTTPSConnection(
host, int(port), cert_file=self.cert_file)
if self.proxy_host:
headers = None
if self.proxy_user and self.proxy_password:
                auth = base64.b64encode(
                    "{0}:{1}".format(self.proxy_user,
                                     self.proxy_password).encode('utf-8')
                ).decode('ascii')
                headers = {'Proxy-Authorization': 'Basic {0}'.format(auth)}
connection.set_tunnel(proxy_host, int(proxy_port), headers)
return connection
def send_request_headers(self, connection, request_headers):
if self.use_httplib:
if self.proxy_host:
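                # Replace the Host header that httplib already buffered
                # (which would otherwise point at the proxy) with one
                # addressed to the tunnel target, i.e. the real service
                # host:port.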
for i in connection._buffer:
if i.startswith("Host: "):
connection._buffer.remove(i)
connection.putheader(
'Host', "{0}:{1}".format(connection._tunnel_host,
connection._tunnel_port))
for name, value in request_headers:
if value:
connection.putheader(name, value)
connection.putheader('User-Agent', _USER_AGENT_STRING)
connection.endheaders()
def send_request_body(self, connection, request_body):
if request_body:
assert isinstance(request_body, bytes)
connection.send(request_body)
elif (not isinstance(connection, HTTPSConnection) and
not isinstance(connection, HTTPConnection)):
connection.send(None)
def perform_request(self, request):
''' Sends request to cloud service server and return the response. '''
connection = self.get_connection(request)
try:
connection.putrequest(request.method, request.path)
if not self.use_httplib:
if self.proxy_host and self.proxy_user:
connection.set_proxy_credentials(
self.proxy_user, self.proxy_password)
self.send_request_headers(connection, request.headers)
self.send_request_body(connection, request.body)
resp = connection.getresponse()
self.status = int(resp.status)
self.message = resp.reason
self.respheader = headers = resp.getheaders()
# for consistency across platforms, make header names lowercase
for i, value in enumerate(headers):
headers[i] = (value[0].lower(), value[1])
respbody = None
if resp.length is None:
respbody = resp.read()
elif resp.length > 0:
respbody = resp.read(resp.length)
response = HTTPResponse(
int(resp.status), resp.reason, headers, respbody)
if self.status >= 300:
raise HTTPError(self.status, self.message,
self.respheader, respbody)
return response
finally:
connection.close()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training related logic for Keras model in TF 2.0 context.
Note that all the code under this module is under active development, please DO
NOT use it unless you are really sure what you are doing.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.framework import errors
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine import training_v2_utils
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import traceme
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
# The list of DataAdapters that support validation_split; only numpy and data
# tensors support validation_split for now.
_ADAPTER_FOR_VALIDATION_SPLIT = [data_adapter.TensorLikeDataAdapter,
data_adapter.GenericArrayLikeDataAdapter]
# The list of DataAdapters that support model._standardize_user_data. Currently
# keras.Sequence/python generator inputs raise an error when calling
# model._standardize_user_data; this should be updated in a future CL, e.g. the
# dataset/generator/sequence input will be peeked and processed by
# model._standardize_user_data().
_ADAPTER_FOR_STANDARDIZE_USER_DATA = [
data_adapter.TensorLikeDataAdapter,
data_adapter.GenericArrayLikeDataAdapter,
data_adapter.CompositeTensorDataAdapter
]
def run_one_epoch(model,
iterator,
execution_function,
dataset_size=None,
batch_size=None,
strategy=None,
steps_per_epoch=None,
num_samples=None,
mode=ModeKeys.TRAIN,
training_context=None,
total_epochs=None):
"""Run the execution function with the data from iterator.
Given the dataset iterator and execution function, get the data from iterator
and call it with the execution function to get the result (metric/loss).
  It will run for steps_per_epoch or until the iterator is fully consumed.
Args:
model: The keras model to run.
iterator: the dataset iterator to fetch the data.
execution_function: a tf.function that can be called with data.
dataset_size: the size of iterator, None when unknown.
batch_size: The size of the current batch.
strategy: the distribution strategy instance from the model.
steps_per_epoch: the number of steps to run for the epoch.
num_samples: the number of samples for the whole epoch if known. This can be
used to calculate the final partial batch, and scale the loss.
mode: the mode for the current epoch.
training_context: the context that contains callbacks and progress bar.
total_epochs: the total number of epochs that will be run.
      Used when throwing an error if the iterator unexpectedly
      reaches its end.
Returns:
The loss and metric value from the model.
"""
# Only use the sample to count if there is a partial batch at the end.
use_steps = num_samples is None
if mode == ModeKeys.PREDICT:
aggregator = training_utils.OutputsAggregator(
use_steps=use_steps,
steps=steps_per_epoch,
num_samples=num_samples,
batch_size=batch_size)
else:
aggregator = training_utils.MetricsAggregator(
use_steps=use_steps, steps=steps_per_epoch, num_samples=num_samples)
callbacks = training_context.callbacks
progbar = training_context.progbar
if callbacks.model.stop_training:
return
target_steps = steps_per_epoch or np.inf
step = 0
while step < target_steps:
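    # Determine the logical size of this batch for logging and aggregation:
    # when counting in steps each batch counts as 1; otherwise use the real
    # batch size, where the final batch may be a smaller partial one.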
if use_steps:
current_batch_size = 1
elif step < target_steps - 1:
current_batch_size = batch_size
else:
current_batch_size = num_samples - step * batch_size
with training_context.on_batch(
step=step, mode=mode, size=current_batch_size) as batch_logs:
try:
batch_outs = execution_function(iterator)
except (StopIteration, errors.OutOfRangeError):
# TODO(kaftan): File bug about tf function and errors.OutOfRangeError?
# Are there any other C++ errors tf function should recapture?
        # The only acceptable case here is that the input has an unknown
        # length, and is configured to fully consume it.
if (dataset_size is None
and steps_per_epoch is None
and step > 0):
# The input passed by the user ran out of batches.
          # Now we know the cardinality of the input (dataset or generator).
steps_per_epoch = step
aggregator.steps = steps_per_epoch
if mode == ModeKeys.TRAIN:
progbar.params['steps'] = steps_per_epoch
progbar.progbar.target = steps_per_epoch
else:
callbacks.model.stop_training = True
logging.warning(
'Your input ran out of data; interrupting training. '
'Make sure that your dataset or generator can generate at '
'least `steps_per_epoch * epochs` batches (in this case, '
'{} batches). You may need to use the repeat() function '
'when building your dataset.'.format(
total_epochs * steps_per_epoch))
        # In either case, break out of the loop for the training batch.
        # Also note in the training_context that data inputs are exhausted, so
        # all the post-batch hooks can be skipped.
batch_logs['data_exhausted'] = True
break
if mode != ModeKeys.PREDICT:
data_batch_size = batch_outs['batch_size']
batch_outs = (batch_outs['total_loss'] + batch_outs['output_losses']
+ batch_outs['metrics'])
if current_batch_size != data_batch_size:
batch_logs['size'] = data_batch_size
current_batch_size = data_batch_size
else:
batch_outs = training_v2_utils._aggregate_predict_results(
strategy, batch_outs, model)
if step == 0:
aggregator.create(batch_outs)
if use_steps:
aggregator.aggregate(batch_outs)
else:
aggregator.aggregate(
batch_outs,
batch_start=step * batch_size,
batch_end=step * batch_size + current_batch_size)
cbks.make_logs(model, batch_logs, batch_outs, mode)
step += 1
if callbacks.model.stop_training:
break
# End of an epoch.
aggregator.finalize()
return aggregator.results
class Loop(training_utils.TrainingLoop):
"""The training loop for the TF 2.0.
This class has some existing assumption for runtime, eg eager by default,
have distribution strategy, etc.
"""
def fit(
self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1,
callbacks=None, validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None, initial_epoch=0,
steps_per_epoch=None, validation_steps=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs):
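    """Fit `model` under the TF 2.0 execution path.
    Builds data adapters for the (optionally split) training and validation
    data, distributes the datasets under the model's strategy, and runs
    `run_one_epoch` for training and periodic validation each epoch.
    """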
batch_size = model._validate_or_infer_batch_size(
batch_size, steps_per_epoch, x)
strategy = model.distribute_strategy
batch_size, steps_per_epoch = dist_utils.process_batch_and_step_size(
strategy,
x,
batch_size,
steps_per_epoch,
ModeKeys.TRAIN,
validation_split=validation_split)
dist_utils.validate_callbacks(input_callbacks=callbacks,
optimizer=model.optimizer)
# Enter tf.distribute.Strategy scope.
with strategy.scope():
training_data_adapter, validation_adapter = _process_training_inputs(
model,
x,
y,
batch_size=batch_size,
epochs=epochs,
sample_weights=sample_weight,
class_weights=class_weight,
validation_split=validation_split,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
validation_data=validation_data,
validation_steps=validation_steps,
distribution_strategy=strategy,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
total_samples = _get_total_number_of_samples(training_data_adapter)
use_sample = total_samples is not None
do_validation = (validation_adapter is not None)
recreate_training_iterator = (
training_data_adapter.should_recreate_iterator())
if not steps_per_epoch:
# TODO(b/139762795): Add step inference for when steps is None to
# prevent end of sequence warning message.
steps_per_epoch = training_data_adapter.get_size()
# tf.print('{} on {} steps.'.format(ModeKeys.TRAIN, steps_per_epoch))
training_context = TrainingContext()
training_dataset = training_data_adapter.get_dataset()
# Raise an error if steps_per_epoch isn't specified but the dataset
# is infinite.
# TODO(scottzhu): This check should probably happen in the adapter
inferred_steps = training_utils.infer_steps_for_dataset(
model,
training_dataset,
steps_per_epoch,
steps_name='steps_per_epoch',
epochs=0)
steps_per_epoch = (
inferred_steps if steps_per_epoch is None else steps_per_epoch)
training_dataset = strategy.experimental_distribute_dataset(
training_dataset)
training_function = training_v2_utils._get_or_make_execution_function(
model, ModeKeys.TRAIN)
training_data_iter = None
if do_validation:
validation_dataset = validation_adapter.get_dataset()
if not validation_steps:
# Raise an error if validation_steps isn't specified but the
# validation dataset is infinite.
validation_steps = (
validation_adapter.get_size() or
training_utils.infer_steps_for_dataset(
model,
validation_dataset,
validation_steps,
steps_name='validation_steps'))
eval_function = training_v2_utils._get_or_make_execution_function(
model, ModeKeys.TEST)
eval_data_iter = None
validation_dataset = strategy.experimental_distribute_dataset(
validation_dataset)
val_total_samples = _get_total_number_of_samples(validation_adapter)
else:
val_total_samples = None
if verbose and (total_samples or steps_per_epoch):
_print_train_info(total_samples, steps_per_epoch, val_total_samples,
validation_steps)
training_callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=total_samples or steps_per_epoch,
count_mode='samples' if use_sample else 'steps',
verbose=0, # Handle ProgBarLogger separately in this loop.
mode=ModeKeys.TRAIN)
with training_context.on_start(model, training_callbacks, use_sample,
verbose, ModeKeys.TRAIN):
initial_epoch = model._maybe_load_initial_epoch_from_ckpt(
initial_epoch, ModeKeys.TRAIN)
for epoch in range(initial_epoch, epochs):
if training_context.callbacks.model.stop_training:
break
# Training
with training_context.on_epoch(epoch, ModeKeys.TRAIN) as epoch_logs:
model.reset_metrics()
if training_data_iter is None or recreate_training_iterator:
if training_data_iter is not None and ds_context.has_strategy():
# TODO(kaftan): remove this when MultiDeviceIterator is a
## compositetensor (unless this is more efficient)
training_data_iter._initializer # pylint: disable=pointless-statement
else:
training_data_iter = iter(training_dataset)
training_result = run_one_epoch(
model,
training_data_iter,
training_function,
dataset_size=training_data_adapter.get_size(),
batch_size=training_data_adapter.batch_size(),
strategy=strategy,
steps_per_epoch=steps_per_epoch,
num_samples=total_samples,
mode=ModeKeys.TRAIN,
training_context=training_context,
total_epochs=epochs)
cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
# In the case of steps_per_epoch = None, the final cardinality will
# be determined when the inputs are fully consumed (eg dataset or
# generator). Update the steps_per_epoch to the new value.
if (steps_per_epoch is None
and training_context.progbar.progbar.target is not None):
steps_per_epoch = training_context.progbar.progbar.target
# Evaluation
if (do_validation and
training_utils.should_run_validation(validation_freq, epoch) and
not training_callbacks.model.stop_training):
if eval_data_iter is not None and ds_context.has_strategy():
# TODO(kaftan): remove this when MultiDeviceIterator is a
## compositetensor (unless this is more efficient)
eval_data_iter._initializer # pylint: disable=pointless-statement
else:
eval_data_iter = iter(validation_dataset)
validation_callbacks = cbks.configure_callbacks(
training_callbacks,
model,
batch_size=batch_size,
epochs=1,
steps_per_epoch=validation_steps,
samples=val_total_samples or validation_steps,
count_mode='samples' if use_sample else 'steps',
verbose=0, # Handle ProgBarLogger separately in this loop.
mode=ModeKeys.TEST)
eval_context = TrainingContext()
with eval_context.on_start(
model,
validation_callbacks,
use_sample,
verbose=0,
mode=ModeKeys.TEST):
with eval_context.on_epoch(epoch, ModeKeys.TEST):
model.reset_metrics()
eval_result = run_one_epoch(
model,
eval_data_iter,
eval_function,
dataset_size=validation_adapter.get_size(),
batch_size=validation_adapter.batch_size(),
strategy=strategy,
steps_per_epoch=validation_steps,
num_samples=val_total_samples,
mode=ModeKeys.TEST,
training_context=eval_context,
total_epochs=1)
cbks.make_logs(model, epoch_logs, eval_result, ModeKeys.TEST,
prefix='val_')
return model.history
def _model_iteration(
self, model, mode, x=None, y=None, batch_size=None, verbose=1,
sample_weight=None, steps=None, callbacks=None, max_queue_size=10,
workers=1, use_multiprocessing=False, **kwargs):
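    """Shared implementation for evaluate() and predict(): run a single epoch
    over the inputs in the given mode and return the aggregated results.
    """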
batch_size = model._validate_or_infer_batch_size(
batch_size, steps, x)
strategy = model.distribute_strategy
batch_size, steps = dist_utils.process_batch_and_step_size(
strategy, x, batch_size, steps, mode)
dist_utils.validate_callbacks(input_callbacks=callbacks,
optimizer=model.optimizer)
# Enter tf.distribute.Strategy scope.
with strategy.scope():
adapter = _process_inputs(
model,
mode,
x,
y,
batch_size=batch_size,
sample_weights=sample_weight,
steps=steps,
distribution_strategy=strategy,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
total_samples = _get_total_number_of_samples(adapter)
use_sample = total_samples is not None
dataset = adapter.get_dataset()
if not steps:
# Raise an error if `steps` isn't specified but the dataset
# is infinite.
steps = adapter.get_size() or training_utils.infer_steps_for_dataset(
model, dataset, steps, steps_name='steps')
# tf.print('{} on {} steps.'.format(ModeKeys.TRAIN, steps_per_epoch))
training_context = TrainingContext()
if training_v2_utils._should_add_batch_index_to_element(strategy, mode):
dataset = training_v2_utils._add_batch_index_to_element(dataset)
dataset = strategy.experimental_distribute_dataset(dataset)
execution_function = training_v2_utils._get_or_make_execution_function(
model, mode)
data_iterator = iter(dataset)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=False,
batch_size=batch_size,
epochs=1,
steps_per_epoch=steps,
samples=total_samples,
count_mode='samples' if use_sample else 'steps',
verbose=0, # Handle ProgBarLogger separately in this loop.
mode=mode)
with training_context.on_start(
model, callbacks, use_sample, verbose, mode):
with training_context.on_epoch(0, mode) as epoch_logs:
model.reset_metrics()
result = run_one_epoch(
model,
data_iterator,
execution_function,
dataset_size=adapter.get_size(),
batch_size=adapter.batch_size(),
strategy=strategy,
steps_per_epoch=steps,
num_samples=total_samples,
mode=mode,
training_context=training_context,
total_epochs=1)
cbks.make_logs(model, epoch_logs, result, mode)
if len(result) == 1:
result = result[0]
return result
def evaluate(
self, model, x=None, y=None, batch_size=None, verbose=1,
sample_weight=None, steps=None, callbacks=None, max_queue_size=10,
workers=1, use_multiprocessing=False, **kwargs):
return self._model_iteration(
model, ModeKeys.TEST, x=x, y=y, batch_size=batch_size, verbose=verbose,
sample_weight=sample_weight, steps=steps, callbacks=callbacks,
max_queue_size=max_queue_size, workers=workers,
use_multiprocessing=use_multiprocessing, **kwargs)
def predict(self, model, x, batch_size=None, verbose=0, steps=None,
callbacks=None, max_queue_size=10, workers=1,
use_multiprocessing=False, **kwargs):
return self._model_iteration(
model, ModeKeys.PREDICT, x=x, batch_size=batch_size, verbose=verbose,
steps=steps, callbacks=callbacks, max_queue_size=max_queue_size,
workers=workers, use_multiprocessing=use_multiprocessing, **kwargs)
def _process_training_inputs(model,
x,
y,
batch_size=None,
epochs=1,
sample_weights=None,
class_weights=None,
steps_per_epoch=None,
validation_split=0.,
validation_data=None,
validation_steps=None,
shuffle=True,
distribution_strategy=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Process the data input for fit() with respect to validation_split."""
if validation_split and 0. < validation_split < 1. and validation_data:
raise ValueError('validation_data and validation_split cannot be used '
'at same time.')
adapter_cls = data_adapter.select_data_adapter(x, y)
# Handle validation_split, we want to split the data and get the training
# section before we give it to data adapter.
if validation_split and 0. < validation_split < 1.:
if adapter_cls not in _ADAPTER_FOR_VALIDATION_SPLIT:
raise ValueError(
'`validation_split` argument is not supported when '
'data adapter is {}. Received: x={}, validation_split={}'.format(
adapter_cls, x, validation_split))
# Retrieve the training section from x and y, and then construct dataset
# from it.
x, y, sample_weights = model._standardize_user_data(
x,
y,
sample_weight=sample_weights,
class_weight=class_weights,
batch_size=batch_size,
check_steps=False,
steps=steps_per_epoch)
(x, y, sample_weights,
val_x, val_y,
val_sample_weights) = training_utils.split_training_and_validation_data(
x, y, sample_weights, validation_split)
sample_weight_modes = [
e.sample_weight_mode for e in model._training_endpoints
]
train_adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
shuffle=shuffle,
distribution_strategy=distribution_strategy)
val_adapter = adapter_cls(
val_x,
val_y,
steps=validation_steps,
sample_weights=val_sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
distribution_strategy=distribution_strategy)
else:
train_adapter = _process_inputs(
model,
ModeKeys.TRAIN,
x,
y,
sample_weights=sample_weights,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs,
class_weights=class_weights,
shuffle=shuffle,
distribution_strategy=distribution_strategy,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
val_adapter = None
if validation_data:
(val_x, val_y,
val_sample_weights) = training_utils.unpack_validation_data(
validation_data, raise_if_ambiguous=False)
# For eval data, we use a representative batch size of the
# training data if batch_size was unknown.
# This is useful for generator/sequence training data input with numpy
# validation data input.
if not batch_size:
batch_size = train_adapter.representative_batch_size()
val_adapter = _process_inputs(
model,
ModeKeys.TEST,
val_x,
val_y,
steps=validation_steps,
sample_weights=val_sample_weights,
batch_size=batch_size,
class_weights=class_weights,
distribution_strategy=distribution_strategy)
elif validation_steps:
raise ValueError('`validation_steps` should not be specified if '
'`validation_data` is None.')
return train_adapter, val_adapter
def _process_inputs(model,
mode,
x,
y,
batch_size=None,
epochs=1,
sample_weights=None,
class_weights=None,
shuffle=False,
steps=None,
distribution_strategy=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Process the inputs for fit/eval/predict()."""
adapter_cls = data_adapter.select_data_adapter(x, y)
standardize = functools.partial(
model._standardize_user_data,
class_weight=class_weights,
batch_size=batch_size,
check_steps=False,
steps=steps)
if adapter_cls in _ADAPTER_FOR_STANDARDIZE_USER_DATA:
standardize_function = None
x, y, sample_weights = standardize(
x, y, sample_weight=sample_weights)
elif adapter_cls is data_adapter.ListsOfScalarsDataAdapter:
standardize_function = standardize
else:
def standardize_function(dataset):
"""Data adapters can standardize when appropriate."""
# First we call _standardize_user_data with the dataset since that has
# enough structure to build the model.
if not model._is_compiled:
# We don't actually care about the values of these attributes, but they
# are only created in compile and are accessed in _standardize_user_data
model._training_endpoints = getattr(model, '_training_endpoints', [])
model.sample_weight_mode = getattr(model, 'sample_weight_mode', None)
standardize(dataset, extract_tensors_from_dataset=False)
# Then we map using only the tensor standardization portion.
def map_fn(x, y=None, sample_weights=None):
"""Tensor manipulation portion of standardization for Dataset.map."""
if (y is None and sample_weights is None):
# namedtuples are forbidden because it is ambiguous if they should be
# unpacked. If y or sample_weights is present then `x` was not the
# top level structure, and the correct behavior is unambiguous.
data_adapter.assert_not_namedtuple(x)
standardized = model._standardize_tensors(
x, y, sample_weights,
run_eagerly=False,
dict_inputs=isinstance(x, dict),
is_dataset=False,
class_weight=class_weights,
batch_size=None)
x, y, sample_weights = nest._list_to_tuple(standardized)
if y is None:
return (x,)
if sample_weights is None:
return x, y
return x, y, sample_weights
return dataset.map(map_fn, num_parallel_calls=dataset_ops.AUTOTUNE)
if mode == ModeKeys.PREDICT:
sample_weight_modes = None
else:
sample_weight_modes = [
e.sample_weight_mode for e in model._training_endpoints
] or model.sample_weight_mode
adapter = adapter_cls(
x,
y,
standardize_function=standardize_function,
batch_size=batch_size,
epochs=epochs,
steps=steps,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
shuffle=shuffle,
distribution_strategy=distribution_strategy,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
return adapter
def _get_total_number_of_samples(adapter):
if not adapter.get_size() or not adapter.batch_size():
return None
total_sample = adapter.get_size() * adapter.batch_size()
if adapter.has_partial_batch():
total_sample -= (adapter.batch_size() - adapter.partial_batch_size())
return total_sample
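# Worked example (hypothetical numbers): with get_size() == 10 batches,
# batch_size() == 32 and a partial final batch of 5 samples, the total is
# 10 * 32 - (32 - 5) = 293 samples.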
def _print_train_info(total_samples, steps, val_total_samples, val_steps):
increment = 'samples' if total_samples else 'steps'
conjunction = 'on' if total_samples else 'for'
msg = 'Train {} {} {}'.format(conjunction, total_samples or steps, increment)
if val_total_samples or val_steps:
increment = 'samples' if val_total_samples else 'steps'
conjunction = 'on' if val_total_samples else 'for'
msg += ', validate {} {} {}'.format(conjunction, val_total_samples or
val_steps, increment)
print(msg)
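# Example output (hypothetical values): total_samples=60000 and
# val_total_samples=10000 print
#   Train on 60000 samples, validate on 10000 samples
# while steps-only input (steps=100, val_steps=20) prints
#   Train for 100 steps, validate for 20 steps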
class TrainingContext(object):
"""Utility object that wrap around callbacks and progress bars."""
@tf_contextlib.contextmanager
def on_start(self, model, callbacks=None, use_samples=False, verbose=0,
mode=ModeKeys.TRAIN):
"""Provide a scope for the whole training process."""
# TODO(omalleyt): Handle ProgBar as part of Callbacks once hooks are ready.
progbar = training_utils.get_progbar(
model, 'samples' if use_samples else 'steps')
progbar.params = callbacks.params
progbar.params['verbose'] = verbose
callbacks.model.stop_training = False
callbacks._call_begin_hook(mode)
progbar.on_train_begin()
    # Cache these two instances so they can be used in other functions.
self.callbacks = callbacks
self.progbar = progbar
try:
yield
model._successful_loop_finish = True
finally:
# End of all epochs
self.callbacks._call_end_hook(mode)
@tf_contextlib.contextmanager
def on_epoch(self, epoch=0, mode=ModeKeys.TRAIN):
"""Provide a scope for running one epoch."""
epoch_logs = {}
if mode == ModeKeys.TRAIN:
self.callbacks.on_epoch_begin(epoch, epoch_logs)
self.progbar.on_epoch_begin(epoch, epoch_logs)
try:
yield epoch_logs
finally:
if mode == ModeKeys.TRAIN:
# Epochs only apply to `fit`.
self.callbacks.on_epoch_end(epoch, epoch_logs)
self.progbar.on_epoch_end(epoch, epoch_logs)
@tf_contextlib.contextmanager
def on_batch(self, step=0, mode=ModeKeys.TRAIN, size=1):
"""Provide a scope for running one batch."""
with traceme.TraceMe(
'TraceContext', graph_type=mode, step_num=step, batch_size=size):
batch_logs = {'batch': step, 'size': size}
self.callbacks._call_batch_hook(
mode, 'begin', step, batch_logs)
self.progbar.on_batch_begin(step, batch_logs)
try:
yield batch_logs
finally:
if not batch_logs.pop('data_exhausted', False):
self.callbacks._call_batch_hook(
mode, 'end', step, batch_logs)
self.progbar.on_batch_end(step, batch_logs)
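# Rough usage sketch for TrainingContext (illustrative only; `model`,
# `callbacks`, the loop bounds and `run_one_training_step` are assumed to be
# defined elsewhere):
#
#   training_context = TrainingContext()
#   with training_context.on_start(model, callbacks, use_samples=True):
#     for epoch in range(epochs):
#       with training_context.on_epoch(epoch, ModeKeys.TRAIN) as epoch_logs:
#         for step in range(steps_per_epoch):
#           with training_context.on_batch(step, ModeKeys.TRAIN) as batch_logs:
#             batch_logs.update(run_one_training_step())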
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import datetime
from math import ceil
from flask import jsonify, request
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.plugins.api.series import NoResultFound
from flexget.plugins.filter import movie_queue as mq
from flexget.utils import qualities
movie_queue_api = api.namespace('movie_queue', description='Movie Queue operations (DEPRECATED)')
default_error_schema = {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'message': {'type': 'string'}
}
}
default_error_schema = api.schema('default_error_schema', default_error_schema)
empty_response = api.schema('empty', {'type': 'object'})
movie_object = {
'type': 'object',
'properties': {
'added_date': {'type': 'string'},
'is_downloaded': {'type': 'boolean'},
'download_date': {'type': 'string'},
'entry_original_url': {'type': 'string'},
'entry_title': {'type': 'string'},
'entry_url': {'type': 'string'},
'id': {'type': 'integer'},
'imdb_id': {'type': 'string'},
'quality': {'type': 'string'},
'title': {'type': 'string'},
'tmdb_id': {'type': 'string'},
'queue_name': {'type': 'string'}
}
}
movie_object_schema = api.schema('movie_object', movie_object)
movie_queue_schema = {
'type': 'object',
'properties': {
'movies': {
'type': 'array',
'items': movie_object
},
'number_of_movies': {'type': 'integer'},
'total_number_of_pages': {'type': 'integer'},
'page_number': {'type': 'integer'}
}
}
movie_queue_schema = api.schema('list_movie_queue', movie_queue_schema)
movie_queue_parser = api.parser()
movie_queue_parser.add_argument('page', type=int, default=1, help='Page number')
movie_queue_parser.add_argument('max', type=int, default=100, help='Movies per page')
movie_queue_parser.add_argument('queue_name', default='default', help='Filter by movie queue name')
movie_queue_parser.add_argument('is_downloaded', type=inputs.boolean, help='Filter list by movies download status')
movie_queue_parser.add_argument('sort_by', choices=('added', 'is_downloaded', 'id', 'title', 'download_date'),
default='added', help="Sort response by attribute")
movie_queue_parser.add_argument('order', choices=('asc', 'desc'), default='desc', help="Sorting order")
movie_add_input_schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'imdb_id': {'type': 'string', 'pattern': r'tt\d{7}'},
'tmdb_id': {'type': 'integer'},
'quality': {'type': 'string', 'format': 'quality_requirements', 'default': 'any'},
'queue_name': {'type': 'string', 'default': 'default'}
},
'anyOf': [
{'required': ['title']},
{'required': ['imdb_id']},
{'required': ['tmdb_id']}
]
}
movie_add_input_schema = api.schema('movie_add_input_schema', movie_add_input_schema)
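# Example request body accepted by this schema (values are illustrative, not
# taken from a real queue):
#   {"imdb_id": "tt0111161", "quality": "720p", "queue_name": "default"}
# At least one of `title`, `imdb_id` or `tmdb_id` must be supplied, and
# `imdb_id` must match the pattern tt\d{7}.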
movie_edit_input_schema = {
'type': 'object',
'properties': {
'quality': {'type': 'string', 'format': 'quality_requirements'},
'reset_downloaded': {'type': 'boolean', 'default': True}
},
'anyOf': [
{'required': ['quality']},
{'required': ['reset_downloaded']}
]
}
movie_edit_input_schema = api.schema('movie_edit_input_schema', movie_edit_input_schema)
@api.deprecated
@movie_queue_api.route('/')
class MovieQueueAPI(APIResource):
@api.response(404, 'Page does not exist', model=default_error_schema)
@api.response(code_or_apierror=200, model=movie_queue_schema)
@api.doc(parser=movie_queue_parser, description="Get flexget's queued movies")
def get(self, session=None):
""" List queued movies """
args = movie_queue_parser.parse_args()
page = args['page']
max_results = args['max']
downloaded = args['is_downloaded']
sort_by = args['sort_by']
order = args['order']
queue_name = args['queue_name']
        # Convert the sort order to the boolean `reverse` flag ('desc' is the
        # default and sorts in descending order).
        order = (order == 'desc')
raw_movie_queue = mq.queue_get(session=session, downloaded=downloaded, queue_name=queue_name)
converted_movie_queue = [movie.to_dict() for movie in raw_movie_queue]
        sorted_movie_list = sorted(
            converted_movie_queue,
            key=lambda movie: movie[sort_by] if movie[sort_by] else datetime.datetime.min,
            reverse=order)
count = len(sorted_movie_list)
pages = int(ceil(count / float(max_results)))
if page > pages and pages != 0:
return {'status': 'error',
'message': 'page %s does not exist' % page}, 404
start = (page - 1) * max_results
finish = start + max_results
if finish > count:
finish = count
movie_items = []
for movie_number in range(start, finish):
movie_items.append(sorted_movie_list[movie_number])
return jsonify({
'movies': movie_items,
'number_of_movies': count,
'page_number': page,
'total_number_of_pages': pages
})
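        # Pagination example (illustrative numbers): with 250 queued movies
        # and max=100, pages = ceil(250 / 100.0) = 3; page=3 returns the slice
        # [200:250], while page=4 triggers the 404 response above.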
@api.response(500, 'Movie already in queue', model=default_error_schema)
@api.response(201, 'Movie successfully added', model=movie_object_schema)
@api.validate(movie_add_input_schema)
@api.doc(description="Add a movie to flexget's queued movies")
def post(self, session=None):
""" Add movies to movie queue """
kwargs = request.json
kwargs['quality'] = qualities.Requirements(kwargs.get('quality'))
kwargs['session'] = session
try:
movie = mq.queue_add(**kwargs)
except mq.QueueError as e:
reply = {
'status': 'error',
'message': e.message
}
return reply, 500
reply = jsonify(movie)
reply.status_code = 201
return reply
@api.deprecated
@api.response(404, 'ID not found', model=default_error_schema)
@movie_queue_api.route('/<id>/')
@api.doc(params={'id': 'ID of Queued Movie'})
class MovieQueueManageAPI(APIResource):
@api.response(200, 'Movie successfully retrieved', movie_object_schema)
@api.doc(description="Get a specific movie")
def get(self, id, session=None):
""" Returns a movie from queue by ID """
try:
movie = mq.get_movie_by_id(movie_id=id)
        except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
return jsonify(movie)
@api.response(200, 'Movie successfully deleted', model=empty_response)
@api.doc(description="Delete a specific movie")
def delete(self, id, session=None):
""" Delete movies from movie queue """
try:
mq.delete_movie_by_id(movie_id=id)
except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
return {}
@api.response(405, 'Movie not marked as downloaded', model=default_error_schema)
@api.response(200, 'Movie successfully updated', movie_object_schema)
@api.validate(model=movie_edit_input_schema,
description='Values to use when editing existing movie. At least one value should be used')
@api.doc(description="Update a specific movie")
def put(self, id, session=None):
""" Updates movie quality or downloaded state in movie queue """
data = request.json
try:
movie = mq.get_movie_by_id(movie_id=id)
except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
queue_name = movie.get('queue_name')
if data.get('reset_downloaded'):
try:
movie = mq.queue_forget(movie_id=id, queue_name=queue_name)
except mq.QueueError as e:
if e.errno == 1:
reply = {
'status': 'error',
'message': e.message
}
return reply, 405
else:
reply = {
'status': 'error',
'message': e.message
}
return reply, 404
if data.get('quality'):
try:
movie = mq.queue_edit(quality=data['quality'], movie_id=id, queue_name=queue_name)
except mq.QueueError as e:
reply = {'status': 'error',
'message': e.message}
return reply, 404
if not movie:
return {'status': 'error',
'message': 'Not enough parameters to edit movie data'}, 400
return jsonify(movie)
|
|
# coding=utf-8
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tf_slim.layers import optimizers as optimizers_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def setUpModule():
tf.disable_eager_execution()
def disable_resource_variables(f):
def wrapper(*args, **kwargs):
variable_scope.disable_resource_variables()
try:
f(*args, **kwargs)
finally:
variable_scope.enable_resource_variables()
return wrapper
def _setup_model():
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
global_step = tf.train.create_global_step()
return x, var, loss, global_step
def _no_op_learning_rate_decay_fn(lr, global_step):
assert lr is not None
assert global_step is not None
return lr
class OptimizersTest(test.TestCase):
def testSGDOptimizer(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr),
"Momentum"
]
for optimizer in optimizers:
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testNoLrCallable(self):
def optimizer_fn():
return gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=None, optimizer=optimizer_fn)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testWrongOptimizer(self):
optimizers = ["blah", variables.Variable, object(), lambda x: None]
for optimizer in optimizers:
with ops.Graph().as_default() as g:
with self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
def testBadSummaries(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer="SGD",
summaries=["loss", "bad_summary"])
def testInvalidLoss(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, _, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
None, global_step, learning_rate=0.1, optimizer="SGD")
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
[[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
def testInvalidGlobalStep(self):
with ops.Graph().as_default() as g, self.session(graph=g):
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
with self.assertRaises(AttributeError):
optimizers_lib.optimize_loss(
loss,
global_step=constant_op.constant(
43, dtype=dtypes.int64),
learning_rate=0.1,
optimizer="SGD")
with self.assertRaises(TypeError):
optimizers_lib.optimize_loss(
loss,
global_step=variable_scope.get_variable(
"global_step", [],
trainable=False,
dtype=dtypes.float64,
initializer=init_ops.constant_initializer(
0.0, dtype=dtypes.float64)),
learning_rate=0.1,
optimizer="SGD")
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss,
global_step=variable_scope.get_variable(
"global_step", [1],
trainable=False,
dtype=dtypes.int64,
initializer=init_ops.constant_initializer(
[0], dtype=dtypes.int64)),
learning_rate=0.1,
optimizer="SGD")
def testInvalidLearningRate(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=-0.1, optimizer="SGD")
@disable_resource_variables
def testGradientNoise(self):
random_seed.set_random_seed(42)
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
      # Due to randomness the following number may change if the graph is
      # different.
self.assertAlmostEqual(var_value, 9.801016, 4)
self.assertEqual(global_step_value, 1)
@disable_resource_variables
def testGradientNoiseWithClipping(self):
random_seed.set_random_seed(42)
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0,
clip_gradients=10.0)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.801016, 4)
self.assertEqual(global_step_value, 1)
def testGradientClip(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
clip_gradients=0.1)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.98999, 4)
self.assertEqual(global_step_value, 1)
def testAdaptiveGradientClip(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
clip_gradients = optimizers_lib.adaptive_clipping_fn()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
clip_gradients=clip_gradients)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.8916, 4)
self.assertEqual(global_step_value, 1)
var_count = 0
for var in variables.global_variables():
if var.name.startswith("OptimizeLoss/AdaptiveMaxNorm"):
var_count += 1
self.assertEqual(2, var_count)
def testGradientMultiply(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: 7.})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
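      # Plugging in the numbers from the comment above:
      # var(1) = 10 - 0.1 * 7 * 5 = 6.5, which is the value asserted.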
def testGradientMultiplyInt32Tensor(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
v = array_ops.placeholder(dtypes.float32, [])
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: v})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5, v: 7.})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testGradientMultiplyInt64Tensor(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
v = array_ops.placeholder(dtypes.float64, [])
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: v})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5, v: 7.})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testIgnoreVariablesWithNoGradients(self):
_, _, loss, global_step = _setup_model()
unused_variable = variable_scope.get_variable("ignore_me", [])
optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0,
gradient_multipliers={unused_variable: 1.},
clip_gradients=10.0)
def testNoGlobalStep(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
def testNoGlobalStepWithDecay(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g):
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
with self.assertRaisesRegexp(
ValueError, "global_step is required for learning_rate_decay_fn"):
optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
optimizer=optimizer,
update_ops=[update_op])
def testNoGlobalStepArg(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOp(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOpNoIncrementGlobalStep(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op],
increment_global_step=False)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(0, global_step.eval())
def testUpdateOpWithNoOpDecay(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOpFromCollection(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, update_var_value, global_step_value = session.run(
[var, update_var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(update_var_value, 20)
self.assertEqual(global_step_value, 1)
class AdaptiveClipping(test.TestCase):
def testAverages(self):
with self.cached_session() as session:
scale = 2.
grad = array_ops.ones([3, 4]) * scale
log_norm = np.log(np.sqrt(scale**2 * grad.get_shape().num_elements()))
grads_and_vars = [(grad, grad)]
grads_and_vars = optimizers_lib.adaptive_clipping_fn(
decay=0.5)(grads_and_vars)
var_dict = {}
for var in variables.global_variables():
if var.name.startswith("AdaptiveMaxNorm"):
var_dict[var.name.split(":")[0]] = var
self.assertEqual(2, len(var_dict))
moving_mean = var_dict["AdaptiveMaxNorm/mean"]
moving_sq_mean = var_dict["AdaptiveMaxNorm/sq_mean"]
variables.global_variables_initializer().run()
mean, sq_mean = session.run([moving_mean, moving_sq_mean])
self.assertEqual([0], mean)
self.assertEqual([0], sq_mean)
for i in range(20):
mean, sq_mean, _ = session.run(
[moving_mean, moving_sq_mean, grads_and_vars[0][0]])
if i == 0:
self.assertLess(mean, 0.9 * log_norm)
self.assertLess(sq_mean, 0.9 * log_norm**2)
self.assertAlmostEqual(float(mean), log_norm, places=4)
self.assertAlmostEqual(float(sq_mean), log_norm**2, places=4)
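      # For the gradient above (scale=2, 3*4=12 elements) the target log-norm
      # is log(sqrt(2**2 * 12)) = log(sqrt(48)) ~= 1.936, which the moving
      # averages approach after enough iterations.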
def testClip(self):
with self.cached_session() as session:
spike = 1000.
multiplier = array_ops.placeholder(dtypes.float32, [], "multiplier")
step = array_ops.placeholder(dtypes.int32, [], "step")
grad = array_ops.ones([3, 4]) * multiplier
grads_and_vars = [(grad, grad)]
grads_and_vars = optimizers_lib.adaptive_clipping_fn(
decay=0.9, global_step=step)(grads_and_vars)
variables.global_variables_initializer().run()
def run(scale, i):
return session.run(grads_and_vars[0][0],
feed_dict={multiplier: scale,
step: i})
for i in range(20):
scale = [1., -2.][i % 2]
clipped_grad = run(scale, i)
if i > 3:
self.assertAllClose(np.ones(clipped_grad.shape) * scale, clipped_grad)
# assert that the spike will have low influence.
clipped_grad = run(spike, 20)
self.assertTrue((clipped_grad < 25.).all())
# assert that a repeated spike will converge to this new value.
for i in range(10):
clipped_grad = run(spike, i + 21)
self.assertAllClose(np.ones(clipped_grad.shape) * spike, clipped_grad)
if __name__ == "__main__":
test.main()
|
|
from __future__ import print_function
from unittest import TestCase
import numpy as np
from nose import SkipTest
from nose.tools import assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import mixture
from sklearn.utils import check_random_state
from hmmlearn import hmm
from hmmlearn.utils import normalize
np.seterr(all='warn')
def fit_hmm_and_monitor_log_likelihood(h, X, lengths=None, n_iter=1):
h.n_iter = 1 # make sure we do a single iteration at a time
h.init_params = '' # and don't re-init params
loglikelihoods = np.empty(n_iter, dtype=float)
for i in range(n_iter):
h.fit(X, lengths=lengths)
loglikelihoods[i] = h.score(X, lengths=lengths)
return loglikelihoods
class GaussianHMMTestMixin(object):
covariance_type = None # set by subclasses
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
with assert_raises(ValueError):
h = hmm.GaussianHMM(20, covariance_type='badcovariance_type')
h.means_ = self.means
h.covars_ = []
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h._check()
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = self.prng.randn(n_samples, self.n_features) + h.means_[gaussidx]
h._init(X, params="st")
ll, posteriors = h.score_samples(X)
self.assertEqual(posteriors.shape, (n_samples, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
viterbi_ll, stateseq = h.decode(X)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
X, state_sequence = h.sample(n, random_state=self.prng)
self.assertEqual(X.shape, (n, self.n_features))
self.assertEqual(len(state_sequence), n)
def test_fit(self, params='stmc', n_iter=5, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(X, lengths=lengths)
trainll = fit_hmm_and_monitor_log_likelihood(
h, X, lengths=lengths, n_iter=n_iter)
# Check that the log-likelihood is always increasing during training.
diff = np.diff(trainll)
message = ("Decreasing log-likelihood for {0} covariance: {1}"
.format(self.covariance_type, diff))
self.assertTrue(np.all(diff >= -1e-6), message)
def test_fit_works_on_sequences_of_different_length(self):
lengths = [3, 4, 5]
X = self.prng.rand(sum(lengths), self.n_features)
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(X, lengths=lengths)
def test_fit_with_length_one_signal(self):
lengths = [10, 8, 1]
X = self.prng.rand(sum(lengths), self.n_features)
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which
# has no identity
h.fit(X, lengths=lengths)
def test_fit_with_priors(self, params='stmc', n_iter=5):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
lengths = [100] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Re-initialize the parameters and check that we can converge to the
# original parameter values.
h_learn = hmm.GaussianHMM(self.n_components, self.covariance_type,
params=params)
h_learn.n_iter = 0
h_learn.fit(X, lengths=lengths)
fit_hmm_and_monitor_log_likelihood(
h_learn, X, lengths=lengths, n_iter=n_iter)
# Make sure we've converged to the right parameters.
# a) means
self.assertTrue(np.allclose(sorted(h.means_.tolist()),
sorted(h_learn.means_.tolist()),
0.01))
# b) covars are hard to estimate precisely from a relatively small
# sample, thus the large threshold
self.assertTrue(np.allclose(sorted(h._covars_.tolist()),
sorted(h_learn._covars_.tolist()),
10))
def test_fit_non_ergodic_transmat(self):
h = hmm.GaussianHMM(n_components=5, covariance_type='full',
n_iter=100, init_params='st')
h.startprob_ = np.array([1, 0, 0, 0, 0])
h.transmat_ = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
h.fit(X, lengths=lengths)
# TODO: write the actual test
class TestGaussianHMMWithSphericalCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_features = 3 # ('walk', 'shop', 'clean')
self.emissionprob = np.array([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])
self.startprob = np.array([0.6, 0.4])
self.transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
self.h = hmm.MultinomialHMM(self.n_components)
self.h.startprob_ = self.startprob
self.h.transmat_ = self.transmat
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
X = [[0], [1], [2]]
logprob, state_sequence = self.h.decode(X)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
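        # The quoted probability can be reproduced from setUp() by hand:
        #   P = 0.4 * 0.6   (start 'Sunny', emit 'walk')
        #     * 0.4 * 0.4   (transition to 'Rainy', emit 'shop')
        #     * 0.7 * 0.5   (stay in 'Rainy', emit 'clean')
        #     = 0.01344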
def test_decode_map_algorithm(self):
X = [[0], [1], [2]]
h = hmm.MultinomialHMM(self.n_components, algorithm="map")
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.emissionprob_ = self.emissionprob
_logprob, state_sequence = h.decode(X)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
X = [[0], [1], [2]]
state_sequence = self.h.predict(X)
posteriors = self.h.predict_proba(X)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
with assert_raises(ValueError):
h.emissionprob_ = []
h._check()
with assert_raises(ValueError):
h.emissionprob_ = np.zeros((self.n_components - 2,
self.n_features))
h._check()
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
n_samples = len(idx)
X = np.atleast_2d(
(self.prng.rand(n_samples) * self.n_features).astype(int)).T
ll, posteriors = self.h.score_samples(X)
self.assertEqual(posteriors.shape, (n_samples, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
def test_sample(self, n=1000):
X, state_sequence = self.h.sample(n, random_state=self.prng)
self.assertEqual(X.ndim, 2)
self.assertEqual(len(X), n)
self.assertEqual(len(state_sequence), n)
self.assertEqual(len(np.unique(X)), self.n_features)
def test_fit(self, params='ste', n_iter=5, **kwargs):
h = self.h
h.params = params
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = normalize(self.prng.rand(self.n_components))
h.transmat_ = normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = normalize(
self.prng.rand(self.n_components, self.n_features), axis=1)
trainll = fit_hmm_and_monitor_log_likelihood(
h, X, lengths=lengths, n_iter=n_iter)
# Check that the log-likelihood is always increasing during training.
diff = np.diff(trainll)
self.assertTrue(np.all(diff >= -1e-6),
"Decreasing log-likelihood: {0}" .format(diff))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
        # Use the _init function to initialize the parameters.
learner._init(X, lengths=lengths, params=params)
trainll = fit_hmm_and_monitor_log_likelihood(learner, X, n_iter=n_iter)
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test__check_input_symbols(self):
self.assertTrue(self.h._check_input_symbols([[0, 0, 2, 1, 3, 1, 1]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, 3, 5, 10]]))
self.assertFalse(self.h._check_input_symbols([[0]]))
self.assertFalse(self.h._check_input_symbols([[0., 2., 1., 3.]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, -2, 1, 3, 1, 1]]))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = normalize(prng.rand(n_mix))
return g
class GMMHMMTestMixin(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms = []
for state in range(self.n_components):
self.gmms.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.gmms_ = self.gmms
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
n_samples = len(refstateseq)
X = [h.gmms_[x].sample(1, random_state=self.prng).flatten()
for x in refstateseq]
_ll, posteriors = h.score_samples(X)
self.assertEqual(posteriors.shape, (n_samples, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
_logprob, stateseq = h.decode(X)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.gmms_ = self.gmms
X, state_sequence = h.sample(n, random_state=self.prng)
self.assertEqual(X.shape, (n, self.n_features))
self.assertEqual(len(state_sequence), n)
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(X, lengths=lengths)
h.transmat_ = normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = normalize(self.prng.rand(self.n_components))
trainll = fit_hmm_and_monitor_log_likelihood(
h, X, lengths=lengths, n_iter=n_iter)
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
lengths = [3, 4, 5]
X = self.prng.rand(sum(lengths), self.n_features)
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(X, lengths=lengths)
class TestGMMHMMWithDiagCovars(GMMHMMTestMixin, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMTestMixin, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMTestMixin, TestCase):
covariance_type = 'full'
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import datetime
import time
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
import util
from util import print_msg, format_satoshis, print_stderr
import bitcoin
from bitcoin import is_address, hash_160_to_bc_address, hash_160, COIN, TYPE_ADDRESS
from transaction import Transaction
import paymentrequest
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
import contacts
known_commands = {}
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.func_code.co_varnames[1:func.func_code.co_argcount]
self.defaults = func.func_defaults
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return decorator
class Commands:
def __init__(self, config, wallet, network, callback = None, password=None, new_password=None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
self._password = password
self.new_password = new_password
self.contacts = contacts.Contacts(self.config)
def _run(self, method, args, password_getter):
cmd = known_commands[method]
if cmd.requires_password and self.wallet.use_encryption:
self._password = apply(password_getter,())
if self._password is None:
return
f = getattr(self, method)
result = f(*args)
self._password = None
if self._callback:
apply(self._callback, ())
return result
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self):
"""Create a new wallet"""
raise BaseException('Not a JSON-RPC command')
@command('wn')
def restore(self, text):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of Cryptoescudo addresses
or Cryptoescudo private keys. If you want to be prompted for your
seed, type '?' or ':' (concealed) """
raise BaseException('Not a JSON-RPC command')
@command('w')
def deseed(self):
"""Remove seed from wallet. This creates a seedless, watching-only
wallet."""
raise BaseException('Not a JSON-RPC command')
@command('wp')
def password(self):
"""Change wallet password. """
self.wallet.update_password(self._password, self.new_password)
self.wallet.storage.write()
return {'password':self.wallet.use_encryption}
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
try:
value = ast.literal_eval(value)
except:
pass
self.config.set_key(key, value)
return True
@command('')
def make_seed(self, nbits=128, entropy=1, language=None):
"""Create a seed"""
from mnemonic import Mnemonic
s = Mnemonic(language).make_seed(nbits, custom_entropy=entropy)
return s.encode('utf8')
@command('')
def check_seed(self, seed, entropy=1, language=None):
"""Check that a seed was generated with given entropy"""
from mnemonic import Mnemonic
return Mnemonic(language).check_seed(seed, entropy)
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.get_history', [address]))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = copy.deepcopy(self.wallet.get_spendable_coins(exclude_frozen = False))
for i in l:
v = i["value"]
i["value"] = float(v)/COIN if v is not None else None
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.listunspent', [address]))
@command('n')
def getutxoaddress(self, txid, pos):
"""Get the address of a UTXO. Note: This is a walletless server query, results are
not checked by SPV.
"""
r = self.network.synchronous_get(('blockchain.utxo.get_address', [txid, pos]))
return {'address': r}
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs. Inputs must have a redeemPubkey. Outputs must be a list of (address, value).
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('locktime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
else:
raise BaseException('Output point missing', txin)
if txin.get('redeemPubkey'):
pubkey = txin['redeemPubkey']
txin['pubkeys'] = [pubkey]
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
privkey = txin.get('privkey')
if privkey:
keypairs[pubkey] = privkey
elif txin.get('redeemScript'):
raise BaseException('Not implemented')
else:
raise BaseException('No redeem script')
outputs = map(lambda x: (TYPE_ADDRESS, x[0], int(COIN*Decimal(x[1]))), outputs)
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.as_dict()
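        # Illustrative `jsontx` argument for serialize() (all values are
        # placeholders, not real keys or hashes):
        #   {"inputs": [{"output": "<prevout_hash>:0",
        #                "redeemPubkey": "<pubkey hex>",
        #                "privkey": "<optional private key>"}],
        #    "outputs": [["<destination address>", 0.1]],
        #    "locktime": 0}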
@command('wp')
def signtransaction(self, tx, privkey=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx)
if privkey:
pubkey = bitcoin.public_key_from_private_key(privkey)
h160 = bitcoin.hash_160(pubkey.decode('hex'))
x_pubkey = 'fd' + (chr(0) + h160).encode('hex')
tx.sign({x_pubkey:privkey})
else:
self.wallet.sign_transaction(tx, self._password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return tx.deserialize()
@command('n')
def broadcast(self, tx, timeout=30):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
return self.network.broadcast(tx, timeout)
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = Transaction.multisig_script(pubkeys, num)
address = hash_160_to_bc_address(hash_160(redeem_script.decode('hex')), 5)
return {'address':address, 'redeemScript':redeem_script}
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return self.wallet.set_frozen_state([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return self.wallet.set_frozen_state([address], False)
@command('wp')
def getprivatekeys(self, address):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if is_address(address):
return self.wallet.get_private_key(address, self._password)
        domain = json.loads(address)
return [self.wallet.get_private_key(address, self._password) for address in domain]
@command('w')
def ismine(self, address):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return self.wallet.is_mine(address)
@command('')
def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum-cesc listaddresses | electrum-cesc getprivatekeys - '"
@command('')
def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
def getpubkeys(self, address):
"""Return the public keys for a wallet address. """
return self.wallet.get_public_keys(address)
@command('w')
def getbalance(self, account=None):
"""Return the balance of your wallet. """
if account is None:
c, u, x = self.wallet.get_balance()
else:
c, u, x = self.wallet.get_account_balance(account)
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
return out
@command('n')
def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
server query, results are not checked by SPV.
"""
out = self.network.synchronous_get(('blockchain.address.get_balance', [address]))
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
def getproof(self, address):
"""Get Merkle branch of an address in the UTXO set"""
p = self.network.synchronous_get(('blockchain.address.get_proof', [address]))
out = []
for i,s in p:
out.append(i)
return out
@command('n')
def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return self.network.synchronous_get(('blockchain.transaction.get_merkle', [txid, int(height)]))
@command('n')
def getservers(self):
"""Return the list of available servers"""
return self.network.get_servers()
@command('')
def version(self):
"""Return the version of electrum."""
        import electrum_cesc as electrum # Needs to stay here to prevent circular imports
return electrum.ELECTRUM_VERSION
@command('w')
def getmpk(self):
"""Get master public key. Return your wallet\'s master public key"""
return self.wallet.get_master_public_key()
@command('wp')
def getmasterprivate(self):
"""Get master private key. Return your wallet\'s master private key"""
return str(self.wallet.get_master_private_key(self.wallet.root_name, self._password))
@command('wp')
def getseed(self):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_mnemonic(self._password)
return s.encode('utf8')
@command('wp')
def importprivkey(self, privkey):
"""Import a private key. """
try:
addr = self.wallet.import_key(privkey, self._password)
out = "Keypair imported: " + addr
except Exception as e:
out = "Error: " + str(e)
return out
def _resolver(self, x):
if x is None:
return None
out = self.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise BaseException('cannot verify alias', x)
return out['address']
@command('n')
def sweep(self, privkey, destination, tx_fee=None, nocheck=False):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
privkeys = privkey if type(privkey) is list else [privkey]
self.nocheck = nocheck
dest = self._resolver(destination)
if tx_fee is None:
tx_fee = 0.001
fee = int(Decimal(tx_fee)*COIN)
return Transaction.sweep(privkeys, self.network, dest, fee)
@command('wp')
def signmessage(self, address, message):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = self.wallet.sign_message(address, message, self._password)
return base64.b64encode(sig)
@command('')
def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
return bitcoin.verify_message(address, sig, message)
def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf):
self.nocheck = nocheck
change_addr = self._resolver(change_addr)
domain = None if domain is None else map(self._resolver, domain)
fee = None if fee is None else int(COIN*Decimal(fee))
final_outputs = []
for address, amount in outputs:
address = self._resolver(address)
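            # '!' means "send the maximum available": the amount and fee are
            # derived from all spendable coins in the domain.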
if amount == '!':
assert len(outputs) == 1
inputs = self.wallet.get_spendable_coins(domain)
amount, fee = self.wallet.get_max_amount(self.config, inputs, (TYPE_ADDRESS, address), fee)
else:
amount = int(COIN*Decimal(amount))
final_outputs.append((TYPE_ADDRESS, address, amount))
coins = self.wallet.get_spendable_coins(domain)
tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
if rbf:
tx.set_sequence(0)
if not unsigned:
self.wallet.sign_transaction(tx, self._password)
return tx
@command('wp')
def payto(self, destination, amount, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False):
"""Create a transaction. """
domain = [from_addr] if from_addr else None
tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf)
return tx.as_dict()
@command('wp')
def paytomany(self, outputs, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False):
"""Create a multi-output transaction. """
domain = [from_addr] if from_addr else None
tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf)
return tx.as_dict()
@command('w')
def history(self):
"""Wallet history. Returns the transaction history of your wallet."""
balance = 0
out = []
for item in self.wallet.get_history():
tx_hash, height, conf, timestamp, value, balance = item
if timestamp:
date = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
date = "----"
label = self.wallet.get_label(tx_hash)
out.append({
'txid': tx_hash,
'timestamp': timestamp,
'date': date,
'label': label,
'value': float(value)/COIN if value is not None else None,
'height': height,
'confirmations': conf
})
return out
@command('w')
def setlabel(self, key, label):
"""Assign a label to an item. Item may be a Cryptoescudo address or a
transaction ID"""
self.wallet.set_label(key, label)
@command('')
def listcontacts(self):
"""Show your list of contacts"""
return self.contacts
@command('')
def getalias(self, key):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return self.contacts.resolve(key)
@command('')
def searchcontacts(self, query):
"""Search through contacts, return matching entries. """
results = {}
for key, value in self.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
def listaddresses(self, receiving=False, change=False, show_labels=False, frozen=False, unused=False, funded=False, show_balance=False):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in self.wallet.addresses(True):
if frozen and not self.wallet.is_frozen(addr):
continue
if receiving and self.wallet.is_change(addr):
continue
if change and not self.wallet.is_change(addr):
continue
if unused and self.wallet.is_used(addr):
continue
if funded and self.wallet.is_empty(addr):
continue
item = addr
if show_balance:
item += ", "+ format_satoshis(sum(self.wallet.get_addr_balance(addr)))
if show_labels:
item += ', ' + repr(self.wallet.labels.get(addr, ''))
out.append(item)
return out
@command('w')
def gettransaction(self, txid):
"""Retrieve a transaction. """
tx = self.wallet.transactions.get(txid) if self.wallet else None
if tx is None and self.network:
raw = self.network.synchronous_get(('blockchain.transaction.get', [txid]))
if raw:
tx = Transaction(raw)
else:
raise BaseException("Unknown transaction")
return tx.as_dict()
@command('')
def encrypt(self, pubkey, message):
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
return bitcoin.encrypt_message(message, pubkey)
@command('wp')
def decrypt(self, pubkey, encrypted):
"""Decrypt a message encrypted with a public key."""
return self.wallet.decrypt_message(pubkey, encrypted, self._password)
def _format_request(self, out):
pr_str = {
PR_UNKNOWN: 'Unknown',
PR_UNPAID: 'Pending',
PR_PAID: 'Paid',
PR_EXPIRED: 'Expired',
}
out['amount (CESC)'] = format_satoshis(out.get('amount'))
out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
return out
@command('w')
def getrequest(self, key):
"""Return a payment request"""
r = self.wallet.get_payment_request(key, self.config)
if not r:
raise BaseException("Request not found")
return self._format_request(r)
#@command('w')
#def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
def listrequests(self, pending=False, expired=False, paid=False):
"""List the payment requests you made."""
out = self.wallet.get_sorted_requests(self.config)
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
if f is not None:
out = filter(lambda x: x.get('status')==f, out)
return map(self._format_request, out)
@command('w')
def addrequest(self, amount, memo='', expiration=None, force=False):
"""Create a payment request."""
addr = self.wallet.get_unused_address(None)
if addr is None:
if force:
addr = self.wallet.create_new_address(None, False)
else:
return False
amount = int(COIN*Decimal(amount))
expiration = int(expiration) if expiration else None
req = self.wallet.make_payment_request(addr, amount, memo, expiration)
self.wallet.add_payment_request(req, self.config)
out = self.wallet.get_payment_request(addr, self.config)
return self._format_request(out)
@command('wp')
def signrequest(self, address):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise BaseException('No alias in your configuration')
alias_addr = self.contacts.resolve(alias)['address']
self.wallet.sign_payment_request(address, alias, alias_addr, self._password)
@command('w')
def rmrequest(self, address):
"""Remove a payment request"""
return self.wallet.remove_payment_request(address, self.config)
@command('w')
def clearrequests(self):
"""Remove all payment requests"""
for k in self.wallet.receive_requests.keys():
self.wallet.remove_payment_request(k, self.config)
@command('n')
def notify(self, address, URL):
"""Watch an address. Everytime the address changes, a http POST is sent to the URL."""
def callback(x):
import urllib2
headers = {'content-type':'application/json'}
data = {'address':address, 'status':x.get('result')}
try:
req = urllib2.Request(URL, json.dumps(data), headers)
response_stream = urllib2.urlopen(req)
util.print_error('Got Response for %s' % address)
except BaseException as e:
util.print_error(str(e))
self.network.send([('blockchain.address.subscribe', [address])], callback)
return True
@command('wn')
def is_synchronized(self):
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
@command('')
def help(self):
# for the python console
return sorted(known_commands.keys())
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Cryptoescudo address, contact or alias',
'address': 'Cryptoescudo address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in CESC). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in CESC).',
'outputs': 'list of ["address", amount]',
}
command_options = {
'password': ("-W", "--password", "Password"),
'receiving': (None, "--receiving", "Show only receiving addresses"),
'change': (None, "--change", "Show only change addresses"),
'frozen': (None, "--frozen", "Show only frozen addresses"),
'unused': (None, "--unused", "Show only unused addresses"),
'funded': (None, "--funded", "Show only funded addresses"),
'show_balance':("-b", "--balance", "Show the balances of listed addresses"),
'show_labels': ("-l", "--labels", "Show the labels of listed addresses"),
'nocheck': (None, "--nocheck", "Do not verify aliases"),
'tx_fee': ("-f", "--fee", "Transaction fee (in CESC)"),
'from_addr': ("-F", "--from", "Source address. If it isn't in the wallet, it will ask for the private key unless supplied in the format public_key:private_key. It's not saved in the wallet."),
'change_addr': ("-c", "--change", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "--nbits", "Number of bits of entropy"),
'entropy': (None, "--entropy", "Custom entropy"),
'language': ("-L", "--lang", "Default language for wordlist"),
'gap_limit': ("-G", "--gap", "Gap limit"),
'privkey': (None, "--privkey", "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "--unsigned", "Do not sign transaction"),
'rbf': (None, "--rbf", "Replace-by-fee transaction"),
'domain': ("-D", "--domain", "List of addresses"),
'account': (None, "--account", "Account"),
'memo': ("-m", "--memo", "Description of the request"),
'expiration': (None, "--expiration", "Time in seconds"),
'timeout': (None, "--timeout", "Timeout in seconds"),
    'force': (None, "--force", "Create a new address beyond the gap limit, if no more addresses are available."),
'pending': (None, "--pending", "Show only pending requests."),
'expired': (None, "--expired", "Show only expired requests."),
'paid': (None, "--paid", "Show only paid requests."),
}
# don't use floats because of rounding errors
from transaction import tx_from_str
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
arg_types = {
'num': int,
'nbits': int,
'entropy': long,
'tx': tx_from_str,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'tx_fee': lambda x: str(Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x!='!' else '!',
}
config_variables = {
'addrequest': {
'requests_dir': 'directory where a bip70 file will be written.',
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of Cryptoescudo: URIs. Example: \"(\'file:///var/www/\',\'https://electrum-cesc.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of Cryptoescudo: URIs. Example: \"(\'file:///var/www/\',\'https://electrum-cesc.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
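    # Scan argv: if the user asked for help or already named a subcommand,
    # leave the arguments alone; otherwise insert the default subcommand.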
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
def add_network_options(parser):
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=False, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port], where type is socks4,socks5 or http")
from util import profiler
@profiler
def get_parser():
# parent parser, because set_default_subparser removes global options
parent_parser = argparse.ArgumentParser('parent', add_help=False)
group = parent_parser.add_argument_group('global options')
group.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Show debugging information")
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum-cesc_data' directory")
group.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
# create main parser
parser = argparse.ArgumentParser(
parents=[parent_parser],
epilog="Run 'electrum-cesc help <command>' to see the help for a command")
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', parents=[parent_parser], description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="Cryptoescudo URI (or bip70 file)")
#parser_gui.set_defaults(func=run_gui)
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
add_network_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', parents=[parent_parser], help="Run Daemon")
parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop'])
#parser_daemon.set_defaults(func=run_daemon)
add_network_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, parents=[parent_parser], help=cmd.help, description=cmd.description)
if cmdname == 'restore':
p.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
#p.set_defaults(func=run_cmdline)
if cmd.requires_password:
p.add_argument("-W", "--password", dest="password", default=None, help="password")
for optname, default in zip(cmd.options, cmd.defaults):
a, b, help = command_options[optname]
action = "store_true" if type(default) is bool else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
for param in cmd.params:
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
|
|
from __future__ import unicode_literals
import pytz
from datetime import datetime
from django.utils import timezone
from django.core.exceptions import PermissionDenied, ValidationError
from django.views.generic import ListView, DeleteView, View
from django.shortcuts import redirect
from django.views.defaults import page_not_found
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import HttpResponse
from django.db.models import Q
from website import search
from osf.models import NodeLog
from osf.models.user import OSFUser
from osf.models.node import Node
from osf.models.registrations import Registration
from osf.models import SpamStatus
from admin.base.utils import change_embargo_date, validate_embargo_date
from admin.base.views import GuidFormView, GuidView
from osf.models.admin_log_entry import (
update_admin_log,
NODE_REMOVED,
NODE_RESTORED,
CONTRIBUTOR_REMOVED,
CONFIRM_SPAM,
CONFIRM_HAM,
REINDEX_SHARE,
REINDEX_ELASTIC,
)
from admin.nodes.templatetags.node_extras import reverse_node
from admin.nodes.serializers import serialize_node, serialize_simple_user_and_node_permissions, serialize_log
from website.project.tasks import update_node_share
from website.project.views.register import osf_admin_change_status_identifier
class NodeFormView(PermissionRequiredMixin, GuidFormView):
""" Allow authorized admin user to input specific node guid.
Basic form. No admin models.
"""
template_name = 'nodes/search.html'
object_type = 'node'
permission_required = 'osf.view_node'
raise_exception = True
@property
def success_url(self):
return reverse_node(self.guid)
class NodeRemoveContributorView(PermissionRequiredMixin, DeleteView):
""" Allow authorized admin user to remove project contributor
Interface with OSF database. No admin models.
"""
template_name = 'nodes/remove_contributor.html'
context_object_name = 'node'
permission_required = ('osf.view_node', 'osf.change_node')
raise_exception = True
def delete(self, request, *args, **kwargs):
try:
node, user = self.get_object()
if node.remove_contributor(user, None, log=False):
update_admin_log(
user_id=self.request.user.id,
object_id=node.pk,
object_repr='Contributor',
message='User {} removed from node {}.'.format(
user.pk, node.pk
),
action_flag=CONTRIBUTOR_REMOVED
)
# Log invisibly on the OSF.
osf_log = NodeLog(
action=NodeLog.CONTRIB_REMOVED,
user=None,
params={
'project': node.parent_id,
'node': node.pk,
'contributors': user.pk
},
date=timezone.now(),
should_hide=True,
)
osf_log.save()
except AttributeError:
return page_not_found(
request,
AttributeError(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
kwargs.get('node_id')
)
)
)
return redirect(reverse_node(self.kwargs.get('node_id')))
def get_context_data(self, **kwargs):
context = {}
node, user = kwargs.get('object')
context.setdefault('node_id', node._id)
context.setdefault('user', serialize_simple_user_and_node_permissions(node, user))
return super(NodeRemoveContributorView, self).get_context_data(**context)
def get_object(self, queryset=None):
return (Node.load(self.kwargs.get('node_id')),
OSFUser.load(self.kwargs.get('user_id')))
class NodeDeleteBase(DeleteView):
template_name = None
context_object_name = 'node'
object = None
def get_context_data(self, **kwargs):
context = {}
context.setdefault('guid', kwargs.get('object')._id)
return super(NodeDeleteBase, self).get_context_data(**context)
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
class NodeDeleteView(PermissionRequiredMixin, NodeDeleteBase):
""" Allow authorized admin user to remove/hide nodes
Interface with OSF database. No admin models.
"""
template_name = 'nodes/remove_node.html'
object = None
permission_required = ('osf.view_node', 'osf.delete_node')
raise_exception = True
def delete(self, request, *args, **kwargs):
try:
node = self.get_object()
flag = None
osf_flag = None
message = None
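            # Toggle soft-deletion: restore a node that is already deleted,
            # otherwise mark it deleted (registrations are never removed here).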
if node.is_deleted:
node.is_deleted = False
node.deleted_date = None
flag = NODE_RESTORED
message = 'Node {} restored.'.format(node.pk)
osf_flag = NodeLog.NODE_CREATED
elif not node.is_registration:
node.is_deleted = True
node.deleted_date = timezone.now()
flag = NODE_REMOVED
message = 'Node {} removed.'.format(node.pk)
osf_flag = NodeLog.NODE_REMOVED
node.save()
if flag is not None:
update_admin_log(
user_id=self.request.user.id,
object_id=node.pk,
object_repr='Node',
message=message,
action_flag=flag
)
if osf_flag is not None:
# Log invisibly on the OSF.
osf_log = NodeLog(
action=osf_flag,
user=None,
params={
'project': node.parent_id,
},
date=timezone.now(),
should_hide=True,
)
osf_log.save()
except AttributeError:
return page_not_found(
request,
AttributeError(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
kwargs.get('guid')
)
)
)
return redirect(reverse_node(self.kwargs.get('guid')))
class NodeView(PermissionRequiredMixin, GuidView):
""" Allow authorized admin user to view nodes
View of OSF database. No admin models.
"""
template_name = 'nodes/node.html'
context_object_name = 'node'
permission_required = 'osf.view_node'
raise_exception = True
def get_context_data(self, **kwargs):
kwargs = super(NodeView, self).get_context_data(**kwargs)
kwargs.update({'SPAM_STATUS': SpamStatus}) # Pass spam status in to check against
return kwargs
def get_object(self, queryset=None):
guid = self.kwargs.get('guid')
node = Node.load(guid) or Registration.load(guid)
return serialize_node(node)
class AdminNodeLogView(PermissionRequiredMixin, ListView):
""" Allow admins to see logs"""
template_name = 'nodes/node_logs.html'
context_object_name = 'node'
paginate_by = 10
paginate_orphans = 1
ordering = 'date'
permission_required = 'osf.view_node'
raise_exception = True
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
def get_queryset(self):
node = self.get_object()
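        # Collect logs for this node and all of its children, newest first.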
query = Q(node_id__in=list(Node.objects.get_children(node).values_list('id', flat=True)) + [node.id])
return NodeLog.objects.filter(query).order_by('-date').include(
'node__guids', 'user__guids', 'original_node__guids', limit_includes=10
)
def get_context_data(self, **kwargs):
query_set = self.get_queryset()
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'logs': map(serialize_log, query_set),
'page': page,
}
class RegistrationListView(PermissionRequiredMixin, ListView):
""" Allow authorized admin user to view list of registrations
View of OSF database. No admin models.
"""
template_name = 'nodes/registration_list.html'
paginate_by = 10
paginate_orphans = 1
ordering = 'created'
    context_object_name = 'node'
permission_required = 'osf.view_registration'
raise_exception = True
def get_queryset(self):
return Registration.objects.all().order_by(self.ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'nodes': map(serialize_node, query_set),
'page': page,
}
class RegistrationUpdateEmbargoView(PermissionRequiredMixin, View):
""" Allow authorized admin user to update the embargo of a registration
"""
permission_required = ('osf.change_node')
raise_exception = True
def post(self, request, *args, **kwargs):
validation_only = (request.POST.get('validation_only', False) == 'True')
end_date = request.POST.get('date')
user = request.user
registration = self.get_object()
try:
end_date = pytz.utc.localize(datetime.strptime(end_date, '%m/%d/%Y'))
except ValueError:
return HttpResponse('Please enter a valid date.', status=400)
try:
if validation_only:
validate_embargo_date(registration, user, end_date)
else:
change_embargo_date(registration, user, end_date)
except ValidationError as e:
return HttpResponse(e, status=409)
except PermissionDenied as e:
return HttpResponse(e, status=403)
return redirect(reverse_node(self.kwargs.get('guid')))
def get_object(self, queryset=None):
return Registration.load(self.kwargs.get('guid'))
class NodeSpamList(PermissionRequiredMixin, ListView):
SPAM_STATE = SpamStatus.UNKNOWN
paginate_by = 25
paginate_orphans = 1
ordering = 'created'
    context_object_name = 'node'
permission_required = 'osf.view_spam'
raise_exception = True
def get_queryset(self):
return Node.objects.filter(spam_status=self.SPAM_STATE).order_by(self.ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'nodes': map(serialize_node, query_set),
'page': page,
}
class NodeFlaggedSpamList(NodeSpamList, DeleteView):
SPAM_STATE = SpamStatus.FLAGGED
template_name = 'nodes/flagged_spam_list.html'
def delete(self, request, *args, **kwargs):
if not request.user.has_perm('auth.mark_spam'):
raise PermissionDenied('You do not have permission to update a node flagged as spam.')
node_ids = [
nid for nid in request.POST.keys()
if nid != 'csrfmiddlewaretoken'
]
for nid in node_ids:
node = Node.load(nid)
osf_admin_change_status_identifier(node, 'unavailable | spam')
node.confirm_spam(save=True)
update_admin_log(
user_id=self.request.user.id,
object_id=nid,
object_repr='Node',
message='Confirmed SPAM: {}'.format(nid),
action_flag=CONFIRM_SPAM
)
return redirect('nodes:flagged-spam')
class NodeKnownSpamList(NodeSpamList):
SPAM_STATE = SpamStatus.SPAM
template_name = 'nodes/known_spam_list.html'
class NodeKnownHamList(NodeSpamList):
SPAM_STATE = SpamStatus.HAM
template_name = 'nodes/known_spam_list.html'
class NodeConfirmSpamView(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/confirm_spam.html'
permission_required = 'osf.mark_spam'
raise_exception = True
def delete(self, request, *args, **kwargs):
node = self.get_object()
osf_admin_change_status_identifier(node, 'unavailable | spam')
node.confirm_spam(save=True)
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr='Node',
message='Confirmed SPAM: {}'.format(node._id),
action_flag=CONFIRM_SPAM
)
return redirect(reverse_node(self.kwargs.get('guid')))
class NodeConfirmHamView(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/confirm_ham.html'
permission_required = 'osf.mark_spam'
raise_exception = True
def delete(self, request, *args, **kwargs):
node = self.get_object()
node.confirm_ham(save=True)
osf_admin_change_status_identifier(node, 'public')
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr='Node',
message='Confirmed HAM: {}'.format(node._id),
action_flag=CONFIRM_HAM
)
return redirect(reverse_node(self.kwargs.get('guid')))
class NodeReindexShare(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/reindex_node_share.html'
permission_required = 'osf.mark_spam'
raise_exception = True
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
def delete(self, request, *args, **kwargs):
node = self.get_object()
update_node_share(node)
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr='Node',
message='Node Reindexed (SHARE): {}'.format(node._id),
action_flag=REINDEX_SHARE
)
return redirect(reverse_node(self.kwargs.get('guid')))
class NodeReindexElastic(PermissionRequiredMixin, NodeDeleteBase):
template_name = 'nodes/reindex_node_elastic.html'
permission_required = 'osf.mark_spam'
raise_exception = True
def get_object(self, queryset=None):
return Node.load(self.kwargs.get('guid')) or Registration.load(self.kwargs.get('guid'))
def delete(self, request, *args, **kwargs):
node = self.get_object()
search.search.update_node(node, bulk=False, async=False)
update_admin_log(
user_id=self.request.user.id,
object_id=node._id,
object_repr='Node',
message='Node Reindexed (Elastic): {}'.format(node._id),
action_flag=REINDEX_ELASTIC
)
return redirect(reverse_node(self.kwargs.get('guid')))
|
|
##
# Copyright (c) 2007-2013 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.browser.command import Command
from caldavclientlibrary.browser.command import WrongOptions
from caldavclientlibrary.protocol.url import URL
from caldavclientlibrary.protocol.webdav.definitions import davxml
from caldavclientlibrary.browser.subshell import SubShell
from caldavclientlibrary.browser import commands
from caldavclientlibrary.protocol.webdav.ace import ACE
from caldavclientlibrary.browser import utils
from caldavclientlibrary.protocol.caldav.definitions import caldavxml
from xml.etree.ElementTree import QName
import readline
import os
import getopt
import shlex
class Cmd(Command):
def __init__(self):
super(Command, self).__init__()
self.cmds = ("acl",)
self.subshell = None
def execute(self, cmdname, options):
interactive = False
path = None
try:
opts, args = getopt.getopt(shlex.split(options), 'i')
except getopt.GetoptError, e:
print str(e)
print self.usage(cmdname)
raise WrongOptions
for name, _ignore_value in opts:
if name == "-i":
interactive = True
else:
print "Unknown option: %s" % (name,)
print self.usage(cmdname)
raise WrongOptions
if len(args) > 1:
print "Wrong number of arguments: %d" % (len(args),)
print self.usage(cmdname)
raise WrongOptions
elif args:
path = args[0]
if not path.startswith("/"):
path = os.path.join(self.shell.wd, path)
else:
path = self.shell.wd
if not path.endswith("/"):
path += "/"
resource = URL(url=path)
results, bad = self.shell.account.session.getProperties(resource, (davxml.acl,))
if davxml.acl in bad:
print "Could not retrieve DAV:acl property, status=%d" % (bad[davxml.acl],)
else:
if interactive:
self.doInteractiveMode(resource, results[davxml.acl])
else:
aces = ACE.parseFromACL(results[davxml.acl])
print utils.printACEList(aces, self.shell.account)
return True
def doInteractiveMode(self, resource, acls):
print "Entering ACL edit mode on resource: %s" % (resource.relativeURL(),)
if not self.subshell:
self.subshell = SubShell(self.shell, "ACL", (
commands.help.Cmd(),
commands.logging.Cmd(),
commands.quit.Cmd(),
Add(),
Change(),
Remove(),
List(),
))
self.subshell.resource = resource
self.subshell.account = self.shell.account
self.subshell.run()
def usage(self, name):
return """Usage: %s [OPTIONS] [PATH]
PATH is a relative or absolute path.
Options:
-i interactive mode for adding, changing and deleting ACLs.
if not present, existing ACLs will be printed.
""" % (name,)
def helpDescription(self):
return "Manage the access privileges of a directory or file."
class CommonACLCommand(Command):
def displayACEList(self):
# First list the current set
results, bad = self.shell.shell.account.session.getProperties(self.shell.resource, (davxml.acl,))
if davxml.acl in bad:
print "Could not retrieve DAV:acl property, status=%d" % (bad[davxml.acl],)
return None
else:
aces = ACE.parseFromACL(results[davxml.acl])
print utils.printACEList(aces, self.shell.shell.account)
return aces
def createACE(self, oldace=None):
ace = ACE()
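        # Build an ACE interactively; when editing, prompts are pre-filled
        # from oldace where possible.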
print "Principal Type:"
print " 1. Principal path"
print " 2. All"
print " 3. Authenticated"
print " 4. Unauthenticated"
print " 5. Property"
insert = None
if oldace:
mapper = {
str(davxml.href): "1",
str(davxml.all): "2",
str(davxml.authenticated): "3",
str(davxml.unauthenticated): "4",
str(davxml.property): "5",
}
insert = mapper.get(oldace.principal)
choice = utils.numericInput("Select type: ", 1, 5, insert=insert)
if choice == "q":
return None
if choice == 1:
href = utils.textInput("Enter principal path: ", insert=oldace.data if oldace else None)
principal = self.shell.shell.account.getPrincipal(URL(url=href))
ace.principal = str(davxml.href)
ace.data = principal.principalURL.relativeURL()
elif choice == 2:
ace.principal = str(davxml.all)
elif choice == 3:
ace.principal = str(davxml.authenticated)
elif choice == 4:
ace.principal = str(davxml.unauthenticated)
elif choice == 5:
prop = utils.textInput("Enter property qname: ", insert=str(oldace.data) if oldace else None)
ace.principal = str(davxml.property)
ace.data = QName(prop)
invert = utils.yesNoInput("Invert principal [y/n]: ", insert=("y" if oldace.invert else "n") if oldace else None)
ace.invert = (invert == "y")
grant = utils.choiceInput("Grant or Deny privileges [g/d]: ", ("g", "d",), insert=("g" if oldace.grant else "d") if oldace else None)
ace.grant = (grant == "g")
print "Privileges:"
print " a. {DAV}read"
print " b. {DAV}write"
print " c. {DAV}write-properties"
print " d. {DAV}write-content"
print " e. {DAV}read-acl"
print " f. {DAV}read-current-user-privilege-set"
print " g. {DAV}write-acl"
print " h. {DAV}bind"
print " i. {DAV}unbind"
print " j. {DAV}all"
print " k. {CALDAV}read-free-busy"
print " l. {CALDAV}schedule"
print " q. quit without changes"
choice = utils.multiChoiceInput(
"Select multiple items: ",
[char for char in "abcdefghijklq"],
)
if "q" in choice:
return None
mappedPrivs = {
'a': davxml.read,
'b': davxml.write,
'c': davxml.write_properties,
'd': davxml.write_content,
'e': davxml.read_acl,
'f': davxml.read_current_user_privilege_set,
'g': davxml.write_acl,
'h': davxml.bind,
'i': davxml.unbind,
'j': davxml.all,
'k': caldavxml.read_free_busy,
'l': caldavxml.schedule,
}
ace.privs = ()
for char in choice:
ace.privs += (mappedPrivs[char],)
return ace
class Add(CommonACLCommand):
def __init__(self):
super(Command, self).__init__()
self.cmds = ("add",)
def execute(self, name, options):
# First list the current set
aces = self.displayACEList()
if aces:
# Ask user which one to delete
while True:
result = raw_input("Add ACL before [1 - %d] or cancel [q]: " % (len(aces) + 1,))
if readline.get_current_history_length():
readline.remove_history_item(readline.get_current_history_length() - 1)
if not result:
continue
if result[0] == "q":
break
try:
number = int(result)
if number > len(aces):
number = len(aces)
except ValueError:
print "Invalid input, try again."
continue
# Try and get the new ace
ace = self.createACE()
if not ace:
break
aces.insert(number, ace)
# Now remove those that cannot be edited
aces = [ace for ace in aces if ace.canChange()]
# Now execute
self.shell.shell.account.session.setACL(self.shell.resource, aces)
break
def usage(self, name):
return """Usage: %s
""" % (name,)
def helpDescription(self):
return "Add ACL to existing resource."
class Change(CommonACLCommand):
def __init__(self):
super(Command, self).__init__()
self.cmds = ("change",)
def execute(self, name, options):
# First list the current set
aces = self.displayACEList()
if aces:
# Ask user which one to delete
while True:
result = raw_input("Change ACL at [1 - %d] or cancel [q]: " % (len(aces),))
if readline.get_current_history_length():
readline.remove_history_item(readline.get_current_history_length() - 1)
if not result:
continue
if result[0] == "q":
break
try:
number = int(result)
except ValueError:
print "Invalid input, try again."
continue
# Check that the targeted ace is editable
if not aces[number - 1].canChange():
print "You cannot change a protected or inherited ace."
break
# Try and get the new ace
ace = self.createACE(oldace=aces[number - 1])
if not ace:
break
aces[number - 1] = ace
# Now remove those that cannot be edited
aces = [ace for ace in aces if ace.canChange()]
# Now execute
self.shell.shell.account.session.setACL(self.shell.resource, aces)
break
def usage(self, name):
return """Usage: %s
""" % (name,)
def helpDescription(self):
return "Change ACL on existing resource."
class Remove(CommonACLCommand):
def __init__(self):
super(Command, self).__init__()
self.cmds = ("remove",)
def execute(self, name, options):
# First list the current set
aces = self.displayACEList()
if aces:
# Ask user which one to delete
while True:
result = raw_input("Remove ACL [1 - %d] or cancel [q]: " % (len(aces),))
if readline.get_current_history_length():
readline.remove_history_item(readline.get_current_history_length() - 1)
if not result:
continue
if result[0] == "q":
break
try:
number = int(result)
except ValueError:
print "Invalid input, try again."
continue
# Check that the targeted ace is editable
if not aces[number - 1].canChange():
print "You cannot remove a protected or inherited ace."
break
# Remove the one we are removing
del aces[number - 1]
# Now remove those that cannot be edited
aces = [ace for ace in aces if ace.canChange()]
# Now execute
self.shell.shell.account.session.setACL(self.shell.resource, aces)
break
def usage(self, name):
return """Usage: %s
""" % (name,)
def helpDescription(self):
return "Remove ACL on existing resource."
class List(CommonACLCommand):
def __init__(self):
super(Command, self).__init__()
self.cmds = ("list",)
def execute(self, name, options):
self.displayACEList()
return True
def usage(self, name):
return """Usage: %s
""" % (name,)
def helpDescription(self):
return "List current ACLs on existing resource."
|
|
from typing import Callable, List, Sequence, TypeVar
import pytest
from pluggy import HookimplMarker, HookspecMarker, PluginManager, PluginValidationError
from pluggy._hooks import HookImpl, _HookCaller
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
@pytest.fixture
def hc(pm: PluginManager) -> _HookCaller:
class Hooks:
@hookspec
def he_method1(self, arg: object) -> None:
pass
pm.add_hookspecs(Hooks)
return pm.hook.he_method1
FuncT = TypeVar("FuncT", bound=Callable[..., object])
class AddMeth:
def __init__(self, hc: _HookCaller) -> None:
self.hc = hc
def __call__(
self, tryfirst: bool = False, trylast: bool = False, hookwrapper: bool = False
) -> Callable[[FuncT], FuncT]:
def wrap(func: FuncT) -> FuncT:
hookimpl(tryfirst=tryfirst, trylast=trylast, hookwrapper=hookwrapper)(func)
self.hc._add_hookimpl(
HookImpl(None, "<temp>", func, func.example_impl), # type: ignore[attr-defined]
)
return func
return wrap
@pytest.fixture
def addmeth(hc: _HookCaller) -> AddMeth:
return AddMeth(hc)
def funcs(hookmethods: Sequence[HookImpl]) -> List[Callable[..., object]]:
return [hookmethod.function for hookmethod in hookmethods]
def test_adding_nonwrappers(hc: _HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1() -> None:
pass
@addmeth()
def he_method2() -> None:
pass
@addmeth()
def he_method3() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1, he_method2, he_method3]
def test_adding_nonwrappers_trylast(hc: _HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1_middle() -> None:
pass
@addmeth(trylast=True)
def he_method1() -> None:
pass
@addmeth()
def he_method1_b() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1, he_method1_middle, he_method1_b]
def test_adding_nonwrappers_trylast3(hc: _HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1_a() -> None:
pass
@addmeth(trylast=True)
def he_method1_b() -> None:
pass
@addmeth()
def he_method1_c() -> None:
pass
@addmeth(trylast=True)
def he_method1_d() -> None:
pass
assert funcs(hc.get_hookimpls()) == [
he_method1_d,
he_method1_b,
he_method1_a,
he_method1_c,
]
def test_adding_nonwrappers_trylast2(hc: _HookCaller, addmeth: AddMeth) -> None:
@addmeth()
def he_method1_middle() -> None:
pass
@addmeth()
def he_method1_b() -> None:
pass
@addmeth(trylast=True)
def he_method1() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1, he_method1_middle, he_method1_b]
def test_adding_nonwrappers_tryfirst(hc: _HookCaller, addmeth: AddMeth) -> None:
@addmeth(tryfirst=True)
def he_method1() -> None:
pass
@addmeth()
def he_method1_middle() -> None:
pass
@addmeth()
def he_method1_b() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method1_middle, he_method1_b, he_method1]
def test_adding_wrappers_ordering(hc: _HookCaller, addmeth: AddMeth) -> None:
@addmeth(hookwrapper=True)
def he_method1() -> None:
pass
@addmeth()
def he_method1_middle() -> None:
pass
@addmeth(hookwrapper=True)
def he_method3() -> None:
pass
assert funcs(hc.get_hookimpls()) == [
he_method1_middle,
he_method1,
he_method3,
]
def test_adding_wrappers_ordering_tryfirst(hc: _HookCaller, addmeth: AddMeth) -> None:
@addmeth(hookwrapper=True, tryfirst=True)
def he_method1() -> None:
pass
@addmeth(hookwrapper=True)
def he_method2() -> None:
pass
assert funcs(hc.get_hookimpls()) == [he_method2, he_method1]
def test_adding_wrappers_complex(hc: _HookCaller, addmeth: AddMeth) -> None:
assert funcs(hc.get_hookimpls()) == []
@addmeth(hookwrapper=True, trylast=True)
def m1() -> None:
...
assert funcs(hc.get_hookimpls()) == [m1]
@addmeth()
def m2() -> None:
...
assert funcs(hc.get_hookimpls()) == [m2, m1]
@addmeth(trylast=True)
def m3() -> None:
...
assert funcs(hc.get_hookimpls()) == [m3, m2, m1]
@addmeth(hookwrapper=True)
def m4() -> None:
...
assert funcs(hc.get_hookimpls()) == [m3, m2, m1, m4]
@addmeth(hookwrapper=True, tryfirst=True)
def m5() -> None:
...
assert funcs(hc.get_hookimpls()) == [m3, m2, m1, m4, m5]
@addmeth(tryfirst=True)
def m6() -> None:
...
assert funcs(hc.get_hookimpls()) == [m3, m2, m6, m1, m4, m5]
@addmeth()
def m7() -> None:
...
assert funcs(hc.get_hookimpls()) == [m3, m2, m7, m6, m1, m4, m5]
@addmeth(hookwrapper=True)
def m8() -> None:
...
assert funcs(hc.get_hookimpls()) == [m3, m2, m7, m6, m1, m4, m8, m5]
@addmeth(trylast=True)
def m9() -> None:
...
assert funcs(hc.get_hookimpls()) == [m9, m3, m2, m7, m6, m1, m4, m8, m5]
@addmeth(tryfirst=True)
def m10() -> None:
...
assert funcs(hc.get_hookimpls()) == [m9, m3, m2, m7, m6, m10, m1, m4, m8, m5]
@addmeth(hookwrapper=True, trylast=True)
def m11() -> None:
...
assert funcs(hc.get_hookimpls()) == [m9, m3, m2, m7, m6, m10, m11, m1, m4, m8, m5]
@addmeth(hookwrapper=True)
def m12() -> None:
...
assert funcs(hc.get_hookimpls()) == [
m9,
m3,
m2,
m7,
m6,
m10,
m11,
m1,
m4,
m8,
m12,
m5,
]
@addmeth()
def m13() -> None:
...
assert funcs(hc.get_hookimpls()) == [
m9,
m3,
m2,
m7,
m13,
m6,
m10,
m11,
m1,
m4,
m8,
m12,
m5,
]
def test_hookspec(pm: PluginManager) -> None:
class HookSpec:
@hookspec()
def he_myhook1(arg1) -> None:
pass
@hookspec(firstresult=True)
def he_myhook2(arg1) -> None:
pass
@hookspec(firstresult=False)
def he_myhook3(arg1) -> None:
pass
pm.add_hookspecs(HookSpec)
assert pm.hook.he_myhook1.spec is not None
assert not pm.hook.he_myhook1.spec.opts["firstresult"]
assert pm.hook.he_myhook2.spec is not None
assert pm.hook.he_myhook2.spec.opts["firstresult"]
assert pm.hook.he_myhook3.spec is not None
assert not pm.hook.he_myhook3.spec.opts["firstresult"]
@pytest.mark.parametrize("name", ["hookwrapper", "optionalhook", "tryfirst", "trylast"])
@pytest.mark.parametrize("val", [True, False])
def test_hookimpl(name: str, val: bool) -> None:
@hookimpl(**{name: val}) # type: ignore[misc,call-overload]
def he_myhook1(arg1) -> None:
pass
if val:
assert he_myhook1.example_impl.get(name)
else:
assert not hasattr(he_myhook1, name)
def test_hookrelay_registry(pm: PluginManager) -> None:
"""Verify hook caller instances are registered by name onto the relay
and can be likewise unregistered."""
class Api:
@hookspec
def hello(self, arg: object) -> None:
"api hook 1"
pm.add_hookspecs(Api)
hook = pm.hook
assert hasattr(hook, "hello")
assert repr(hook.hello).find("hello") != -1
class Plugin:
@hookimpl
def hello(self, arg):
return arg + 1
plugin = Plugin()
pm.register(plugin)
out = hook.hello(arg=3)
assert out == [4]
assert not hasattr(hook, "world")
pm.unregister(plugin)
assert hook.hello(arg=3) == []
def test_hookrelay_registration_by_specname(pm: PluginManager) -> None:
"""Verify hook caller instances may also be registered by specifying a
specname option to the hookimpl"""
class Api:
@hookspec
def hello(self, arg: object) -> None:
"api hook 1"
pm.add_hookspecs(Api)
hook = pm.hook
assert hasattr(hook, "hello")
assert len(pm.hook.hello.get_hookimpls()) == 0
class Plugin:
@hookimpl(specname="hello")
def foo(self, arg: int) -> int:
return arg + 1
plugin = Plugin()
pm.register(plugin)
out = hook.hello(arg=3)
assert out == [4]
def test_hookrelay_registration_by_specname_raises(pm: PluginManager) -> None:
"""Verify using specname still raises the types of errors during registration as it
would have without using specname."""
class Api:
@hookspec
def hello(self, arg: object) -> None:
"api hook 1"
pm.add_hookspecs(Api)
# make sure a bad signature still raises an error when using specname
class Plugin:
@hookimpl(specname="hello")
def foo(self, arg: int, too, many, args) -> int:
return arg + 1
with pytest.raises(PluginValidationError):
pm.register(Plugin())
# make sure check_pending still fails if specname doesn't have a
# corresponding spec. EVEN if the function name matches one.
class Plugin2:
@hookimpl(specname="bar")
def hello(self, arg: int) -> int:
return arg + 1
pm.register(Plugin2())
with pytest.raises(PluginValidationError):
pm.check_pending()
|
|
#!/usr/bin/env python
"""A Verification, Storage and Query/Retrieve SCP application."""
import argparse
from configparser import ConfigParser
import os
import sys
import pydicom.config
from pydicom.dataset import Dataset
from pynetdicom import (
AE, evt, AllStoragePresentationContexts, ALL_TRANSFER_SYNTAXES
)
from pynetdicom import _config, _handlers
from pynetdicom.apps.common import setup_logging
from pynetdicom.sop_class import (
VerificationSOPClass,
PatientRootQueryRetrieveInformationModelFind,
PatientRootQueryRetrieveInformationModelMove,
PatientRootQueryRetrieveInformationModelGet,
StudyRootQueryRetrieveInformationModelFind,
StudyRootQueryRetrieveInformationModelMove,
StudyRootQueryRetrieveInformationModelGet
)
from pynetdicom.utils import validate_ae_title
#from pynetdicom.apps.qrscp import config
from pynetdicom.apps.qrscp.handlers import (
handle_echo, handle_find, handle_get, handle_move, handle_store
)
from pynetdicom.apps.qrscp import db
# Assumed imports for clean() below, which uses SQLAlchemy and the app's
# Instance table directly (not shown in this excerpt).
from pynetdicom.apps.qrscp.db import clear, Instance
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Use `None` for empty values
pydicom.config.use_none_as_empty_text_VR_value = True
# Don't log identifiers
_config.LOG_RESPONSE_IDENTIFIERS = False
# Override the standard logging handlers
def _dont_log(event):
pass
_handlers._send_c_find_rsp = _dont_log
_handlers._send_c_get_rsp = _dont_log
_handlers._send_c_move_rsp = _dont_log
_handlers._send_c_store_rq = _dont_log
_handlers._recv_c_store_rsp = _dont_log
__version__ = "1.0.0"
def _log_config(config, logger):
"""Log the configuration settings.
Parameters
    ----------
    config : configparser.ConfigParser
        The application's configuration settings.
    logger : logging.Logger
        The application's logger.
"""
logger.debug("Configuration settings")
app = config["DEFAULT"]
aet, port, pdu = app["ae_title"], app["port"], app["max_pdu"]
logger.debug(
f" AE title: {aet}, Port: {port}, Max. PDU: {pdu}"
)
logger.debug(" Timeouts:")
acse, dimse = app["acse_timeout"], app["dimse_timeout"]
network = app["network_timeout"]
logger.debug(f" ACSE: {acse}, DIMSE: {dimse}, Network: {network}")
logger.debug(f" Storage directory: {app['instance_location']}")
logger.debug(f" Database location: {app['database_location']}")
if config.sections():
logger.debug(" Move destinations: ")
else:
logger.debug(" Move destinations: none")
for ae_title in config.sections():
addr = config[ae_title]["address"]
port = config[ae_title]["port"]
logger.debug(f' {ae_title}: ({addr}, {port})')
logger.debug("")
def _setup_argparser():
"""Setup the command line arguments"""
# Description
parser = argparse.ArgumentParser(
description=(
"The qrscp application implements a Service Class Provider (SCP) "
"for the Verification, Storage and Query/Retrieve (QR) Service "
"Classes."
),
usage="qrscp [options]"
)
# General Options
gen_opts = parser.add_argument_group("General Options")
gen_opts.add_argument(
"--version",
help="print version information and exit",
action="store_true"
)
output = gen_opts.add_mutually_exclusive_group()
output.add_argument(
"-q", "--quiet",
help="quiet mode, print no warnings and errors",
action="store_const",
dest="log_type", const="q"
)
output.add_argument(
"-v", "--verbose",
help="verbose mode, print processing details",
action="store_const",
dest="log_type", const="v"
)
output.add_argument(
"-d", "--debug",
help="debug mode, print debug information",
action="store_const",
dest="log_type", const="d"
)
gen_opts.add_argument(
"-ll", "--log-level", metavar="[l]",
help=(
"use level l for the logger (critical, error, warn, info, debug)"
),
type=str,
choices=["critical", "error", "warn", "info", "debug"]
)
fdir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(fdir, "default.ini")
gen_opts.add_argument(
"-c", "--config", metavar="[f]ilename",
help="use configuration file f",
default=fpath,
)
net_opts = parser.add_argument_group("Networking Options")
net_opts.add_argument(
"--port",
help="override the configured TCP/IP listen port number",
)
net_opts.add_argument(
"-aet", "--ae-title", metavar="[a]etitle",
help="override the configured AE title",
)
net_opts.add_argument(
"-ta", "--acse-timeout", metavar="[s]econds",
help="override the configured timeout for ACSE messages",
)
net_opts.add_argument(
"-td", "--dimse-timeout", metavar="[s]econds",
help="override the configured timeout for DIMSE messages",
)
net_opts.add_argument(
"-tn", "--network-timeout", metavar="[s]econds",
help="override the configured timeout for the network",
)
net_opts.add_argument(
"-pdu", "--max-pdu", metavar="[n]umber of bytes",
help="override the configured max receive pdu to n bytes",
)
net_opts.add_argument(
"-ba", "--bind-address", metavar="[a]ddress",
help=(
"override the configured address of the network interface to "
"listen on"
),
)
db_opts = parser.add_argument_group("Database Options")
db_opts.add_argument(
"--database-location", metavar="[f]ile",
help="override the location of the database using file f",
type=str,
)
db_opts.add_argument(
"--instance-location", metavar="[d]irectory",
help=(
"override the configured instance storage location to directory d"
),
type=str
)
db_opts.add_argument(
"--clean",
help=(
"remove all entries from the database and delete the "
"corresponding stored instances"
),
action="store_true",
)
return parser.parse_args()
def clean(db_path, logger):
"""Remove all entries from the database and delete the corresponding
stored instances.
Parameters
----------
db_path : str
The database path to use with create_engine().
logger : logging.Logger
The application logger.
Returns
-------
bool
``True`` if the storage directory and database were both cleaned
successfully, ``False`` otherwise.
"""
engine = create_engine(db_path)
with engine.connect() as conn:
Session = sessionmaker(bind=engine)
session = Session()
try:
fpaths = [ii.filename for ii in session.query(Instance).all()]
        except Exception as exc:
            logger.error("Exception raised while querying the database")
            logger.exception(exc)
            session.rollback()
            return False
        finally:
            session.close()
storage_cleaned = True
for fpath in fpaths:
try:
os.remove(os.path.join(config.INSTANCE_LOCATION, fpath))
except Exception as exc:
logger.error(f"Unable to delete the instance at '{fpath}'")
logger.exception(exc)
storage_cleaned = False
if storage_cleaned:
logger.info("Storage directory cleaned successfully")
else:
logger.error("Failed to clean storage directory")
database_cleaned = False
try:
clear(session)
database_cleaned = True
logger.info("Database cleaned successfully")
except Exception as exc:
logger.error("Failed to clean the database")
logger.exception(exc)
session.rollback()
finally:
session.close()
return database_cleaned and storage_cleaned
def main(args=None):
"""Run the application."""
if args is not None:
sys.argv = args
args = _setup_argparser()
if args.version:
print(f"qrscp.py v{__version__}")
sys.exit()
APP_LOGGER = setup_logging(args, "qrscp")
APP_LOGGER.debug(f"qrscp.py v{__version__}")
APP_LOGGER.debug("")
APP_LOGGER.debug("Using configuration from:")
APP_LOGGER.debug(f" {args.config}")
APP_LOGGER.debug("")
config = ConfigParser()
config.read(args.config)
if args.ae_title:
config["DEFAULT"]["ae_title"] = args.ae_title
if args.port:
config["DEFAULT"]["port"] = args.port
if args.max_pdu:
config["DEFAULT"]["max_pdu"] = args.max_pdu
if args.acse_timeout:
config["DEFAULT"]["acse_timeout"] = args.acse_timeout
if args.dimse_timeout:
config["DEFAULT"]["dimse_timeout"] = args.dimse_timeout
if args.network_timeout:
config["DEFAULT"]["network_timeout"] = args.network_timeout
if args.bind_address:
config["DEFAULT"]["bind_address"] = args.bind_address
if args.database_location:
config["DEFAULT"]["database_location"] = args.database_location
if args.instance_location:
config["DEFAULT"]["instance_location"] = args.instance_location
# Log configuration settings
_log_config(config, APP_LOGGER)
app_config = config["DEFAULT"]
dests = {}
for ae_title in config.sections():
dest = config[ae_title]
# Convert to bytes and validate the AE title
ae_title = validate_ae_title(ae_title.encode("ascii"), use_short=True)
dests[ae_title] = (dest["address"], dest.getint("port"))
# Use default or specified configuration file
current_dir = os.path.abspath(os.path.dirname(__file__))
instance_dir = os.path.join(current_dir, app_config["instance_location"])
db_path = os.path.join(current_dir, app_config["database_location"])
# The path to the database
db_path = f"sqlite:///{db_path}"
db.create(db_path)
# Clean up the database and storage directory
if args.clean:
response = input(
"This will delete all instances from both the storage directory "
"and the database. Are you sure you wish to continue? [yes/no]: "
)
if response != "yes":
sys.exit()
if clean(db_path, APP_LOGGER):
sys.exit()
else:
sys.exit(1)
# Try to create the instance storage directory
os.makedirs(instance_dir, exist_ok=True)
ae = AE(app_config["ae_title"])
ae.maximum_pdu_size = app_config.getint("max_pdu")
ae.acse_timeout = app_config.getfloat("acse_timeout")
ae.dimse_timeout = app_config.getfloat("dimse_timeout")
ae.network_timeout = app_config.getfloat("network_timeout")
## Add supported presentation contexts
# Verification SCP
ae.add_supported_context(VerificationSOPClass, ALL_TRANSFER_SYNTAXES)
# Storage SCP - support all transfer syntaxes
for cx in AllStoragePresentationContexts:
ae.add_supported_context(
cx.abstract_syntax, ALL_TRANSFER_SYNTAXES,
scp_role=True, scu_role=False
)
# Query/Retrieve SCP
ae.add_supported_context(PatientRootQueryRetrieveInformationModelFind)
ae.add_supported_context(PatientRootQueryRetrieveInformationModelMove)
ae.add_supported_context(PatientRootQueryRetrieveInformationModelGet)
ae.add_supported_context(StudyRootQueryRetrieveInformationModelFind)
ae.add_supported_context(StudyRootQueryRetrieveInformationModelMove)
ae.add_supported_context(StudyRootQueryRetrieveInformationModelGet)
# Set our handler bindings
handlers = [
(evt.EVT_C_ECHO, handle_echo, [args, APP_LOGGER]),
(evt.EVT_C_FIND, handle_find, [db_path, args, APP_LOGGER]),
(evt.EVT_C_GET, handle_get, [db_path, args, APP_LOGGER]),
(evt.EVT_C_MOVE, handle_move, [dests, db_path, args, APP_LOGGER]),
(
evt.EVT_C_STORE,
handle_store,
[instance_dir, db_path, args, APP_LOGGER]
),
]
# Listen for incoming association requests
ae.start_server(
(app_config["bind_address"], app_config.getint("port")),
evt_handlers=handlers
)
if __name__ == "__main__":
main()
|
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt', 'os2' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def _get_masked_mode(mode):
mask = umask(0)
umask(mask)
return mode & ~mask
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(path [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. If the
target directory already exists, an OSError is raised unless
exist_ok is True and the existing directory has the mode we
specified. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError as e:
import stat as st
if not (e.errno == errno.EEXIST and exist_ok and path.isdir(name) and
st.S_IMODE(lstat(name).st_mode) == _get_masked_mode(mode)):
raise
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
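# Illustrative sketch, not part of the original module: how the three helpers
# above cooperate. It stays inside a fresh temporary directory, so calling it
# has no effect on any real filesystem layout.
def _example_super_directory_utilities():
    import tempfile
    base = tempfile.mkdtemp()
    deep = path.join(base, "a", "b", "c")
    makedirs(deep)                    # creates a/, a/b/ and a/b/c/
    makedirs(deep, exist_ok=True)     # normally a no-op for an existing dir
    target = path.join(base, "x", "y", "moved")
    renames(deep, target)             # builds x/y/, then prunes the empty a/b/, a/
    removedirs(target)                # removes moved/, then y/, x/ and base/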
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
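# Illustrative sketch, not part of the original module: a bottom-up walk.
# With topdown=False each directory is yielded after its subdirectories, which
# makes it easy to accumulate totals that include everything underneath.
def _example_walk_bottom_up(top):
    totals = {}
    for dirpath, dirnames, filenames in walk(top, topdown=False):
        size = sum(path.getsize(path.join(dirpath, f)) for f in filenames)
        size += sum(totals.get(path.join(dirpath, d), 0) for d in dirnames)
        totals[dirpath] = size
    return totals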
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
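# Illustrative sketch, not part of the original module, and deliberately never
# called here: a successful exec* call replaces the current process and does
# not return. It only shows the calling convention -- args[0] is conventionally
# the program name, and the "e" variants take the environment last.
def _example_exec_conventions():
    execlp("ls", "ls", "-l")                     # $PATH search, inline args
    execvpe("env", ["env"], {"GREETING": "hi"})  # unreachable if the first succeeds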
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except error as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
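# Illustrative sketch, not part of the original module: get_exec_path() with
# the process environment versus an explicit single-entry environment.
def _example_get_exec_path():
    default_dirs = get_exec_path()                       # uses os.environ
    custom_dirs = get_exec_path({"PATH": "/opt/tools"})  # -> ['/opt/tools']
    return default_dirs, custom_dirs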
# Change environ to automatically call putenv(), unsetenv if they exist.
from _abcoll import MutableMapping # Can't use collections (bootstrap)
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
value = self._data[self.encodekey(key)]
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
key = self.encodekey(key)
self.unsetenv(key)
del self._data[key]
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
__all__.append("unsetenv")
def _createenviron():
if name in ('os2', 'nt'):
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
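# Illustrative sketch, not part of the original module: the environ mapping
# built above keeps the C-level environment in sync, so plain dict-style
# mutation is all that is needed for child processes to see the change.
def _example_environ_roundtrip():
    environ["EXAMPLE_FLAG"] = "1"      # __setitem__ also calls putenv()
    assert getenv("EXAMPLE_FLAG") == "1"
    del environ["EXAMPLE_FLAG"]        # __delitem__ also calls unsetenv()
    assert getenv("EXAMPLE_FLAG") is None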
supports_bytes_environ = name not in ('os2', 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
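# Illustrative sketch, not part of the original module: fsencode()/fsdecode()
# are inverses under the filesystem encoding, and (outside the 'mbcs' case)
# undecodable bytes survive the round trip via 'surrogateescape'.
def _example_fscodec_roundtrip():
    name = "caf\u00e9.txt"
    raw = fsencode(name)               # bytes in the filesystem encoding
    assert fsdecode(raw) == name
    odd = b"caf\xff.txt"               # not valid UTF-8
    assert fsencode(fsdecode(odd)) == odd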
def _exists(name):
return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
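# Illustrative sketch, not part of the original module: the P_WAIT / P_NOWAIT
# modes of the spawn*() helpers above (Unix-only command names assumed).
def _example_spawn_modes():
    status = spawnlp(P_WAIT, "true", "true")        # blocks; 0 on success
    pid = spawnlp(P_NOWAIT, "sleep", "sleep", "1")  # returns the child pid
    waitpid(pid, 0)                                 # caller must reap the child
    return status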
import copyreg as _copyreg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copyreg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bs = b""
while len(bs) < n:
bs += read(_urandomfd, n - len(bs))
close(_urandomfd)
return bs
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
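# Illustrative sketch, not part of the original module: the popen() defined
# above returns a _wrap_close proxy, so reads are delegated to the wrapped
# stream and close() surfaces the child's exit status.
def _example_popen_usage():
    stream = popen("echo hello")      # "r" mode by default
    output = stream.read()
    status = stream.close()           # None on success, otherwise exit info
    return output, status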
|
|
__ver__ = '0.6.2'
import urllib, urlparse
import time, random
import sys, weakref
import socket
import simplejson
import http
import upload
import errors
import listing, page
import compatibility
try:
import gzip
except ImportError:
gzip = None
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def parse_timestamp(t):
if t == '0000-00-00T00:00:00Z':
return (0, 0, 0, 0, 0, 0, 0, 0)
return time.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
class WaitToken(object):
def __init__(self):
self.id = '%x' % random.randint(0, sys.maxint)
def __hash__(self):
return hash(self.id)
class Site(object):
api_limit = 500
def __init__(self, host, path = '/w/', ext = '.php', pool = None, retry_timeout = 30,
max_retries = 25, wait_callback = lambda *x: None,
max_lag = 3, compress = True, force_login = True, do_init = True):
self.host = host
self.path = path
self.ext = ext
self.credentials = None
self.compress = compress
self.retry_timeout = retry_timeout
self.max_retries = max_retries
self.wait_callback = wait_callback
self.max_lag = str(max_lag)
self.wait_tokens = weakref.WeakKeyDictionary()
self.blocked = False
self.hasmsg = False
self.groups = []
self.rights = []
self.tokens = {}
self.force_login = force_login
if pool is None:
self.connection = http.HTTPPool()
else:
self.connection = pool
self.version = None
self.pages = listing.PageList(self)
self.categories = listing.PageList(self, namespace = 14)
self.images = listing.PageList(self, namespace = 6)
# Compat
self.Pages = self.pages
self.Categories = self.categories
self.Images = self.images
self.namespaces = self.default_namespaces
self.writeapi = False
self.initialized = False
if do_init:
try:
self.site_init()
except errors.APIError, e:
# Private wiki, do init after login
if e[0] != u'unknown_action': raise
def site_init(self):
meta = self.api('query', meta = 'siteinfo|userinfo',
siprop = 'general|namespaces', uiprop = 'groups|rights')
self.site = meta['query']['general']
self.namespaces = dict(((i['id'], i.get('*', '')) for i in meta['query']['namespaces'].itervalues()))
self.writeapi = 'writeapi' in self.site
if self.site['generator'].startswith('MediaWiki '):
version = self.site['generator'][10:].split('.')
if len(version) == 2 and version[1].endswith('alpha'):
self.version = (int(version[0]), int(version[1][:-5]), 'alpha')
elif len(version) == 3:
self.version = (int(version[0]), int(version[1]), int(version[2]))
else:
raise errors.MediaWikiVersionError('Unknown MediaWiki %s' % '.'.join(version))
else:
raise errors.MediaWikiVersionError('Unknown generator %s' % self.site['generator'])
# Require 1.11 until some compatibility issues are fixed
self.require(1, 11)
userinfo = compatibility.userinfo(meta, self.require(1, 12, raise_error = False))
self.username = userinfo['name']
self.groups = userinfo.get('groups', [])
self.rights = userinfo.get('rights', [])
self.initialized = True
default_namespaces = {0: u'', 1: u'Talk', 2: u'User', 3: u'User talk', 4: u'Project', 5: u'Project talk',
6: u'Image', 7: u'Image talk', 8: u'MediaWiki', 9: u'MediaWiki talk', 10: u'Template', 11: u'Template talk',
12: u'Help', 13: u'Help talk', 14: u'Category', 15: u'Category talk', -1: u'Special', -2: u'Media'}
def __repr__(self):
return "<Site object '%s%s'>" % (self.host, self.path)
def api(self, action, *args, **kwargs):
kwargs.update(args)
if action == 'query':
if 'meta' in kwargs:
kwargs['meta'] += '|userinfo'
else:
kwargs['meta'] = 'userinfo'
if 'uiprop' in kwargs:
kwargs['uiprop'] += '|blockinfo|hasmsg'
else:
kwargs['uiprop'] = 'blockinfo|hasmsg'
token = self.wait_token()
while True:
info = self.raw_api(action, **kwargs)
if not info: info = {}
try:
userinfo = compatibility.userinfo(info, self.require(1, 12, raise_error = None))
except KeyError:
userinfo = ()
if 'blockedby' in userinfo:
self.blocked = (userinfo['blockedby'], userinfo.get('blockreason', u''))
else:
self.blocked = False
self.hasmsg = 'message' in userinfo
self.logged_in = 'anon' not in userinfo
if 'error' in info:
if info['error']['code'] in (u'internal_api_error_DBConnectionError', ):
self.wait(token)
continue
if '*' in info['error']:
raise errors.APIError(info['error']['code'],
info['error']['info'], info['error']['*'])
raise errors.APIError(info['error']['code'],
info['error']['info'], kwargs)
return info
@staticmethod
def _to_str(data):
if type(data) is unicode:
return data.encode('utf-8')
return str(data)
@staticmethod
def _query_string(*args, **kwargs):
kwargs.update(args)
qs = urllib.urlencode([(k, Site._to_str(v)) for k, v in kwargs.iteritems()
if k != 'wpEditToken'])
if 'wpEditToken' in kwargs:
qs += '&wpEditToken=' + urllib.quote(Site._to_str(kwargs['wpEditToken']))
return qs
def raw_call(self, script, data):
url = self.path + script + self.ext
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
if self.compress and gzip:
headers['Accept-Encoding'] = 'gzip'
token = self.wait_token((script, data))
while True:
try:
stream = self.connection.post(self.host,
url, data = data, headers = headers)
if stream.getheader('Content-Encoding') == 'gzip':
# The response stream is not seekable, so buffer it fully before wrapping it in GzipFile.
seekable_stream = StringIO(stream.read())
stream = gzip.GzipFile(fileobj = seekable_stream)
return stream
except errors.HTTPStatusError, e:
if e[0] == 503 and e[1].getheader('X-Database-Lag'):
self.wait(token, int(e[1].getheader('Retry-After')))
elif e[0] < 500 or e[0] > 599:
raise
else:
self.wait(token)
except errors.HTTPRedirectError:
raise
except errors.HTTPError:
self.wait(token)
except ValueError:
self.wait(token)
def raw_api(self, action, *args, **kwargs):
kwargs['action'] = action
kwargs['format'] = 'json'
data = self._query_string(*args, **kwargs)
json = self.raw_call('api', data).read()
try:
return simplejson.loads(json)
except ValueError:
if json.startswith('MediaWiki API is not enabled for this site.'):
raise errors.APIDisabledError
raise
def raw_index(self, action, *args, **kwargs):
kwargs['action'] = action
kwargs['maxlag'] = self.max_lag
data = self._query_string(*args, **kwargs)
return self.raw_call('index', data).read().decode('utf-8', 'ignore')
def wait_token(self, args = None):
token = WaitToken()
self.wait_tokens[token] = (0, args)
return token
def wait(self, token, min_wait = 0):
retry, args = self.wait_tokens[token]
self.wait_tokens[token] = (retry + 1, args)
if retry > self.max_retries and self.max_retries != -1:
raise errors.MaximumRetriesExceeded(self, token, args)
self.wait_callback(self, token, retry, args)
timeout = self.retry_timeout * retry
if timeout < min_wait: timeout = min_wait
time.sleep(timeout)
return self.wait_tokens[token]
def require(self, major, minor, revision = None, raise_error = True):
if self.version is None:
if raise_error is None: return
raise RuntimeError('Site %s has not yet been initialized' % repr(self))
if revision is None:
if self.version[:2] >= (major, minor):
return True
elif raise_error:
raise errors.MediaWikiVersionError('Requires version %s.%s, current version is %s.%s'
% ((major, minor) + self.version[:2]))
else:
return False
else:
raise NotImplementedError
# Actions
def email(self, user, text, subject, cc = False):
postdata = {}
postdata['wpSubject'] = subject
postdata['wpText'] = text
if cc: postdata['wpCCMe'] = '1'
postdata['wpEditToken'] = self.tokens['edit']
postdata['uselang'] = 'en'
postdata['title'] = u'Special:Emailuser/' + user
data = self.raw_index('submit', **postdata)
if 'var wgAction = "success";' not in data:
if 'This user has not specified a valid e-mail address' in data:
# Dirty hack
raise errors.NoSpecifiedEmailError, user
raise errors.EmailError, data
def login(self, username = None, password = None, cookies = None):
if self.initialized: self.require(1, 10)
if username and password:
self.credentials = (username, password)
if cookies:
if self.host not in self.connection.cookies:
self.connection.cookies[self.host] = http.CookieJar()
self.connection.cookies[self.host].update(cookies)
if self.credentials:
wait_token = self.wait_token()
while True:
login = self.api('login', lgname = self.credentials[0], lgpassword = self.credentials[1])
if login['login']['result'] == 'Success':
break
elif login['login']['result'] == 'Throttled':
self.wait(wait_token, login['login'].get('wait', 5))
else:
raise errors.LoginError(self, login['login'])
if self.initialized:
info = self.api('query', meta = 'userinfo', uiprop = 'groups|rights')
userinfo = compatibility.userinfo(info, self.require(1, 12, raise_error = False))
self.username = userinfo['name']
self.groups = userinfo.get('groups', [])
self.rights = userinfo.get('rights', [])
self.tokens = {}
else:
self.site_init()
def upload(self, file, filename, description, license = '', ignore = False, file_size = None):
image = self.Images[filename]
if not image.can('upload'):
raise errors.InsufficientPermission(filename)
if image.exists and not ignore:
raise errors.FileExists(filename)
if type(file) is str:
file_size = len(file)
file = StringIO(file)
if file_size is None:
file.seek(0, 2)
file_size = file.tell()
file.seek(0, 0)
predata = {}
# Do this thing later so that an incomplete upload won't work
# predata['wpDestFile'] = filename
predata['wpUploadDescription'] = description
predata['wpLicense'] = license
if ignore: predata['wpIgnoreWarning'] = 'true'
predata['wpUpload'] = 'Upload file'
predata['wpSourceType'] = 'file'
predata['wpDestFile'] = filename
postdata = upload.UploadFile('wpUploadFile', filename, file_size, file, predata)
wait_token = self.wait_token()
while True:
try:
self.connection.post(self.host,
self.path + 'index.php?title=Special:Upload&maxlag=' + self.max_lag,
data = postdata).read()
except errors.HTTPStatusError, e:
if e[0] == 503 and e[1].getheader('X-Database-Lag'):
self.wait(wait_token, int(e[1].getheader('Retry-After')))
elif e[0] < 500 or e[0] > 599:
raise
else:
self.wait(wait_token)
except errors.HTTPError:
self.wait(wait_token)
else:
return
file.seek(0, 0)
def parse(self, text, title = None):
kwargs = {'text': text}
if title is not None: kwargs['title'] = title
result = self.api('parse', **kwargs)
return result['parse']
# Lists
def allpages(self, start = None, prefix = None, namespace = '0', filterredir = 'all',
minsize = None, maxsize = None, prtype = None, prlevel = None,
limit = None, dir = 'ascending', filterlanglinks = 'all', generator = True):
self.require(1, 9)
pfx = listing.List.get_prefix('ap', generator)
kwargs = dict(listing.List.generate_kwargs(pfx, ('from', start), prefix = prefix,
minsize = minsize, maxsize = maxsize, prtype = prtype, prlevel = prlevel,
namespace = namespace, filterredir = filterredir, dir = dir,
filterlanglinks = filterlanglinks))
return listing.List.get_list(generator)(self, 'allpages', 'ap', limit = limit, return_values = 'title', **kwargs)
def alllinks(self, start = None, prefix = None, unique = False, prop = 'title',
namespace = '0', limit = None, generator = True):
self.require(1, 11)
pfx = listing.List.get_prefix('al', generator)
kwargs = dict(listing.List.generate_kwargs(pfx, ('from', start), prefix = prefix,
prop = prop, namespace = namespace))
if unique: kwargs[pfx + 'unique'] = '1'
return listing.List.get_list(generator)(self, 'alllinks', 'al', limit = limit, return_values = 'title', **kwargs)
def allcategories(self, start = None, prefix = None, dir = 'ascending', limit = None, generator = True):
self.require(1, 12)
pfx = listing.List.get_prefix('ac', generator)
kwargs = dict(listing.List.generate_kwargs(pfx, ('from', start), prefix = prefix, dir = dir))
return listing.List.get_list(generator)(self, 'allcategories', 'ac', limit = limit, **kwargs)
def allusers(self, start = None, prefix = None, group = None, prop = None, limit = None):
self.require(1, 11)
kwargs = dict(listing.List.generate_kwargs('au', ('from', start), prefix = prefix,
group = group, prop = prop))
return listing.List(self, 'allusers', 'au', limit = limit, **kwargs)
def blocks(self, start = None, end = None, dir = 'older', ids = None, users = None, limit = None,
prop = 'id|user|by|timestamp|expiry|reason|flags'):
self.require(1, 12)
# TODO: the `ids` parameter is accepted but never passed through to the API
kwargs = dict(listing.List.generate_kwargs('bk', start = start, end = end, dir = dir,
users = users, prop = prop))
return listing.List(self, 'blocks', 'bk', limit = limit, **kwargs)
def deletedrevisions(self, start = None, end = None, dir = 'older', namespace = None,
limit = None, prop = 'user|comment'):
# TODO: Fix
self.require(1, 12)
kwargs = dict(listing.List.generate_kwargs('dr', start = start, end = end, dir = dir,
namespace = namespace, prop = prop))
return listing.List(self, 'deletedrevs', 'dr', limit = limit, **kwargs)
def exturlusage(self, query, prop = None, protocol = 'http', namespace = None, limit = None):
self.require(1, 11)
kwargs = dict(listing.List.generate_kwargs('eu', query = query, prop = prop,
protocol = protocol, namespace = namespace))
return listing.List(self, 'exturlusage', 'eu', limit = limit, **kwargs)
def logevents(self, type = None, prop = None, start = None, end = None,
dir = 'older', user = None, title = None, limit = None):
self.require(1, 9)
kwargs = dict(listing.List.generate_kwargs('le', prop = prop, type = type, start = start,
end = end, dir = dir, user = user, title = title))
return listing.List(self, 'logevents', 'le', limit = limit, **kwargs)
def random(self, namespace, limit = 20):
self.require(1, 12)
kwargs = dict(listing.List.generate_kwargs('rn', namespace = namespace))
return listing.List(self, 'random', 'rn', limit = limit, **kwargs)
def recentchanges(self, start = None, end = None, dir = 'older', namespace = None,
prop = None, show = None, limit = None, type = None):
self.require(1, 9)
kwargs = dict(listing.List.generate_kwargs('rc', start = start, end = end, dir = dir,
namespace = namespace, prop = prop, show = show, type = type))
return listing.List(self, 'recentchanges', 'rc', limit = limit, **kwargs)
def search(self, search, namespace = '0', what = 'title', redirects = False, limit = None):
self.require(1, 11)
kwargs = dict(listing.List.generate_kwargs('sr', search = search, namespace = namespace, what = what))
if redirects: kwargs['srredirects'] = '1'
return listing.List(self, 'search', 'sr', limit = limit, **kwargs)
def usercontributions(self, user, start = None, end = None, dir = 'older', namespace = None,
prop = None, show = None, limit = None):
self.require(1, 9)
kwargs = dict(listing.List.generate_kwargs('uc', user = user, start = start, end = end,
dir = dir, namespace = namespace, prop = prop, show = show))
return listing.List(self, 'usercontribs', 'uc', limit = limit, **kwargs)
def users(self, users, prop = 'blockinfo|groups|editcount'):
self.require(1, 12)
return listing.List(self, 'users', 'us', ususers = '|'.join(users), usprop = prop)
def watchlist(self, allrev = False, start = None, end = None, namespace = None, dir = 'older',
prop = None, show = None, limit = None):
self.require(1, 9)
kwargs = dict(listing.List.generate_kwargs('wl', start = start, end = end,
namespace = namespace, dir = dir, prop = prop, show = show))
if allrev: kwargs['wlallrev'] = '1'
return listing.List(self, 'watchlist', 'wl', limit = limit, **kwargs)
def expandtemplates(self, text, title = None, generatexml = False):
self.require(1, 11)
kwargs = {}
if title is not None: kwargs['title'] = title
if generatexml: kwargs['generatexml'] = '1'
result = self.api('expandtemplates', text = text, **kwargs)
if generatexml:
return result['expandtemplates']['*'], result['parsetree']['*']
else:
return result['expandtemplates']['*']
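# Illustrative sketch, not part of the original module: typical use of the
# Site class defined above. The hostname, credentials and page title are
# placeholders, and page.edit()/page.save() reflect this 0.6.x, Python 2
# generation of the library.
def _example_site_usage():
    site = Site('en.wikipedia.org')
    site.login('ExampleUser', 'example-password')   # only needed for editing
    page = site.Pages['Sandbox']
    text = page.edit()                               # fetch the current wikitext
    page.save(text + u'\n\ntest edit', summary = u'illustrative edit')
    for result in site.search(u'example', limit = 5):
        print result['title']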
|
|
"""Tests for the Fronius sensor platform."""
from homeassistant.components.fronius.const import DOMAIN
from homeassistant.components.fronius.coordinator import (
FroniusInverterUpdateCoordinator,
FroniusMeterUpdateCoordinator,
FroniusPowerFlowUpdateCoordinator,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.helpers import device_registry as dr
from homeassistant.util import dt
from . import enable_all_entities, mock_responses, setup_fronius_integration
from tests.common import async_fire_time_changed
async def test_symo_inverter(hass, aioclient_mock):
"""Test Fronius Symo inverter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state.state == str(expected_state)
# Init at night
mock_responses(aioclient_mock, night=True)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 20
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 52
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", 10828)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 44186900)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", 25507686)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 16)
# Second test at daytime when inverter is producing
mock_responses(aioclient_mock, night=False)
async_fire_time_changed(
hass, dt.utcnow() + FroniusInverterUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 56
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 58
# 4 additional AC entities
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 2.19)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", 1113)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 44188000)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", 25508798)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 518)
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 5.19)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.94)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 1190)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.90)
# Third test at nighttime - additional AC entities aren't changed
mock_responses(aioclient_mock, night=True)
async_fire_time_changed(
hass, dt.utcnow() + FroniusInverterUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 5.19)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.94)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 1190)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.90)
async def test_symo_logger(hass, aioclient_mock):
"""Test Fronius Symo logger entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock)
await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 24
# states are rounded to 4 decimals
assert_state(
"sensor.cash_factor_fronius_logger_info_0_http_fronius",
0.078,
)
assert_state(
"sensor.co2_factor_fronius_logger_info_0_http_fronius",
0.53,
)
assert_state(
"sensor.delivery_factor_fronius_logger_info_0_http_fronius",
0.15,
)
async def test_symo_meter(hass, aioclient_mock):
"""Test Fronius Symo meter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 24
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 58
# states are rounded to 4 decimals
assert_state("sensor.current_ac_phase_1_fronius_meter_0_http_fronius", 7.755)
assert_state("sensor.current_ac_phase_2_fronius_meter_0_http_fronius", 6.68)
assert_state("sensor.current_ac_phase_3_fronius_meter_0_http_fronius", 10.102)
assert_state(
"sensor.energy_reactive_ac_consumed_fronius_meter_0_http_fronius", 59960790
)
assert_state(
"sensor.energy_reactive_ac_produced_fronius_meter_0_http_fronius", 723160
)
assert_state("sensor.energy_real_ac_minus_fronius_meter_0_http_fronius", 35623065)
assert_state("sensor.energy_real_ac_plus_fronius_meter_0_http_fronius", 15303334)
assert_state("sensor.energy_real_consumed_fronius_meter_0_http_fronius", 15303334)
assert_state("sensor.energy_real_produced_fronius_meter_0_http_fronius", 35623065)
assert_state("sensor.frequency_phase_average_fronius_meter_0_http_fronius", 50)
assert_state("sensor.power_apparent_phase_1_fronius_meter_0_http_fronius", 1772.793)
assert_state("sensor.power_apparent_phase_2_fronius_meter_0_http_fronius", 1527.048)
assert_state("sensor.power_apparent_phase_3_fronius_meter_0_http_fronius", 2333.562)
assert_state("sensor.power_apparent_fronius_meter_0_http_fronius", 5592.57)
assert_state("sensor.power_factor_phase_1_fronius_meter_0_http_fronius", -0.99)
assert_state("sensor.power_factor_phase_2_fronius_meter_0_http_fronius", -0.99)
assert_state("sensor.power_factor_phase_3_fronius_meter_0_http_fronius", 0.99)
assert_state("sensor.power_factor_fronius_meter_0_http_fronius", 1)
assert_state("sensor.power_reactive_phase_1_fronius_meter_0_http_fronius", 51.48)
assert_state("sensor.power_reactive_phase_2_fronius_meter_0_http_fronius", 115.63)
assert_state("sensor.power_reactive_phase_3_fronius_meter_0_http_fronius", -164.24)
assert_state("sensor.power_reactive_fronius_meter_0_http_fronius", 2.87)
assert_state("sensor.power_real_phase_1_fronius_meter_0_http_fronius", 1765.55)
assert_state("sensor.power_real_phase_2_fronius_meter_0_http_fronius", 1515.8)
assert_state("sensor.power_real_phase_3_fronius_meter_0_http_fronius", 2311.22)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", 5592.57)
assert_state("sensor.voltage_ac_phase_1_fronius_meter_0_http_fronius", 228.6)
assert_state("sensor.voltage_ac_phase_2_fronius_meter_0_http_fronius", 228.6)
assert_state("sensor.voltage_ac_phase_3_fronius_meter_0_http_fronius", 231)
assert_state(
"sensor.voltage_ac_phase_to_phase_12_fronius_meter_0_http_fronius", 395.9
)
assert_state(
"sensor.voltage_ac_phase_to_phase_23_fronius_meter_0_http_fronius", 398
)
assert_state(
"sensor.voltage_ac_phase_to_phase_31_fronius_meter_0_http_fronius", 398
)
async def test_symo_power_flow(hass, aioclient_mock):
"""Test Fronius Symo power flow entities."""
async_fire_time_changed(hass, dt.utcnow())
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state.state == str(expected_state)
# First test at night
mock_responses(aioclient_mock, night=True)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 20
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 52
# states are rounded to 4 decimals
assert_state(
"sensor.energy_day_fronius_power_flow_0_http_fronius",
10828,
)
assert_state(
"sensor.energy_total_fronius_power_flow_0_http_fronius",
44186900,
)
assert_state(
"sensor.energy_year_fronius_power_flow_0_http_fronius",
25507686,
)
assert_state(
"sensor.power_grid_fronius_power_flow_0_http_fronius",
975.31,
)
assert_state(
"sensor.power_load_fronius_power_flow_0_http_fronius",
-975.31,
)
assert_state(
"sensor.relative_autonomy_fronius_power_flow_0_http_fronius",
0,
)
# Second test at daytime when inverter is producing
mock_responses(aioclient_mock, night=False)
async_fire_time_changed(
hass, dt.utcnow() + FroniusPowerFlowUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
# 54 because power_flow `rel_SelfConsumption` and `P_PV` are not `null` anymore
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 54
assert_state(
"sensor.energy_day_fronius_power_flow_0_http_fronius",
1101.7001,
)
assert_state(
"sensor.energy_total_fronius_power_flow_0_http_fronius",
44188000,
)
assert_state(
"sensor.energy_year_fronius_power_flow_0_http_fronius",
25508788,
)
assert_state(
"sensor.power_grid_fronius_power_flow_0_http_fronius",
1703.74,
)
assert_state(
"sensor.power_load_fronius_power_flow_0_http_fronius",
-2814.74,
)
assert_state(
"sensor.power_photovoltaics_fronius_power_flow_0_http_fronius",
1111,
)
assert_state(
"sensor.relative_autonomy_fronius_power_flow_0_http_fronius",
39.4708,
)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius",
100,
)
async def test_gen24(hass, aioclient_mock):
"""Test Fronius Gen24 inverter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock, fixture_set="gen24")
config_entry = await setup_fronius_integration(hass, is_logger=False)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 22
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 52
# inverter 1
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 0.1589)
assert_state("sensor.current_dc_2_fronius_inverter_1_http_fronius", 0.0754)
assert_state("sensor.status_code_fronius_inverter_1_http_fronius", 7)
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 0.0783)
assert_state("sensor.voltage_dc_2_fronius_inverter_1_http_fronius", 403.4312)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 37.3204)
assert_state("sensor.error_code_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 411.3811)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 1530193.42)
assert_state("sensor.inverter_state_fronius_inverter_1_http_fronius", "Running")
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 234.9168)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.9917)
# meter
assert_state("sensor.energy_real_produced_fronius_meter_0_http_fronius", 3863340.0)
assert_state("sensor.energy_real_consumed_fronius_meter_0_http_fronius", 2013105.0)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", 653.1)
assert_state("sensor.frequency_phase_average_fronius_meter_0_http_fronius", 49.9)
assert_state("sensor.meter_location_fronius_meter_0_http_fronius", 0.0)
assert_state("sensor.power_factor_fronius_meter_0_http_fronius", 0.828)
assert_state(
"sensor.energy_reactive_ac_consumed_fronius_meter_0_http_fronius", 88221.0
)
assert_state("sensor.energy_real_ac_minus_fronius_meter_0_http_fronius", 3863340.0)
assert_state("sensor.current_ac_phase_2_fronius_meter_0_http_fronius", 2.33)
assert_state("sensor.voltage_ac_phase_1_fronius_meter_0_http_fronius", 235.9)
assert_state(
"sensor.voltage_ac_phase_to_phase_12_fronius_meter_0_http_fronius", 408.7
)
assert_state("sensor.power_real_phase_2_fronius_meter_0_http_fronius", 294.9)
assert_state("sensor.energy_real_ac_plus_fronius_meter_0_http_fronius", 2013105.0)
assert_state("sensor.voltage_ac_phase_2_fronius_meter_0_http_fronius", 236.1)
assert_state(
"sensor.energy_reactive_ac_produced_fronius_meter_0_http_fronius", 1989125.0
)
assert_state("sensor.voltage_ac_phase_3_fronius_meter_0_http_fronius", 236.9)
assert_state("sensor.power_factor_phase_1_fronius_meter_0_http_fronius", 0.441)
assert_state(
"sensor.voltage_ac_phase_to_phase_23_fronius_meter_0_http_fronius", 409.6
)
assert_state("sensor.current_ac_phase_3_fronius_meter_0_http_fronius", 1.825)
assert_state("sensor.power_factor_phase_3_fronius_meter_0_http_fronius", 0.832)
assert_state("sensor.power_apparent_phase_1_fronius_meter_0_http_fronius", 243.3)
assert_state(
"sensor.voltage_ac_phase_to_phase_31_fronius_meter_0_http_fronius", 409.4
)
assert_state("sensor.power_apparent_phase_2_fronius_meter_0_http_fronius", 323.4)
assert_state("sensor.power_apparent_phase_3_fronius_meter_0_http_fronius", 301.2)
assert_state("sensor.power_real_phase_1_fronius_meter_0_http_fronius", 106.8)
assert_state("sensor.power_factor_phase_2_fronius_meter_0_http_fronius", 0.934)
assert_state("sensor.power_real_phase_3_fronius_meter_0_http_fronius", 251.3)
assert_state("sensor.power_reactive_phase_1_fronius_meter_0_http_fronius", -218.6)
assert_state("sensor.power_reactive_phase_2_fronius_meter_0_http_fronius", -132.8)
assert_state("sensor.power_reactive_phase_3_fronius_meter_0_http_fronius", -166.0)
assert_state("sensor.power_apparent_fronius_meter_0_http_fronius", 868.0)
assert_state("sensor.power_reactive_fronius_meter_0_http_fronius", -517.4)
assert_state("sensor.current_ac_phase_1_fronius_meter_0_http_fronius", 1.145)
# power_flow
assert_state("sensor.power_grid_fronius_power_flow_0_http_fronius", 658.4)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius", 100.0
)
assert_state(
"sensor.power_photovoltaics_fronius_power_flow_0_http_fronius", 62.9481
)
assert_state("sensor.power_load_fronius_power_flow_0_http_fronius", -695.6827)
assert_state("sensor.meter_mode_fronius_power_flow_0_http_fronius", "meter")
assert_state("sensor.relative_autonomy_fronius_power_flow_0_http_fronius", 5.3592)
assert_state("sensor.energy_total_fronius_power_flow_0_http_fronius", 1530193.42)
async def test_gen24_storage(hass, aioclient_mock):
"""Test Fronius Gen24 inverter with BYD battery and Ohmpilot entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock, fixture_set="gen24_storage")
config_entry = await setup_fronius_integration(
hass, is_logger=False, unique_id="12345678"
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 34
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 64
# inverter 1
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 0.3952)
assert_state("sensor.voltage_dc_2_fronius_inverter_1_http_fronius", 318.8103)
assert_state("sensor.current_dc_2_fronius_inverter_1_http_fronius", 0.3564)
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 1.1087)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 250.9093)
assert_state("sensor.error_code_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.status_code_fronius_inverter_1_http_fronius", 7)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 7512794.0117)
assert_state("sensor.inverter_state_fronius_inverter_1_http_fronius", "Running")
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 419.1009)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.354)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.9816)
# meter
assert_state("sensor.energy_real_produced_fronius_meter_0_http_fronius", 1705128.0)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", 487.7)
assert_state("sensor.power_factor_fronius_meter_0_http_fronius", 0.698)
assert_state("sensor.energy_real_consumed_fronius_meter_0_http_fronius", 1247204.0)
assert_state("sensor.frequency_phase_average_fronius_meter_0_http_fronius", 49.9)
assert_state("sensor.meter_location_fronius_meter_0_http_fronius", 0.0)
assert_state("sensor.power_reactive_fronius_meter_0_http_fronius", -501.5)
assert_state(
"sensor.energy_reactive_ac_produced_fronius_meter_0_http_fronius", 3266105.0
)
assert_state("sensor.power_real_phase_3_fronius_meter_0_http_fronius", 19.6)
assert_state("sensor.current_ac_phase_3_fronius_meter_0_http_fronius", 0.645)
assert_state("sensor.energy_real_ac_minus_fronius_meter_0_http_fronius", 1705128.0)
assert_state("sensor.power_apparent_phase_2_fronius_meter_0_http_fronius", 383.9)
assert_state("sensor.current_ac_phase_1_fronius_meter_0_http_fronius", 1.701)
assert_state("sensor.current_ac_phase_2_fronius_meter_0_http_fronius", 1.832)
assert_state("sensor.power_apparent_phase_1_fronius_meter_0_http_fronius", 319.5)
assert_state("sensor.voltage_ac_phase_1_fronius_meter_0_http_fronius", 229.4)
assert_state("sensor.power_real_phase_2_fronius_meter_0_http_fronius", 150.0)
assert_state(
"sensor.voltage_ac_phase_to_phase_31_fronius_meter_0_http_fronius", 394.3
)
assert_state("sensor.voltage_ac_phase_2_fronius_meter_0_http_fronius", 225.6)
assert_state(
"sensor.energy_reactive_ac_consumed_fronius_meter_0_http_fronius", 5482.0
)
assert_state("sensor.energy_real_ac_plus_fronius_meter_0_http_fronius", 1247204.0)
assert_state("sensor.power_factor_phase_1_fronius_meter_0_http_fronius", 0.995)
assert_state("sensor.power_factor_phase_3_fronius_meter_0_http_fronius", 0.163)
assert_state("sensor.power_factor_phase_2_fronius_meter_0_http_fronius", 0.389)
assert_state("sensor.power_reactive_phase_1_fronius_meter_0_http_fronius", -31.3)
assert_state("sensor.power_reactive_phase_3_fronius_meter_0_http_fronius", -116.7)
assert_state(
"sensor.voltage_ac_phase_to_phase_12_fronius_meter_0_http_fronius", 396.0
)
assert_state(
"sensor.voltage_ac_phase_to_phase_23_fronius_meter_0_http_fronius", 393.0
)
assert_state("sensor.power_reactive_phase_2_fronius_meter_0_http_fronius", -353.4)
assert_state("sensor.power_real_phase_1_fronius_meter_0_http_fronius", 317.9)
assert_state("sensor.voltage_ac_phase_3_fronius_meter_0_http_fronius", 228.3)
assert_state("sensor.power_apparent_fronius_meter_0_http_fronius", 821.9)
assert_state("sensor.power_apparent_phase_3_fronius_meter_0_http_fronius", 118.4)
# ohmpilot
assert_state(
"sensor.energy_real_ac_consumed_fronius_ohmpilot_0_http_fronius", 1233295.0
)
assert_state("sensor.power_real_ac_fronius_ohmpilot_0_http_fronius", 0.0)
assert_state("sensor.temperature_channel_1_fronius_ohmpilot_0_http_fronius", 38.9)
assert_state("sensor.state_code_fronius_ohmpilot_0_http_fronius", 0.0)
assert_state(
"sensor.state_message_fronius_ohmpilot_0_http_fronius", "Up and running"
)
# power_flow
assert_state("sensor.power_grid_fronius_power_flow_0_http_fronius", 2274.9)
assert_state("sensor.power_battery_fronius_power_flow_0_http_fronius", 0.1591)
assert_state("sensor.power_load_fronius_power_flow_0_http_fronius", -2459.3092)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius", 100.0
)
assert_state(
"sensor.power_photovoltaics_fronius_power_flow_0_http_fronius", 216.4328
)
assert_state("sensor.relative_autonomy_fronius_power_flow_0_http_fronius", 7.4984)
assert_state("sensor.meter_mode_fronius_power_flow_0_http_fronius", "bidirectional")
assert_state("sensor.energy_total_fronius_power_flow_0_http_fronius", 7512664.4042)
# storage
assert_state("sensor.current_dc_fronius_storage_0_http_fronius", 0.0)
assert_state("sensor.state_of_charge_fronius_storage_0_http_fronius", 4.6)
assert_state("sensor.capacity_maximum_fronius_storage_0_http_fronius", 16588)
assert_state("sensor.temperature_cell_fronius_storage_0_http_fronius", 21.5)
assert_state("sensor.capacity_designed_fronius_storage_0_http_fronius", 16588)
assert_state("sensor.voltage_dc_fronius_storage_0_http_fronius", 0.0)
# Devices
device_registry = dr.async_get(hass)
solar_net = device_registry.async_get_device(
identifiers={(DOMAIN, "solar_net_12345678")}
)
assert solar_net.configuration_url == "http://fronius"
assert solar_net.manufacturer == "Fronius"
assert solar_net.name == "SolarNet"
inverter_1 = device_registry.async_get_device(identifiers={(DOMAIN, "12345678")})
assert inverter_1.manufacturer == "Fronius"
assert inverter_1.model == "Gen24"
assert inverter_1.name == "Gen24 Storage"
meter = device_registry.async_get_device(identifiers={(DOMAIN, "1234567890")})
assert meter.manufacturer == "Fronius"
assert meter.model == "Smart Meter TS 65A-3"
assert meter.name == "Smart Meter TS 65A-3"
ohmpilot = device_registry.async_get_device(identifiers={(DOMAIN, "23456789")})
assert ohmpilot.manufacturer == "Fronius"
assert ohmpilot.model == "Ohmpilot 6"
assert ohmpilot.name == "Ohmpilot"
assert ohmpilot.sw_version == "1.0.25-3"
storage = device_registry.async_get_device(
identifiers={(DOMAIN, "P030T020Z2001234567 ")}
)
assert storage.manufacturer == "BYD"
assert storage.model == "BYD Battery-Box Premium HV"
assert storage.name == "BYD Battery-Box Premium HV"
async def test_primo_s0(hass, aioclient_mock):
"""Test Fronius Primo dual inverter with S0 meter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock, fixture_set="primo_s0", inverter_ids=[1, 2])
config_entry = await setup_fronius_integration(hass, is_logger=True)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 29
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 40
# logger
assert_state("sensor.cash_factor_fronius_logger_info_0_http_fronius", 1)
assert_state("sensor.co2_factor_fronius_logger_info_0_http_fronius", 0.53)
assert_state("sensor.delivery_factor_fronius_logger_info_0_http_fronius", 1)
# inverter 1
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 17114940)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", 22504)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 452.3)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 862)
assert_state("sensor.error_code_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 4.23)
assert_state("sensor.status_code_fronius_inverter_1_http_fronius", 7)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", 7532755.5)
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 3.85)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 223.9)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 60)
assert_state("sensor.led_color_fronius_inverter_1_http_fronius", 2)
assert_state("sensor.led_state_fronius_inverter_1_http_fronius", 0)
# inverter 2
assert_state("sensor.energy_total_fronius_inverter_2_http_fronius", 5796010)
assert_state("sensor.energy_day_fronius_inverter_2_http_fronius", 14237)
assert_state("sensor.voltage_dc_fronius_inverter_2_http_fronius", 329.5)
assert_state("sensor.power_ac_fronius_inverter_2_http_fronius", 296)
assert_state("sensor.error_code_fronius_inverter_2_http_fronius", 0)
assert_state("sensor.current_dc_fronius_inverter_2_http_fronius", 0.97)
assert_state("sensor.status_code_fronius_inverter_2_http_fronius", 7)
assert_state("sensor.energy_year_fronius_inverter_2_http_fronius", 3596193.25)
assert_state("sensor.current_ac_fronius_inverter_2_http_fronius", 1.32)
assert_state("sensor.voltage_ac_fronius_inverter_2_http_fronius", 223.6)
assert_state("sensor.frequency_ac_fronius_inverter_2_http_fronius", 60.01)
assert_state("sensor.led_color_fronius_inverter_2_http_fronius", 2)
assert_state("sensor.led_state_fronius_inverter_2_http_fronius", 0)
# meter
assert_state("sensor.meter_location_fronius_meter_0_http_fronius", 1)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", -2216.7487)
# power_flow
assert_state("sensor.power_load_fronius_power_flow_0_http_fronius", -2218.9349)
assert_state("sensor.meter_mode_fronius_power_flow_0_http_fronius", "vague-meter")
assert_state("sensor.power_photovoltaics_fronius_power_flow_0_http_fronius", 1834)
assert_state("sensor.power_grid_fronius_power_flow_0_http_fronius", 384.9349)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius", 100
)
assert_state("sensor.relative_autonomy_fronius_power_flow_0_http_fronius", 82.6523)
assert_state("sensor.energy_total_fronius_power_flow_0_http_fronius", 22910919.5)
assert_state("sensor.energy_day_fronius_power_flow_0_http_fronius", 36724)
assert_state("sensor.energy_year_fronius_power_flow_0_http_fronius", 11128933.25)
# Devices
device_registry = dr.async_get(hass)
solar_net = device_registry.async_get_device(
identifiers={(DOMAIN, "solar_net_123.4567890")}
)
assert solar_net.configuration_url == "http://fronius"
assert solar_net.manufacturer == "Fronius"
assert solar_net.model == "fronius-datamanager-card"
assert solar_net.name == "SolarNet"
assert solar_net.sw_version == "3.18.7-1"
inverter_1 = device_registry.async_get_device(identifiers={(DOMAIN, "123456")})
assert inverter_1.manufacturer == "Fronius"
assert inverter_1.model == "Primo 5.0-1"
assert inverter_1.name == "Primo 5.0-1"
inverter_2 = device_registry.async_get_device(identifiers={(DOMAIN, "234567")})
assert inverter_2.manufacturer == "Fronius"
assert inverter_2.model == "Primo 3.0-1"
assert inverter_2.name == "Primo 3.0-1"
meter = device_registry.async_get_device(
identifiers={(DOMAIN, "solar_net_123.4567890:S0 Meter at inverter 1")}
)
assert meter.manufacturer == "Fronius"
assert meter.model == "S0 Meter at inverter 1"
assert meter.name == "S0 Meter at inverter 1"
|
|
"""
Test functions for models.formula
"""
import string
import numpy as np
import numpy.random as R
import numpy.linalg as L
from numpy.testing import *
from statsmodels.sandbox import formula #, contrast #, utils
from statsmodels.sandbox import contrast_old as contrast
class TestTerm(TestCase):
def test_init(self):
t1 = formula.Term("trivial")
sqr = lambda x: x*x
t2 = formula.Term("not_so_trivial", sqr, "sqr")
self.assertRaises(ValueError, formula.Term, "name", termname=0)
def test_str(self):
t = formula.Term("name")
s = str(t)
def test_add(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 + t2
self.assert_(isinstance(f, formula.Formula))
self.assert_(f.hasterm(t1))
self.assert_(f.hasterm(t2))
def test_mul(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 * t2
self.assert_(isinstance(f, formula.Formula))
intercept = formula.Term("intercept")
f = t1 * intercept
self.assertEqual(str(f), str(formula.Formula(t1)))
f = intercept * t1
self.assertEqual(str(f), str(formula.Formula(t1)))
class TestFormula(TestCase):
def setUp(self):
self.X = R.standard_normal((40,10))
self.namespace = {}
self.terms = []
for i in range(10):
name = '%s' % string.ascii_uppercase[i]
self.namespace[name] = self.X[:,i]
self.terms.append(formula.Term(name))
self.formula = self.terms[0]
for i in range(1, 10):
self.formula += self.terms[i]
self.formula.namespace = self.namespace
def test_namespace(self):
space1 = {'X':np.arange(50), 'Y':np.arange(50)*2}
space2 = {'X':np.arange(20), 'Y':np.arange(20)*2}
space3 = {'X':np.arange(30), 'Y':np.arange(30)*2}
X = formula.Term('X')
Y = formula.Term('Y')
X.namespace = space1
assert_almost_equal(X(), np.arange(50))
Y.namespace = space2
assert_almost_equal(Y(), np.arange(20)*2)
f = X + Y
f.namespace = space1
self.assertEqual(f().shape, (2,50))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space2
self.assertEqual(f().shape, (2,20))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space3
self.assertEqual(f().shape, (2,30))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
xx = X**2
self.assertEqual(xx().shape, (50,))
xx.namespace = space3
self.assertEqual(xx().shape, (30,))
xx = X * formula.I
self.assertEqual(xx().shape, (50,))
xx.namespace = space3
self.assertEqual(xx().shape, (30,))
xx = X * X
self.assertEqual(xx.namespace, X.namespace)
xx = X + Y
self.assertEqual(xx.namespace, {})
Y.namespace = {'X':np.arange(50), 'Y':np.arange(50)*2}
xx = X + Y
self.assertEqual(xx.namespace, {})
Y.namespace = X.namespace
xx = X+Y
self.assertEqual(xx.namespace, Y.namespace)
def test_termcolumns(self):
t1 = formula.Term("A")
t2 = formula.Term("B")
f = t1 + t2 + t1 * t2
def other(val):
return np.array([3.2*val,4.342*val**2, 5.234*val**3])
q = formula.Quantitative(['other%d' % i for i in range(1,4)], termname='other', func=t1, transform=other)
f += q
q.namespace = f.namespace = self.formula.namespace
a = q()
b = f()
c = f.termcolumns(q)
b = b[c]
assert_almost_equal(a,b)
def test_str(self):
s = str(self.formula)
def test_call(self):
x = self.formula()
self.assertEquals(np.array(x).shape, (10, 40))
def test_design(self):
x = self.formula.design()
self.assertEquals(x.shape, (40, 10))
def test_product(self):
prod = self.formula['A'] * self.formula['C']
f = self.formula + prod
f.namespace = self.namespace
x = f.design()
p = f['A*C']
p.namespace = self.namespace
col = f.termcolumns(prod, dict=False)
assert_almost_equal(np.squeeze(x[:,col]), self.X[:,0] * self.X[:,2])
assert_almost_equal(np.squeeze(p()), self.X[:,0] * self.X[:,2])
def test_intercept1(self):
prod = self.terms[0] * self.terms[2]
f = self.formula + formula.I
icol = f.names().index('intercept')
f.namespace = self.namespace
assert_almost_equal(f()[icol], np.ones((40,)))
def test_intercept3(self):
t = self.formula['A']
t.namespace = self.namespace
prod = t * formula.I
prod.namespace = self.formula.namespace
assert_almost_equal(np.squeeze(prod()), t())
def test_contrast1(self):
term = self.terms[0] + self.terms[2]
c = contrast.Contrast(term, self.formula)
col1 = self.formula.termcolumns(self.terms[0], dict=False)
col2 = self.formula.termcolumns(self.terms[1], dict=False)
test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]
assert_almost_equal(c.matrix, test)
def test_contrast2(self):
dummy = formula.Term('zero')
self.namespace['zero'] = np.zeros((40,), np.float64)
term = dummy + self.terms[2]
c = contrast.Contrast(term, self.formula)
test = [0]*2 + [1] + [0]*7
assert_almost_equal(c.matrix, test)
def test_contrast3(self):
X = self.formula.design()
P = np.dot(X, L.pinv(X))
dummy = formula.Term('noise')
resid = np.identity(40) - P
self.namespace['noise'] = np.transpose(np.dot(resid, R.standard_normal((40,5))))
terms = dummy + self.terms[2]
terms.namespace = self.formula.namespace
c = contrast.Contrast(terms, self.formula)
self.assertEquals(c.matrix.shape, (10,))
def test_power(self):
t = self.terms[2]
t2 = t**2
t.namespace = t2.namespace = self.formula.namespace
assert_almost_equal(t()**2, t2())
def test_quantitative(self):
t = self.terms[2]
sint = formula.Quantitative('t', func=t, transform=np.sin)
t.namespace = sint.namespace = self.formula.namespace
assert_almost_equal(np.sin(t()), sint())
def test_factor1(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
self.assertEquals(list(fac.values()), f)
def test_factor2(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
self.assertEquals(fac().shape, (3,30))
def test_factor3(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=1)
m.namespace = fac.namespace
self.assertEquals(m().shape, (2,30))
def test_factor4(self):
f = ['a','b','c']*10
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
m = fac.main_effect(reference=2)
m.namespace = fac.namespace
r = np.array([np.identity(3)]*10)
r.shape = (30,3)
r = r.T
_m = np.array([r[0]-r[2],r[1]-r[2]])
assert_almost_equal(_m, m())
def test_factor5(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', f)
fac.namespace = {'ff':f}
assert_equal(fac(), [[1,0,0]*3,
[0,1,0]*3,
[0,0,1]*3])
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor(self):
f = ['a','b','c']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [0,1,2]*3)
assert_equal(fac['a'], [1,0,0]*3)
assert_equal(fac['b'], [0,1,0]*3)
assert_equal(fac['c'], [0,0,1]*3)
def test_ordinal_factor2(self):
f = ['b','c', 'a']*3
fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
fac.namespace = {'ff':f}
assert_equal(fac(), [1,2,0]*3)
assert_equal(fac['a'], [0,0,1]*3)
assert_equal(fac['b'], [1,0,0]*3)
assert_equal(fac['c'], [0,1,0]*3)
def test_contrast4(self):
f = self.formula + self.terms[5] + self.terms[5]
f.namespace = self.namespace
estimable = False
c = contrast.Contrast(self.terms[5], f)
self.assertEquals(estimable, False)
def test_interactions(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'a*b', 'a*c', 'b*c']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3)
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[1,2,3])
assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[3])
assert_equal(set(f.termnames()), set(['a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
def test_subtract(self):
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
ff = f - f['a*b']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c']))
ff = f - f['a*b'] - f['a*c']
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
ff = f - (f['a*b'] + f['a*c'])
assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
|
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import time
from threading import Lock
from numpy import array, hstack
from traits.api import Float, Int, Str, Bool, Property, Array
from pychron.hardware.core.checksum_helper import computeBCC
from pychron.hardware.core.core_device import CoreDevice
from pychron.hardware.core.data_helper import make_bitarray
from six.moves import map
from six.moves import range
STX = chr(2)
ETX = chr(3)
EOT = chr(4)
ENQ = chr(5)
DLE = chr(16)
ANSWER_ADDR = '0002'
STATUS = ['Powering On', 'Laser Off', 'Turning On', 'Laser On']
ACTION = ['Turn Off', 'Turn on', 'Single Shot', 'Run', 'Firing']
class ATLLaserControlUnit(CoreDevice):
"""
"""
energy_readback = Float
pressure_readback = Float
burst_readback = Int
status_readback = Str
action_readback = Str
firing = Bool
burst_shot = Property(Int(enter_set=True, auto_set=False), depends_on='_burst_shot')
_burst_shot = Int
reprate = Property(Int(enter_set=True, auto_set=False), depends_on='_reprate')
_reprate = Int
_was_fired = False
energies=Array
stablization_mode = None
# _timer = None
# _enabled = Bool(False)
# triggered = Bool(False)
#
# energy = Float(0)
# energymin = Constant(0.0)
# energymax = Constant(15.0)
# update_energy = Float
#
# hv = Float(11)
# hvmin = Constant(11.0)
# hvmax = Constant(16.0)
# update_hv = Float(11)
#
# reprate = Float(100)
# repratemin = Constant(100.0)
# repratemax = Constant(300.0)
# update_reprate = Float(100)
#
# trigger_modes = ['External I',
# 'External II',
# 'Internal'
# ]
# trigger_mode = Str('External I')
# stablization_modes = ['High Voltage', 'Energy']
# stablization_mode = Str('High Voltage')
#
# stop_at_low_e = Bool
#
# cathode = Float(0.0)
# reservoir = Float(0.0)
# missing_pulses = Int(0)
# halogen_filter = Float(0.0)
#
# laser_head = Float(0.0)
# laser_headmin = Constant(0.0)
# laser_headmax = Constant(7900.0)
#
# burst = Bool
# nburst = Int(enter_set=True, auto_set=False)
# cburst = Int
# def start_update_timer(self):
# '''
# '''
# self.stop_update_timer()
# self._timer = Timer(1000, self._update_parameters)
# self._timer.Start()
#
# def stop_update_timer(self):
# if self._timer:
# self._timer.Stop()
# def trigger_laser(self):
# '''
# '''
# self.start_update_timer()
#
# self.triggered = True
#
# def stop_triggering_laser(self):
# '''
# '''
# self.triggered = False
def __init__(self, *args, **kw):
super(ATLLaserControlUnit, self).__init__(*args, **kw)
self._lock = Lock()
def initialize(self, *args, **kw):
r = super(ATLLaserControlUnit, self).initialize(*args, **kw)
self.communicator.write_terminator = None
self._burst_shot = self.get_nburst()
#reading reprate not working correctly. check for a new ATL API
self._reprate = self.get_reprate()
v = 55
self.set_stabilization('energy', v)
return r
def set_stabilization(self, mode, v):
"""
Process-status word layout (11 bits, written MSB first, b10 .. b0):
b10..b8  gas type
b7..b5   stabilization mode (000 HV, 001 Energy, 011 Energy w/ PGE)
b4       burst mode
b3..b1   trigger mode
b0       disable on low energy
"""
MODES = {'energy': '001', 'hv': '000',
'energy_w_pge': '011'}
mode = MODES.get(mode, '000')
with self._lock:
p = '000 {} 1 000 0'.format(mode).replace(' ', '')
cmd = self._build_command(1000, int(p, 2))
self._send_command(cmd, lock=False)
#v=make_bitarray(v, width=16)
cmd = self._build_command(1003, v)
self._send_command(cmd, lock=False)
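# Worked example of the layout documented above: for mode='energy' the
# status word is p = '000' + '001' + '1' + '000' + '0' = '00000110000',
# and int('00000110000', 2) == 48 -- stabilization bits b7..b5 = 001
# (Energy) plus burst bit b4 set -- which is written to parameter
# address 1000 before the setpoint v goes to address 1003.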
def get_mean_energy(self):
return self.energies.mean()
def is_enabled(self):
return self.status_readback == 'Laser On'
def set_reprate(self, n, save=True):
lh = self._make_integer_pair(n)
if lh:
with self._lock:
cmd = self._build_command(1001, lh)
self._send_command(cmd, lock=False)
if save:
self._save_eeprom()
self._reprate = int(n)
def _make_integer_pair(self, n):
try:
n = int(n)
except (ValueError, TypeError):
return
v = make_bitarray(n, width=32)
h, l = int(v[:16], 2), int(v[16:], 2)
return l, h
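# Example of the 32-bit split (assuming make_bitarray returns a zero-padded,
# MSB-first bit string, as its use above implies): n=70000 -> upper 16 bits
# == 1, lower 16 bits == 4464 (70000 = 1 * 65536 + 4464), so this returns
# (4464, 1) -- the low-word-first order in which set_nburst()/set_reprate()
# transmit the value.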
def set_nburst(self, n, save=True):
if int(n) != int(self._burst_shot):
self.debug('setting nburst n={} current_value={}'.format(n, self._burst_shot))
lh = self._make_integer_pair(n)
if lh:
with self._lock:
cmd = self._build_command(22, lh)
self._send_command(cmd, lock=False)
cmd = self._build_command(1004, lh)
self._send_command(cmd, lock=False)
self._burst_shot = int(n)
if save:
self._save_eeprom()
def _save_eeprom(self, lock=False):
cmd = self._build_command(37, 1)
self._send_command(cmd, lock=lock)
def get_reprate(self, verbose=True):
self.debug('get reprate')
resp = self._send_query(1001, 1, verbose=verbose)
v = -1
if resp is not None:# and len(resp) == 4:
print(resp, len(resp))
v = int(resp, 16)
# high = resp[4:]
# low = resp[:4]
# high = make_bitarray(int(high, 16), width=16)
# low = make_bitarray(int(low, 16), width=16)
# v = int(high + low, 2)
return v
def get_nburst(self, verbose=True):
if verbose:
self.debug('get nburst')
v = 0
resp = self._send_query(22, 2, verbose=verbose)
if resp is not None and len(resp) == 8:
high = resp[4:]
low = resp[:4]
high = make_bitarray(int(high, 16), width=16)
low = make_bitarray(int(low, 16), width=16)
v = int(high + low, 2)
return v
def is_burst_mode(self, ps=None):
bit = 4
if ps is None:
ps = self.get_process_status()
return int(ps[16 - (bit + 1)])
def get_process_status(self):
ps = '0000000000000000'  # fall back to an all-zero status word if the query fails
r = self._send_query(1000, 1)
self.debug('get process status {}'.format(r))
if r is not None:
r = int(r, 16)
ps = make_bitarray(r, width=16)
return ps
def set_burst_mode(self, mode, ps=None):
if not self.is_burst_mode(ps):
if ps is None:
ps = self.get_process_status()
nps = ps[:11] + str(int(mode)) + ps[-4:]  # replace bit 4 (burst) of the 16-bit word
print(mode, nps)
cmd = self._build_command(1000, int(nps, 2))
self._send_command(cmd)
def laser_on(self):
# self.start_update_timer()
cmd = self._build_command(11, 1)
self._send_command(cmd)
def laser_off(self):
cmd = self._build_command(11, 0)
self._send_command(cmd)
self._enabled = False
def laser_single_shot(self):
cmd = self._build_command(11, 2)
self._send_command(cmd)
def laser_run(self):
self.debug('run laser')
self.firing = True
self.energies = array([])
cmd = self._build_command(11, 3)
self._send_command(cmd)
def laser_stop(self):
self.debug('stop laser')
cmd = self._build_command(11, 1)
self._send_command(cmd)
self.firing = False
def get_laser_status(self, verbose=True):
r = self._send_query(11, 1, verbose=verbose)
return self._parse_response(r, 1)[0]
# ===============================================================================
# gas handling
# ===============================================================================
def do_auto_vac(self):
# self.start_auto_vac()
# wait until idle
self.wait_for_idle()
# self.wait_for_gwr()
def do_auto_gas_exchange(self):
# self.start_auto_gas_exchange()
self.wait_for_idle()
# self.wait_for_gwr()
def wait_for_idle(self):
while 1:
time.sleep(0.75)
if self.is_idle():
break
def wait_for_gwr(self):
while 1:
time.sleep(0.75)
if self.waiting_for_gas_request():
break
def start_auto_vac(self):
cmd = self._build_command(14, 11)
self._send_command(cmd)
def start_auto_gas_exchange(self):
cmd = self._build_command(14, 11)
self._send_command(cmd)
def set_to_idle(self):
cmd = self._build_command(14, 11)
self._send_command(cmd)
def waiting_for_gas_request(self, verbose=False):
rq = self.get_gas_wait_request(verbose=verbose)
print(rq)
if rq is not None:
return rq[0] == 1
def is_idle(self):
status = self.get_gas_status()
if status is not None:
istatus = int(status, 16)
return istatus == 0
def get_pressure(self, verbose=False):
vs = self._send_query(9, 1, verbose=verbose)
if vs is not None:
vs = self._parse_response(vs, 1)
if vs is not None:
self.pressure_readback = vs[0] / 1000.
def get_gas_status(self):
r = self._send_query(13, 1)
return r
def get_gas_wait_request(self, verbose=True):
r = self._send_query(27, 1, verbose=verbose)
return self._parse_response(r, 1)
def open_valve(self, addr):
self.info('open valve {}'.format(addr))
def close_valve(self, addr):
self.info('close valve {}'.format(addr))
def update_parameters(self):
# energy, pressure, status, action
vs = self._send_query(8, 4, verbose=False)
if vs is not None:
vs = self._parse_response(vs, 4)
if vs is not None:
self.energy_readback = vs[0] / 10.
self.energies = hstack((self.energies[-5:], [self.energy_readback]))  # keep a short rolling window for get_mean_energy()
self.pressure_readback = vs[1]
self.status_readback = STATUS[vs[2]]
self.action_readback = ACTION[vs[3]]
b = self.get_nburst(verbose=False)
if b is not None:
self.burst_readback = b
if self.firing:
self.debug('readback={} burst={} fired={}'.format(b, self.burst_shot, self._was_fired))
if not b or (self._was_fired and b == self.burst_shot):
self.debug('AUTO STOP LASER')
self.laser_stop()
self._was_fired = False
self._was_fired = b != self.burst_shot
def _set_answer_parameters(self, start_addr_value, answer_len,
verbose=True, ):
values = [start_addr_value, answer_len]
cmd = self._build_command(ANSWER_ADDR, values)
self._send_command(cmd, verbose=verbose)
def _build_command(self, start_addr, values):
if isinstance(start_addr, int):
start_addr = '{:04X}'.format(start_addr)
if isinstance(values, int):
values = (values,)
# values = ('{:04X}'.format(values),)
values = list(map('{:04X}'.format, values))
cmd = start_addr + ''.join(values)
cmd += ETX
BCC = computeBCC(cmd)
cmd = STX + cmd + chr(BCC)
return cmd
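# Frame sketch: _build_command(11, 3) -- the command laser_run() sends --
# formats address and value as 4-digit hex, giving the payload
# '000B' + '0003' + ETX; computeBCC is then taken over that payload
# (assumed to yield a single block-check byte) and the frame on the wire
# is STX + '000B0003' + ETX + chr(BCC).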
def _send_query(self, s, l, verbose=True):
self._set_answer_parameters(s, l, verbose=verbose)
with self._lock:
cmd = self._build_command(ANSWER_ADDR, (s, l))
self._send_command(cmd, verbose=verbose, lock=False)
# =self.ask('A'+ENQ, nchars=(l+1)*4+6)
# self._start_message()
n = (l + 1) * 4 + 6
cmd = 'a' + ENQ
r = self.ask(cmd, nchars=n, verbose=verbose)
# r = self.read(nchars=n)
self.tell(DLE + '1', verbose=verbose)
self._end_message(verbose=verbose)
return self._clean_response(r)
def _send_command(self, cmd, verbose=True, lock=True):
if lock:
self._lock.acquire()
self._start_message(verbose=verbose)
self.ask(cmd, read_terminator=DLE + '1', verbose=verbose)
self._end_message(verbose=verbose)
if lock:
self._lock.release()
def _start_message(self, verbose=True):
cmd = 'A' + ENQ
self.ask(cmd, read_terminator=DLE + '0', verbose=verbose)
def _end_message(self, verbose=True):
cmd = EOT
self.tell(cmd, verbose=verbose)
def _clean_response(self, r):
# print len(r)
handshake = r[:4]
#print handshake,handshake=='a'+DLE+'0'+STX
if handshake == 'a' + DLE + '0' + STX:
chksum = computeBCC(r[4:-1])
#print 'a={} b={} c={} d={}'.format(chksum, ord(r[-1]), chr(chksum),chr(chksum) == r[-1])
if chr(chksum) == r[-1]:
return r[8:-2]
def _parse_response(self, resp, l):
# print resp, l, len(resp),l*4
if resp is not None and len(resp) == l * 4:
return [int(resp[i:i + 4], 16) for i in range(0, len(resp) - 3, 4)]
def _get_burst_shot(self):
return self._burst_shot
def _set_burst_shot(self, v):
self.set_nburst(v)
def _get_reprate(self):
return self._reprate
def _set_reprate(self, v):
self.set_reprate(v)
# def _parse_parameter_answers(self, resp, rstartaddr, answer_len):
# '''
# '''
# #split at stx
# rargs = resp.split(STX)
# r, chk = rargs[1].split(ETX)
#
# #verify checksum
# bcc = computeBCC(r + ETX)
# if int(bcc, 16) != int(chk, 16):
# return
#
# #r example
# #0005006500000000
# #startaddr, startaddrvalue, ... ,nstartaddr_value
#
# #remove startaddr and make sure its the one we requested
# startaddr = int(r[:4], 16)
# if rstartaddr != startaddr:
# return
#
# #trim off start addr
# r = r[4:]
# #ensure len of answers correct
# if answer_len != len(r) / 4:
# return
#
# args = ()
# for i in range(0, len(r), 4):
# val = r[i:i + 4]
# args += (val,)
#
# return args
# def _update_parameter_list(self, names, s, l):
# '''
#
# '''
# resp = self._send_query(s, l)
# if resp is not None:
# args = self._parse_parameter_answers(resp, s, l)
# # kw = dict()
# for n, a in zip(names, args):
# v = int(a, 16)
# if isinstance(n, tuple):
# v = n[1](v)
# n = n[0]
# self.trait_set(n=v)
# kw[n] = v
# self.trait_set(**kw)
if __name__ == '__main__':
from pychron.core.helpers.logger_setup import logging_setup
logging_setup('atl')
a = ATLLaserControlUnit(name='ATLLaserControlUnit',
configuration_dir_name='fusions_uv')
a.bootstrap()
a.laser_off()
# ============= EOF ====================================
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import shutil
import sys
import tempfile
import time
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
from smoketests import base
from smoketests import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('bundle_kernel', 'random.kernel',
'Local kernel file to use for bundling tests')
flags.DEFINE_string('bundle_image', 'random.image',
'Local image file to use for bundling tests')
TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
TEST_GROUP = '%s_group' % TEST_PREFIX
class ImageTests(base.UserSmokeTestCase):
def test_001_can_bundle_image(self):
self.data['tempdir'] = tempfile.mkdtemp()
self.assertTrue(self.bundle_image(FLAGS.bundle_image,
self.data['tempdir']))
def test_002_can_upload_image(self):
try:
self.assertTrue(self.upload_image(TEST_BUCKET,
FLAGS.bundle_image,
self.data['tempdir']))
finally:
if os.path.exists(self.data['tempdir']):
shutil.rmtree(self.data['tempdir'])
def test_003_can_register_image(self):
image_id = self.conn.register_image('%s/%s.manifest.xml' %
(TEST_BUCKET, FLAGS.bundle_image))
self.assert_(image_id is not None)
self.data['image_id'] = image_id
def test_004_can_bundle_kernel(self):
self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True))
def test_005_can_upload_kernel(self):
self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_kernel))
def test_006_can_register_kernel(self):
kernel_id = self.conn.register_image('%s/%s.manifest.xml' %
(TEST_BUCKET, FLAGS.bundle_kernel))
self.assert_(kernel_id is not None)
self.data['kernel_id'] = kernel_id
def test_007_images_are_available_within_10_seconds(self):
for i in xrange(10):
image = self.conn.get_image(self.data['image_id'])
if image and image.state == 'available':
break
time.sleep(1)
else:
self.assert_(False) # wasn't available within 10 seconds
self.assert_(image.type == 'machine')
for i in xrange(10):
kernel = self.conn.get_image(self.data['kernel_id'])
if kernel and kernel.state == 'available':
break
time.sleep(1)
else:
self.assert_(False) # wasn't available within 10 seconds
self.assert_(kernel.type == 'kernel')
def test_008_can_describe_image_attribute(self):
attrs = self.conn.get_image_attribute(self.data['image_id'],
'launchPermission')
self.assertEqual(attrs.name, 'launch_permission')
def test_009_can_add_image_launch_permission(self):
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, False)
self.conn.modify_image_attribute(image_id=self.data['image_id'],
operation='add',
attribute='launchPermission',
groups='all')
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, True)
def test_010_can_see_launch_permission(self):
attrs = self.conn.get_image_attribute(self.data['image_id'],
'launchPermission')
self.assertEqual(attrs.name, 'launch_permission')
self.assertEqual(attrs.attrs['groups'][0], 'all')
def test_011_can_remove_image_launch_permission(self):
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, True)
self.conn.modify_image_attribute(image_id=self.data['image_id'],
operation='remove',
attribute='launchPermission',
groups='all')
image = self.conn.get_image(self.data['image_id'])
self.assertEqual(image.id, self.data['image_id'])
self.assertEqual(image.is_public, False)
def test_012_private_image_shows_in_list(self):
images = self.conn.get_all_images()
image_ids = [image.id for image in images]
self.assertTrue(self.data['image_id'] in image_ids)
def test_013_user_can_deregister_kernel(self):
self.assertTrue(self.conn.deregister_image(self.data['kernel_id']))
def test_014_can_deregister_image(self):
self.assertTrue(self.conn.deregister_image(self.data['image_id']))
def test_015_can_delete_bundle(self):
self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET))
class InstanceTests(base.UserSmokeTestCase):
def test_001_can_create_keypair(self):
key = self.create_key_pair(self.conn, TEST_KEY)
self.assertEqual(key.name, TEST_KEY)
def test_002_can_create_instance_with_keypair(self):
reservation = self.conn.run_instances(FLAGS.test_image,
key_name=TEST_KEY,
instance_type='m1.tiny')
self.assertEqual(len(reservation.instances), 1)
self.data['instance'] = reservation.instances[0]
def test_003_instance_runs_within_60_seconds(self):
instance = self.data['instance']
# allow 60 seconds to exit pending with IP
if not self.wait_for_running(self.data['instance']):
self.fail('instance failed to start')
self.data['instance'].update()
ip = self.data['instance'].private_ip_address
self.failIf(ip == '0.0.0.0')
if FLAGS.use_ipv6:
ipv6 = self.data['instance'].dns_name_v6
self.failIf(ipv6 is None)
def test_004_can_ping_private_ip(self):
if not self.wait_for_ping(self.data['instance'].private_ip_address):
self.fail('could not ping instance')
if FLAGS.use_ipv6:
if not self.wait_for_ping(self.data['instance'].dns_name_v6,
"ping6"):
self.fail('could not ping instance v6')
def test_005_can_ssh_to_private_ip(self):
if not self.wait_for_ssh(self.data['instance'].private_ip_address,
TEST_KEY):
self.fail('could not ssh to instance')
if FLAGS.use_ipv6:
if not self.wait_for_ssh(self.data['instance'].dns_name_v6,
TEST_KEY):
self.fail('could not ssh to instance v6')
def test_999_tearDown(self):
self.delete_key_pair(self.conn, TEST_KEY)
self.conn.terminate_instances([self.data['instance'].id])
class VolumeTests(base.UserSmokeTestCase):
def setUp(self):
super(VolumeTests, self).setUp()
self.device = '/dev/vdb'
def test_000_setUp(self):
self.create_key_pair(self.conn, TEST_KEY)
reservation = self.conn.run_instances(FLAGS.test_image,
instance_type='m1.tiny',
key_name=TEST_KEY)
self.data['instance'] = reservation.instances[0]
if not self.wait_for_running(self.data['instance']):
self.fail('instance failed to start')
self.data['instance'].update()
if not self.wait_for_ping(self.data['instance'].private_ip_address):
self.fail('could not ping instance')
if not self.wait_for_ssh(self.data['instance'].private_ip_address,
TEST_KEY):
self.fail('could not ssh to instance')
def test_001_can_create_volume(self):
volume = self.conn.create_volume(1, 'nova')
self.assertEqual(volume.size, 1)
self.data['volume'] = volume
# Give network time to find volume.
time.sleep(5)
def test_002_can_attach_volume(self):
volume = self.data['volume']
for x in xrange(10):
volume.update()
if volume.status.startswith('available'):
break
time.sleep(1)
else:
self.fail('cannot attach volume with state %s' % volume.status)
# Give volume some time to be ready.
time.sleep(5)
volume.attach(self.data['instance'].id, self.device)
# wait
for x in xrange(10):
volume.update()
if volume.status.startswith('in-use'):
break
time.sleep(1)
else:
self.fail('volume never got to in use')
self.assertTrue(volume.status.startswith('in-use'))
# Give instance time to recognize volume.
time.sleep(5)
def test_003_can_mount_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
# NOTE(vish): this will create a dev for images that don't have
# udev rules
stdin, stdout, stderr = conn.exec_command(
'grep %s /proc/partitions | '
'`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`'
% self.device.rpartition('/')[2])
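# The awk above prints "mknod /dev/<name> b <major> <minor>" from the
# matching /proc/partitions line, and the surrounding backticks execute
# that generated command, recreating the block device node in the guest.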
exec_list = []
exec_list.append('mkdir -p /mnt/vol')
exec_list.append('/sbin/mke2fs %s' % self.device)
exec_list.append('mount %s /mnt/vol' % self.device)
exec_list.append('echo success')
stdin, stdout, stderr = conn.exec_command(' && '.join(exec_list))
out = stdout.read()
conn.close()
if not out.strip().endswith('success'):
self.fail('Unable to mount: %s %s' % (out, stderr.read()))
def test_004_can_write_to_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
# FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted
stdin, stdout, stderr = conn.exec_command(
'echo hello > /mnt/vol/test.txt')
err = stderr.read()
conn.close()
if len(err) > 0:
self.fail('Unable to write to mount: %s' % (err))
def test_005_volume_is_correct_size(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
stdin, stdout, stderr = conn.exec_command(
"cat /sys/class/block/%s/size" % self.device.rpartition('/')[2])
out = stdout.read().strip()
conn.close()
# NOTE(vish): 1G bytes / 512 bytes per block
expected_size = 1024 * 1024 * 1024 / 512
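# i.e. 1073741824 / 512 == 2097152 blocks, compared against the
# /sys/class/block/<dev>/size value read above.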
self.assertEquals('%s' % (expected_size,), out,
'Volume is not the right size: %s %s. Expected: %s' %
(out, stderr.read(), expected_size))
def test_006_me_can_umount_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
stdin, stdout, stderr = conn.exec_command('umount /mnt/vol')
err = stderr.read()
conn.close()
if len(err) > 0:
self.fail('Unable to unmount: %s' % (err))
def test_007_me_can_detach_volume(self):
result = self.conn.detach_volume(volume_id=self.data['volume'].id)
self.assertTrue(result)
time.sleep(5)
def test_008_me_can_delete_volume(self):
result = self.conn.delete_volume(self.data['volume'].id)
self.assertTrue(result)
def test_999_tearDown(self):
self.conn.terminate_instances([self.data['instance'].id])
self.conn.delete_key_pair(TEST_KEY)
|
|
import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
INIT_REQUIRED_MSG = "Please run `git: flow init` first."
GITFLOW_CONF = ['branch.master', 'branch.develop', 'prefix.feature',
'prefix.release', 'prefix.hotfix', 'prefix.versiontag',
'prefix.support', 'origin', ]
class FlowCommon(WindowCommand, GitCommand):
"""
Common git-flow commands parent class.
Populates gitflow settings and includes useful methods
for option selection and branch retrieval.
"""
def get_flow_settings(self):
flow_ver = self.git("flow", "version")
self.flow_settings = {
'flow.version': flow_ver,
}
for conf in GITFLOW_CONF:
self.flow_settings[conf] = self.git(
"config", "gitflow.%s" % conf, throw_on_stderr=False
).strip()
def run(self, **kwargs):
self.get_flow_settings()
if not self.flow_settings['branch.master']:
self.window.show_quick_panel([INIT_REQUIRED_MSG], None)
def is_visible(self, **kwargs):
gitsavvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
return gitsavvy_settings.get("show_git_flow_commands") or False
def _generic_select(self, help_text, options, callback,
no_opts="There are no branches available"):
"""
Display a quick_panel with help_text as the first option and options as
the rest, passing the given callback to `show_quick_panel`.
If options is empty or None, display only the `no_opts` text.
"""
if not options:
self.window.show_quick_panel([no_opts], None)
else:
self.window.show_quick_panel(
[help_text] + options,
callback,
flags=sublime.MONOSPACE_FONT
)
def get_value(self, options, index):
"""
Convert a selected quick_panel index to selected option.
Ignores first option (which is the query).
"""
# If the user pressed `esc` or otherwise cancelled.
if index == -1 or index == 0:
return None
selected = options[index - 1] # skipping help query
if selected.startswith('* '):
selected = selected[2:]
return selected
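# Example of the index mapping: the quick panel shows the help text at
# index 0 followed by the options, so with options == ['develop', '* feature/x']
# index 0 or -1 -> None (help row / cancelled), index 1 -> 'develop' and
# index 2 -> 'feature/x' (a leading '* ' current-branch marker is stripped).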
def get_local_branches(self):
"""
Use get_branches (from BranchesMixin) while filtering
out remote branches and returning a list of names
"""
branches = self.get_branches()
return [b.name for b in branches if not b.remote]
class GsGitFlowInitCommand(FlowCommon):
"""
Through a series of panels, allow the user to initialize git-flow.
"""
def configure_gitflow(self, conf, value):
self.git("config", "gitflow.%s" % conf, value)
if conf.startswith('branch'):
# remove this branch from branches available to next command
self.branches = [b for b in self.branches if b != value]
def run(self, reinit=False, **kwargs):
self.get_flow_settings()
if self.flow_settings['branch.master'] and not reinit:
def confirm_reinit(index):
if index == 1: # Yes
return self.run(reinit=True)
self._generic_select('Git flow is already initialized, re-init?',
['Yes', 'No'], confirm_reinit)
return
self.remotes = list(self.get_remotes().keys())
self._generic_select('Remote to use as origin in git flow?',
self.remotes, self.on_origin_selected,
no_opts="There are no remotes available.")
def on_origin_selected(self, index):
value = self.get_value(self.remotes, index)
if not value:
return
self.configure_gitflow('origin', value)
self.branches = self.get_local_branches()
self._generic_select('Branch for production releases (master)',
self.branches, self.on_master_selected)
def on_master_selected(self, index):
value = self.get_value(self.branches, index)
if not value:
return
self.configure_gitflow('branch.master', value)
if not self.branches:
self._generic_select('No branches found, create branch "develop"?',
['Yes', 'No'], self.create_develop_branch)
return  # create_develop_branch() continues the flow on "Yes"
self._generic_select('Branch for "next release" development',
self.branches, self.on_develop_selected)
def create_develop_branch(self, index):
if index == 1:
self.git('branch', 'develop')
self.branches = self.get_local_branches()
self.on_develop_selected(1)
def on_develop_selected(self, index):
value = self.get_value(self.branches, index)
if not value:
return
# TODO: create develop branch if does not exist yet
self.configure_gitflow('branch.develop', value)
self.window.show_input_panel("Feature branches prefix?: ", "feature/",
self.on_feature_selected, None, None)
def on_feature_selected(self, value):
self.configure_gitflow('prefix.feature', value)
self.window.show_input_panel("Release branches prefix?: ", "release/",
self.on_release_selected, None, None)
def on_release_selected(self, value):
self.configure_gitflow('prefix.release', value)
self.window.show_input_panel("Hotfix branches prefix?: ", "hotfix/",
self.on_hotfix_selected, None, None)
def on_hotfix_selected(self, value):
self.configure_gitflow('prefix.hotfix', value)
self.window.show_input_panel("Support branches prefix?: ", "support/",
self.on_support_selected, None, None)
def on_support_selected(self, value):
self.configure_gitflow('prefix.support', value)
self.window.show_input_panel("Version tag prefix?: ", " ",
self.on_versiontag_selected, None, None)
def on_versiontag_selected(self, tag):
self.configure_gitflow('prefix.versiontag', tag)
sublime.status_message("git flow initialized")
class CompleteMixin(object):
"""
These are the final methods called after setup, which call the actual
git-flow command and display a `status_message` update.
"""
def complete_flow(self, name=None):
self.git("flow", self.flow, self.command, name)
self.show_status_update()
def show_status_update(self):
sublime.status_message("%s %sed, checked out %s" %
(self.flow.capitalize(), self.command,
self.get_current_branch_name()))
class GenericStartMixin(CompleteMixin):
"""
A common `run` method for flow X "start" commands.
"""
command = 'start'
def run(self, **kwargs):
super(GenericStartMixin, self).run(**kwargs)
self.prefix = self.flow_settings[self.prefix_setting]
self.window.show_input_panel(self.query, "", self.complete_flow,
None, None)
class GenericSelectTargetBranch(object):
"""
A useful helper class to prompt for confirmation (if on a branch
belonging to flow) or prompt to select a branch if not.
"""
def run(self, name=None, **kwargs):
super(GenericSelectTargetBranch, self).run(**kwargs)
self.prefix = self.flow_settings[self.prefix_setting]
self.curbranch = self.get_current_branch_name()
if name is None:
if self.curbranch.startswith(self.prefix):
self.cur_name = name = self.curbranch.replace(self.prefix, '')
else:
self.branches = [b.replace(self.prefix, '')
for b in self.get_local_branches()
if b.startswith(self.prefix)]
self._generic_select(
self.name_prompt,
self.branches,
self.on_name_selected,
)
return  # on_name_selected() completes the flow for the chosen branch
self._generic_select(self.query % name, ['Yes', 'No'],
self.on_select_current)
def on_select_current(self, index):
if index != 1:
return None
return self.complete_flow(name=self.cur_name)
def on_name_selected(self, index):
value = self.get_value(self.branches, index)
if not value:
return
return self.complete_flow(name=value)
class GenericFinishMixin(CompleteMixin, GenericSelectTargetBranch):
command = 'finish'
class GenericPublishMixin(CompleteMixin, GenericSelectTargetBranch):
command = 'publish'
def show_status_update(self):
sublime.status_message("%s %sed" % (self.flow.capitalize(),
self.command))
class GenericTrackCommand(CompleteMixin, FlowCommon):
"""
Common mixin to prompt for branch to track and call `complete_flow`.
"""
command = 'track'
def run(self, name=None, **kwargs):
super(GenericTrackCommand, self).run(**kwargs)
if name:
self.complete_flow(name)
return  # name already provided; skip the prompt
self.window.show_input_panel(self.query, "", self.complete_flow,
None, None)
class GsGitFlowFeatureStartCommand(GenericStartMixin, FlowCommon):
prefix_setting = 'prefix.feature'
query = "Feature name?: "
flow = "feature"
class GsGitFlowFeatureFinishCommand(GenericFinishMixin, FlowCommon):
prefix_setting = 'prefix.feature'
query = 'Finish feature: %s?'
name_prompt = 'Finish which feature?'
flow = "feature"
class GsGitFlowFeaturePublishCommand(GenericPublishMixin, FlowCommon):
prefix_setting = 'prefix.feature'
query = 'Publish feature: %s?'
name_prompt = 'Publish which feature?'
flow = "feature"
class GsGitFlowFeatureTrackCommand(GenericTrackCommand, FlowCommon):
query = 'Track which feature?:'
flow = "feature"
class GsGitFlowFeaturePullCommand(CompleteMixin, FlowCommon):
"""
This command first prompts for a remote name and then a feature to pull,
before completing the flow.
"""
prefix_setting = 'prefix.feature'
query = 'Pull which feature?:'
flow = "feature"
command = 'pull'
def run(self, name=None, **kwargs):
super(GsGitFlowFeaturePullCommand, self).run(**kwargs)
self.remotes = list(self.get_remotes().keys())
self._generic_select('Remote to pull feature from?',
self.remotes, self.on_remote_selected,
no_opts="There are no remotes available.")
def on_remote_selected(self, index):
value = self.get_value(self.remotes, index)
if not value:
return
self.remote = value
self.window.show_input_panel(self.query, "", self.complete_flow,
None, None)
def complete_flow(self, name=None):
self.git("flow", self.flow, "pull", self.remote, name)
self.show_status_update()
class GsGitFlowReleaseStartCommand(GenericStartMixin, FlowCommon):
prefix_setting = 'prefix.release'
query = "Release version?: "
flow = "release"
class GsGitFlowReleaseFinishCommand(GenericFinishMixin, FlowCommon):
prefix_setting = 'prefix.release'
query = 'Finish release: %s?'
name_prompt = 'Finish which release?'
flow = "release"
class GsGitFlowReleasePublishCommand(GenericPublishMixin, FlowCommon):
prefix_setting = 'prefix.release'
query = 'Publish release: %s?'
name_prompt = 'Publish which release?'
flow = "release"
class GsGitFlowReleaseTrackCommand(GenericTrackCommand, FlowCommon):
query = 'Track which release?:'
flow = "release"
class GsGitFlowHotfixStartCommand(GenericStartMixin, FlowCommon):
prefix_setting = 'prefix.hotfix'
query = "Hotfix name?: "
flow = "hotfix"
class GsGitFlowHotfixFinishCommand(GenericFinishMixin, FlowCommon):
prefix_setting = 'prefix.hotfix'
query = 'Finish hotfix: %s?'
name_prompt = 'Finish which hotfix?'
flow = "hotfix"
class GsGitFlowHotfixPublishCommand(GenericPublishMixin, FlowCommon):
prefix_setting = 'prefix.hotfix'
query = 'Publish hotfix: %s?'
name_prompt = 'Publish which hotfix?'
flow = "hotfix"
class GsGitFlowSupportStartCommand(GenericStartMixin, FlowCommon):
prefix_setting = 'prefix.support'
query = "Support name?: "
flow = "support"
|
|
from __future__ import absolute_import, division, print_function
import re
from functools import partial
from tornado import httputil
from tornado.httpserver import _CallableAdapter
from tornado.escape import url_escape, url_unescape, utf8
from tornado.log import app_log
from tornado.util import basestring_type, import_object, re_unescape, unicode_type
try:
import typing # noqa
except ImportError:
pass
class Router(httputil.HTTPServerConnectionDelegate):
"""Abstract router interface."""
def find_handler(self, request, **kwargs):
# type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate
"""Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
that can serve the request.
Routing implementations may pass additional kwargs to extend the routing logic.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg kwargs: additional keyword arguments passed by routing implementation.
:returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
process the request.
"""
raise NotImplementedError()
def start_request(self, server_conn, request_conn):
return _RoutingDelegate(self, server_conn, request_conn)
class ReversibleRouter(Router):
"""Abstract router interface for routers that can handle named routes
and support reversing them to original urls.
"""
def reverse_url(self, name, *args):
"""Returns url string for a given route name and arguments
or ``None`` if no match is found.
:arg str name: route name.
:arg args: url parameters.
:returns: parametrized url string for a given route name (or ``None``).
"""
raise NotImplementedError()
class _RoutingDelegate(httputil.HTTPMessageDelegate):
def __init__(self, router, server_conn, request_conn):
self.server_conn = server_conn
self.request_conn = request_conn
self.delegate = None
self.router = router # type: Router
def headers_received(self, start_line, headers):
request = httputil.HTTPServerRequest(
connection=self.request_conn,
server_connection=self.server_conn,
start_line=start_line, headers=headers)
self.delegate = self.router.find_handler(request)
return self.delegate.headers_received(start_line, headers)
def data_received(self, chunk):
return self.delegate.data_received(chunk)
def finish(self):
self.delegate.finish()
def on_connection_close(self):
self.delegate.on_connection_close()
class RuleRouter(Router):
"""Rule-based router implementation."""
def __init__(self, rules=None):
"""Constructs a router from an ordered list of rules::
RuleRouter([
Rule(PathMatches("/handler"), Target),
# ... more rules
])
You can also omit explicit `Rule` constructor and use tuples of arguments::
RuleRouter([
(PathMatches("/handler"), Target),
])
`PathMatches` is a default matcher, so the example above can be simplified::
RuleRouter([
("/handler", Target),
])
In the examples above, ``Target`` can be a nested `Router` instance, an instance of
`~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument.
:arg rules: a list of `Rule` instances or tuples of `Rule`
constructor arguments.
"""
self.rules = [] # type: typing.List[Rule]
if rules:
self.add_rules(rules)
def add_rules(self, rules):
"""Appends new rules to the router.
:arg rules: a list of Rule instances (or tuples of arguments, which are
passed to Rule constructor).
"""
for rule in rules:
if isinstance(rule, (tuple, list)):
assert len(rule) in (2, 3, 4)
if isinstance(rule[0], basestring_type):
rule = Rule(PathMatches(rule[0]), *rule[1:])
else:
rule = Rule(*rule)
self.rules.append(self.process_rule(rule))
def process_rule(self, rule):
"""Override this method for additional preprocessing of each rule.
:arg Rule rule: a rule to be processed.
:returns: the same or modified Rule instance.
"""
return rule
def find_handler(self, request, **kwargs):
for rule in self.rules:
target_params = rule.matcher.match(request)
if target_params is not None:
if rule.target_kwargs:
target_params['target_kwargs'] = rule.target_kwargs
delegate = self.get_target_delegate(
rule.target, request, **target_params)
if delegate is not None:
return delegate
return None
def get_target_delegate(self, target, request, **target_params):
"""Returns an instance of `~.httputil.HTTPMessageDelegate` for a
Rule's target. This method is called by `~.find_handler` and can be
extended to provide additional target types.
:arg target: a Rule's target.
:arg httputil.HTTPServerRequest request: current request.
:arg target_params: additional parameters that can be useful
for `~.httputil.HTTPMessageDelegate` creation.
"""
if isinstance(target, Router):
return target.find_handler(request, **target_params)
elif isinstance(target, httputil.HTTPServerConnectionDelegate):
return target.start_request(request.server_connection, request.connection)
elif callable(target):
return _CallableAdapter(
partial(target, **target_params), request.connection
)
return None
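# Minimal usage sketch, following the RuleRouter docstring above (the target
# here is a hypothetical old-style callable accepting the request; body omitted):
#
#     def handle(request):
#         ...
#
#     router = RuleRouter([("/handler", handle)])
#
# find_handler() would match the path and get_target_delegate() would wrap
# `handle` in a _CallableAdapter bound to the request's connection.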
class ReversibleRuleRouter(ReversibleRouter, RuleRouter):
"""A rule-based router that implements ``reverse_url`` method.
Each rule added to this router may have a ``name`` attribute that can be
used to reconstruct an original uri. The actual reconstruction takes place
in a rule's matcher (see `Matcher.reverse`).
"""
def __init__(self, rules=None):
self.named_rules = {} # type: typing.Dict[str, typing.Any]
super(ReversibleRuleRouter, self).__init__(rules)
def process_rule(self, rule):
rule = super(ReversibleRuleRouter, self).process_rule(rule)
if rule.name:
if rule.name in self.named_rules:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
rule.name)
self.named_rules[rule.name] = rule
return rule
def reverse_url(self, name, *args):
if name in self.named_rules:
return self.named_rules[name].matcher.reverse(*args)
for rule in self.rules:
if isinstance(rule.target, ReversibleRouter):
reversed_url = rule.target.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
return None
class Rule(object):
"""A routing rule."""
def __init__(self, matcher, target, target_kwargs=None, name=None):
"""Constructs a Rule instance.
:arg Matcher matcher: a `Matcher` instance used for determining
whether the rule should be considered a match for a specific
request.
:arg target: a Rule's target (typically a ``RequestHandler`` or
`~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`,
depending on routing implementation).
:arg dict target_kwargs: a dict of parameters that can be useful
at the moment of target instantiation (for example, ``status_code``
for a ``RequestHandler`` subclass). They end up in
``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate`
method.
:arg str name: the name of the rule that can be used to find it
in `ReversibleRouter.reverse_url` implementation.
"""
if isinstance(target, str):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
target = import_object(target)
self.matcher = matcher # type: Matcher
self.target = target
self.target_kwargs = target_kwargs if target_kwargs else {}
self.name = name
def reverse(self, *args):
return self.matcher.reverse(*args)
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.matcher,
self.target, self.target_kwargs, self.name)
class Matcher(object):
"""Represents a matcher for request features."""
def match(self, request):
"""Matches current instance against the request.
:arg httputil.HTTPServerRequest request: current HTTP request
:returns: a dict of parameters to be passed to the target handler
(for example, ``handler_kwargs``, ``path_args``, ``path_kwargs``
can be passed for proper `~.web.RequestHandler` instantiation).
An empty dict is a valid (and common) return value to indicate a match
when the argument-passing features are not used.
``None`` must be returned to indicate that there is no match."""
raise NotImplementedError()
def reverse(self, *args):
"""Reconstructs full url from matcher instance and additional arguments."""
return None
class AnyMatches(Matcher):
"""Matches any request."""
def match(self, request):
return {}
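# A custom matcher only needs to honour the Matcher.match() contract above:
# return a (possibly empty) dict on a match and None otherwise. Hypothetical
# sketch (not part of Tornado) matching on the HTTP method:
#
#     class MethodMatches(Matcher):
#         def __init__(self, method):
#             self.method = method.upper()
#         def match(self, request):
#             return {} if request.method == self.method else None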
class HostMatches(Matcher):
"""Matches requests from hosts specified by ``host_pattern`` regex."""
def __init__(self, host_pattern):
if isinstance(host_pattern, basestring_type):
if not host_pattern.endswith("$"):
host_pattern += "$"
self.host_pattern = re.compile(host_pattern)
else:
self.host_pattern = host_pattern
def match(self, request):
if self.host_pattern.match(request.host_name):
return {}
return None
class DefaultHostMatches(Matcher):
"""Matches requests from host that is equal to application's default_host.
Always returns no match if ``X-Real-Ip`` header is present.
"""
def __init__(self, application, host_pattern):
self.application = application
self.host_pattern = host_pattern
def match(self, request):
# Look for default host if not behind load balancer (for debugging)
if "X-Real-Ip" not in request.headers:
if self.host_pattern.match(self.application.default_host):
return {}
return None
class PathMatches(Matcher):
"""Matches requests with paths specified by ``path_pattern`` regex."""
def __init__(self, path_pattern):
if isinstance(path_pattern, basestring_type):
if not path_pattern.endswith('$'):
path_pattern += '$'
self.regex = re.compile(path_pattern)
else:
self.regex = path_pattern
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
self._path, self._group_count = self._find_groups()
def match(self, request):
match = self.regex.match(request.path)
if match is None:
return None
if not self.regex.groups:
return {}
path_args, path_kwargs = [], {}
# Pass matched groups to the handler. Since
# match.groups() includes both named and
# unnamed groups, we want to use either groups
# or groupdict but not both.
if self.regex.groupindex:
path_kwargs = dict(
(str(k), _unquote_or_none(v))
for (k, v) in match.groupdict().items())
else:
path_args = [_unquote_or_none(s) for s in match.groups()]
return dict(path_args=path_args, path_kwargs=path_kwargs)
def reverse(self, *args):
if self._path is None:
raise ValueError("Cannot reverse url regex " + self.regex.pattern)
assert len(args) == self._group_count, "required number of arguments " \
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes)):
a = str(a)
converted_args.append(url_escape(utf8(a), plus=False))
return self._path % tuple(converted_args)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return None, None
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
try:
unescaped_fragment = re_unescape(fragment)
except ValueError as exc:
# If we can't unescape part of it, we can't
# reverse this url.
return (None, None)
pieces.append(unescaped_fragment)
return ''.join(pieces), self.regex.groups
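# A minimal usage sketch (not part of Tornado itself): PathMatches derives a
# reverse template from its regex, so reverse() can rebuild a concrete path.
# The pattern and arguments below are made up for illustration.
def _path_matches_sketch():
    matcher = PathMatches(r"/(\d{4})/([a-z-]+)/")
    # Two positional groups -> template '/%s/%s/', so reverse() takes two args.
    assert matcher.reverse(2023, "release-notes") == "/2023/release-notes/"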
class URLSpec(Rule):
"""Specifies mappings between URLs and handlers.
    .. versionchanged:: 4.5
`URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for
backwards compatibility.
"""
def __init__(self, pattern, handler, kwargs=None, name=None):
"""Parameters:
* ``pattern``: Regular expression to be matched. Any capturing
groups in the regex will be passed in to the handler's
get/post/etc methods as arguments (by keyword if named, by
          position if unnamed. Named and unnamed capturing groups may
          not be mixed in the same rule).
* ``handler``: `~.web.RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`~.web.Application.reverse_url`.
"""
super(URLSpec, self).__init__(PathMatches(pattern), handler, kwargs, name)
self.regex = self.matcher.regex
self.handler_class = self.target
self.kwargs = kwargs
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _unquote_or_none(s):
"""None-safe wrapper around url_unescape to handle unmatched optional
groups correctly.
Note that args are passed as bytes so the handler can decide what
encoding to use.
"""
if s is None:
return s
return url_unescape(s, encoding=None, plus=False)
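# A minimal sketch (not part of Tornado itself) of how ReversibleRuleRouter
# resolves a named rule back into a path. The rule name and pattern are
# hypothetical, and None stands in for a real handler or nested router.
def _reverse_url_sketch():
    router = ReversibleRuleRouter([
        Rule(PathMatches(r"/item/(\d+)/"), None, name="item-detail"),
    ])
    assert router.reverse_url("item-detail", 42) == "/item/42/"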
|
|
# Copyright (c) 2014 Mirantis, Inc.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cgi
import os
import tempfile
import jsonschema
from keystoneclient import exceptions as keystone_ex
from keystoneclient import service_catalog
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_log import versionutils
import six
from webob import exc
import murano.api.v1
from murano.api.v1 import validation_schemas
from murano.common import exceptions
from murano.common import policy
import murano.common.utils as murano_utils
from murano.common import wsgi
from murano.db.catalog import api as db_api
from murano.common.i18n import _, _LW
from murano.packages import exceptions as pkg_exc
from murano.packages import load_utils
from muranoclient.glance import client as glare_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SUPPORTED_PARAMS = murano.api.v1.SUPPORTED_PARAMS
LIST_PARAMS = murano.api.v1.LIST_PARAMS
ORDER_VALUES = murano.api.v1.ORDER_VALUES
PKG_PARAMS_MAP = murano.api.v1.PKG_PARAMS_MAP
OPERATOR_VALUES = murano.api.v1.OPERATOR_VALUES
def _check_content_type(req, content_type):
try:
req.get_content_type((content_type,))
except exceptions.UnsupportedContentType:
msg = _("Content-Type must be '{type}'").format(type=content_type)
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _get_filters(query_params):
filters = {}
for param_pair in query_params:
k, v = param_pair
if k not in SUPPORTED_PARAMS:
LOG.warning(_LW("Search by parameter '{name}' "
"is not supported. Skipping it.").format(name=k))
continue
if k in LIST_PARAMS:
if v.startswith('in:') and k in OPERATOR_VALUES:
in_value = v[len('in:'):]
try:
filters[k] = murano_utils.split_for_quotes(in_value)
except ValueError as err:
LOG.warning(_LW("Search by parameter '{name}' "
"caused an {message} error."
"Skipping it.").format(name=k,
message=err))
else:
filters.setdefault(k, []).append(v)
else:
filters[k] = v
order_by = filters.get('order_by', [])
for i in order_by[:]:
if ORDER_VALUES and i not in ORDER_VALUES:
filters['order_by'].remove(i)
LOG.warning(_LW("Value of 'order_by' parameter is not valid. "
"Allowed values are: {values}. Skipping it.")
.format(values=", ".join(ORDER_VALUES)))
return filters
def _validate_body(body):
"""Check multipart/form-data has two parts
    Check that multipart/form-data has two parts: text (which is a json string
    and should be parsed into a dictionary in the serializer) and a file, which
    is stored as a cgi.FieldStorage instance. Also validate that the file size
    doesn't exceed the limit: seek to the end of the file, get the position of
    EOF and reset the file position to the beginning.
"""
def check_file_size(f):
mb_limit = CONF.murano.package_size_limit
pkg_size_limit = mb_limit * 1024 * 1024
f.seek(0, 2)
size = f.tell()
f.seek(0)
if size > pkg_size_limit:
raise exc.HTTPBadRequest(explanation=_(
'Uploading file is too large. '
'The limit is {0} Mb').format(mb_limit))
if len(body) > 2:
msg = _("'multipart/form-data' request body should contain 1 or 2 "
"parts: json string and zip archive. Current body consists "
"of {amount} part(s)").format(amount=len(body.keys()))
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
file_obj = None
package_meta = None
for part in body.values():
if isinstance(part, cgi.FieldStorage):
file_obj = part
check_file_size(file_obj.file)
if isinstance(part, dict):
package_meta = part
if file_obj is None:
msg = _('There is no file package with application description')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return file_obj, package_meta
class Controller(object):
"""WSGI controller for application catalog resource in Murano v1 API."""
def _validate_limit(self, value):
if value is None:
return
try:
value = int(value)
except ValueError:
msg = _("Limit param must be an integer")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if value <= 0:
msg = _("Limit param must be positive")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return value
def update(self, req, body, package_id):
"""List of allowed changes
List of allowed changes:
{ "op": "add", "path": "/tags", "value": [ "foo", "bar" ] }
{ "op": "add", "path": "/categories", "value": [ "foo", "bar" ] }
{ "op": "remove", "path": "/tags" }
{ "op": "remove", "path": "/categories" }
{ "op": "replace", "path": "/tags", "value": ["foo", "bar"] }
{ "op": "replace", "path": "/is_public", "value": true }
{ "op": "replace", "path": "/description",
"value":"New description" }
{ "op": "replace", "path": "/name", "value": "New name" }
"""
policy.check("modify_package", req.context, {'package_id': package_id})
pkg_to_update = db_api.package_get(package_id, req.context)
if pkg_to_update.is_public:
policy.check("manage_public_package", req.context)
_check_content_type(req, 'application/murano-packages-json-patch')
if not isinstance(body, list):
msg = _('Request body must be a JSON array of operation objects.')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
for change in body:
if 'is_public' in change['path']:
if change['value'] is True and not pkg_to_update.is_public:
policy.check('publicize_package', req.context)
if 'name' in change['path']:
if len(change['value']) > 80:
msg = _('Package name should be 80 characters maximum')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
package = db_api.package_update(package_id, body, req.context)
return package.to_dict()
def get(self, req, package_id):
policy.check("get_package", req.context, {'package_id': package_id})
package = db_api.package_get(package_id, req.context)
return package.to_dict()
def search(self, req):
policy.check("get_package", req.context)
manage_public = True
try:
policy.check("manage_public_package", req.context)
except exc.HTTPForbidden:
manage_public = False
filters = _get_filters(req.GET.items())
limit = self._validate_limit(filters.get('limit'))
if limit is None:
limit = CONF.murano.limit_param_default
limit = min(CONF.murano.api_limit_max, limit)
result = {}
catalog = req.GET.pop('catalog', '').lower() == 'true'
packages = db_api.package_search(
filters, req.context, manage_public, limit, catalog=catalog)
if len(packages) == limit:
result['next_marker'] = packages[-1].id
result['packages'] = [package.to_dict() for package in packages]
return result
def upload(self, req, body=None):
"""Upload new file archive
Upload new file archive for the new package
together with package metadata.
"""
policy.check("upload_package", req.context)
_check_content_type(req, 'multipart/form-data')
file_obj, package_meta = _validate_body(body)
if package_meta:
try:
jsonschema.validate(package_meta,
validation_schemas.PKG_UPLOAD_SCHEMA)
except jsonschema.ValidationError as e:
msg = _("Package schema is not valid: {reason}").format(
reason=e)
LOG.exception(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
package_meta = {}
if package_meta.get('is_public'):
policy.check('publicize_package', req.context)
with tempfile.NamedTemporaryFile(delete=False) as tempf:
LOG.debug("Storing package archive in a temporary file")
content = file_obj.file.read()
if not content:
msg = _("Uploading file can't be empty")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
tempf.write(content)
package_meta['archive'] = content
try:
with load_utils.load_from_file(
tempf.name, target_dir=None,
drop_dir=True) as pkg_to_upload:
# extend dictionary for update db
for k, v in six.iteritems(PKG_PARAMS_MAP):
if hasattr(pkg_to_upload, k):
package_meta[v] = getattr(pkg_to_upload, k)
if len(package_meta['name']) > 80:
msg = _('Package name should be 80 characters maximum')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
try:
package = db_api.package_upload(
package_meta, req.context.tenant)
except db_exc.DBDuplicateEntry:
msg = _('Package with specified full '
'name is already registered')
LOG.exception(msg)
raise exc.HTTPConflict(msg)
return package.to_dict()
except pkg_exc.PackageLoadError as e:
msg = _("Couldn't load package from file: {reason}").format(
reason=e)
LOG.exception(msg)
raise exc.HTTPBadRequest(explanation=msg)
finally:
LOG.debug("Deleting package archive temporary file")
os.remove(tempf.name)
def get_ui(self, req, package_id):
if CONF.engine.packages_service == 'murano':
target = {'package_id': package_id}
policy.check("get_package", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.ui_definition
else:
g_client = self._get_glare_client(req)
blob_data = g_client.artifacts.download_blob(package_id, 'archive')
with tempfile.NamedTemporaryFile() as tempf:
for chunk in blob_data:
tempf.write(chunk)
tempf.file.flush()
os.fsync(tempf.file.fileno())
with load_utils.load_from_file(tempf.name, target_dir=None,
drop_dir=True) as pkg:
return pkg.ui
def get_logo(self, req, package_id):
target = {'package_id': package_id}
policy.check("get_package", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.logo
def get_supplier_logo(self, req, package_id):
package = db_api.package_get(package_id, req.context)
return package.supplier_logo
def download(self, req, package_id):
target = {'package_id': package_id}
policy.check("download_package", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.archive
def delete(self, req, package_id):
target = {'package_id': package_id}
policy.check("delete_package", req.context, target)
package = db_api.package_get(package_id, req.context)
if package.is_public:
policy.check("manage_public_package", req.context, target)
db_api.package_delete(package_id, req.context)
def get_category(self, req, category_id):
policy.check("get_category", req.context)
category = db_api.category_get(category_id, packages=True)
return category.to_dict()
@versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
in_favor_of='categories.list()')
def show_categories(self, req):
policy.check("get_category", req.context)
categories = db_api.categories_list()
return {'categories': [category.name for category in categories]}
def list_categories(self, req):
"""List all categories
List all categories with pagination and sorting
Acceptable filter params:
:param sort_keys: an array of fields used to sort the list
:param sort_dir: the direction of the sort ('asc' or 'desc')
:param limit: the number of categories to list
:param marker: the ID of the last item in the previous page
"""
def _get_category_filters(req):
query_params = {}
valid_query_params = ['sort_keys', 'sort_dir', 'limit', 'marker']
for key, value in req.GET.items():
if key not in valid_query_params:
raise exc.HTTPBadRequest(
_('Bad value passed to filter. '
'Got {key}, expected:{valid}').format(
key=key, valid=', '.join(valid_query_params)))
if key == 'sort_keys':
available_sort_keys = ['name', 'created',
'updated', 'package_count', 'id']
value = [v.strip() for v in value.split(',')]
for sort_key in value:
if sort_key not in available_sort_keys:
raise exc.HTTPBadRequest(
explanation=_('Invalid sort key: {sort_key}. '
'Must be one of the following: '
'{available}').format(
sort_key=sort_key,
available=', '.join(available_sort_keys)))
if key == 'sort_dir':
if value not in ['asc', 'desc']:
msg = _('Invalid sort direction: {0}').format(value)
raise exc.HTTPBadRequest(explanation=msg)
query_params[key] = value
return query_params
policy.check("get_category", req.context)
filters = _get_category_filters(req)
marker = filters.get('marker')
limit = self._validate_limit(filters.get('limit'))
result = {}
categories = db_api.categories_list(filters,
limit=limit,
marker=marker)
if len(categories) == limit:
result['next_marker'] = categories[-1].id
result['categories'] = [category.to_dict() for category in categories]
return result
def add_category(self, req, body=None):
policy.check("add_category", req.context)
category_name = body.get('name')
if not category_name:
raise exc.HTTPBadRequest(
                explanation='Please specify a name for the category to create')
if len(category_name) > 80:
msg = _('Category name should be 80 characters maximum')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
try:
category = db_api.category_add(category_name)
except db_exc.DBDuplicateEntry:
            msg = _('Category with the specified name already exists')
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
return category.to_dict()
def delete_category(self, req, category_id):
target = {'category_id': category_id}
policy.check("delete_category", req.context, target)
category = db_api.category_get(category_id, packages=True)
if category.packages:
msg = _("It's impossible to delete categories assigned "
"to the package, uploaded to the catalog")
raise exc.HTTPForbidden(explanation=msg)
db_api.category_delete(category_id)
def _get_glare_client(self, request):
glare_settings = CONF.glare
token = request.context.auth_token
url = glare_settings.url
if not url:
url = self._get_glare_url(request)
client = glare_client.Client(
endpoint=url, token=token, insecure=glare_settings.insecure,
key_file=glare_settings.key_file or None,
ca_file=glare_settings.ca_file or None,
cert_file=glare_settings.cert_file or None,
type_name='murano',
type_version=1)
return client
def _get_glare_url(self, request):
sc = request.context.service_catalog
token = request.context.auth_token
try:
return service_catalog.ServiceCatalogV2(
{'serviceCatalog': sc}).url_for(
service_type='artifact',
endpoint_type=CONF.glare.endpoint_type,
region_name=CONF.home_region)
except keystone_ex.EndpointNotFound:
return service_catalog.ServiceCatalogV3(
token,
{'catalog': sc}).url_for(
service_type='artifact',
endpoint_type=CONF.glare.endpoint_type,
region_name=CONF.home_region)
def create_resource():
specific_content_types = {
'get_ui': ['text/plain'],
'download': ['application/octet-stream'],
'get_logo': ['application/octet-stream'],
'get_supplier_logo': ['application/octet-stream']}
deserializer = wsgi.RequestDeserializer(
specific_content_types=specific_content_types)
return wsgi.Resource(Controller(), deserializer=deserializer)
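# Illustrative sketch (not part of the murano API module): the contract of
# Controller._validate_limit as used by search() and list_categories().
def _validate_limit_sketch():
    controller = Controller()
    assert controller._validate_limit('20') == 20
    assert controller._validate_limit(None) is None  # caller falls back to config
    try:
        controller._validate_limit('0')  # non-positive limits are rejected
    except exc.HTTPBadRequest:
        pass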
|
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
# progressbar - Text progressbar library for python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Text progressbar library for python.
This library provides a text mode progressbar. This is typically used
to display the progress of a long running operation, providing a
visual clue that processing is underway.
The ProgressBar class manages the progress, and the format of the line
is given by a number of widgets. A widget is an object that may
display differently depending on the state of the progress. There are
three types of widget:
- a string, which always shows itself;
- a ProgressBarWidget, which may return a different value every time
  its update method is called; and
- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
expands to fill the remaining width of the line.
The progressbar module is very easy to use, yet very powerful. It
automatically supports features like auto-resizing when available.
"""
__author__ = "Nilton Volpato"
__author_email__ = "first-name dot last-name @ gmail.com"
__date__ = "2006-05-07"
__version__ = "2.2"
# Changelog
#
# 2006-05-07: v2.2 fixed bug in windows
# 2005-12-04: v2.1 autodetect terminal width, added start method
# 2005-12-04: v2.0 everything is now a widget (wow!)
# 2005-12-03: v1.0 rewrite using widgets
# 2005-06-02: v0.5 rewrite
# 2004-??-??: v0.1 first version
import sys
import time
from array import array
try:
from fcntl import ioctl
import termios
except ImportError:
pass
import signal
import threading
class ProgressBarWidget(object):
"""This is an element of ProgressBar formatting.
    The ProgressBar object will call its update method when an update
    is needed. Its size may change between calls, but the results will
    not be good if the size changes drastically and repeatedly.
"""
def update(self, pbar):
"""Returns the string representing the widget.
The parameter pbar is a reference to the calling ProgressBar,
where one can access attributes of the class for knowing how
the update must be made.
        At least this function must be overridden."""
pass
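# Sketch of a user-defined widget built on the interface above (illustrative
# only; CountWidget is not part of the original library). Usage would be e.g.
# ProgressBar(widgets=[CountWidget(), ' ', Bar()], maxval=200).start().
class CountWidget(ProgressBarWidget):
    "Shows the raw 'current/maximum' count instead of a percentage."
    def update(self, pbar):
        return '%d/%d' % (pbar.currval, pbar.maxval)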
class SimpleMessageWidget(ProgressBarWidget):
"A message to be printed"
message = ""
def __init__(self, message=""):
self.message = message
def update(self, pbar):
return self.message
SimpleMessage = SimpleMessageWidget
class ProgressBarWidgetHFill(object):
"""This is a variable width element of ProgressBar formatting.
    The ProgressBar object will call its update method, informing the
    width this object must be made. This is like TeX \\hfill: it will
expand to fill the line. You can use more than one in the same
line, and they will all have the same width, and together will
fill the line.
"""
def update(self, pbar, width):
"""Returns the string representing the widget.
The parameter pbar is a reference to the calling ProgressBar,
where one can access attributes of the class for knowing how
the update must be made. The parameter width is the total
horizontal width the widget must have.
        At least this function must be overridden."""
pass
class ETA(ProgressBarWidget):
"Widget for the Estimated Time of Arrival"
def format_time(self, seconds):
return time.strftime('%H:%M:%S', time.gmtime(seconds))
def update(self, pbar):
if pbar.currval == 0:
return 'ETA: --:--:--'
elif pbar.finished:
return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
else:
elapsed = pbar.seconds_elapsed
eta = elapsed * pbar.maxval / pbar.currval - elapsed
return 'ETA: %s' % self.format_time(eta)
class FileTransferSpeed(ProgressBarWidget):
"Widget for showing the transfer speed (useful for file transfers)."
def __init__(self):
self.fmt = '%6.2f %s'
self.units = ['B','K','M','G','T','P']
def update(self, pbar):
        if pbar.seconds_elapsed < 2e-6:  # == 0
bps = 0.0
else:
bps = float(pbar.currval) / pbar.seconds_elapsed
spd = bps
for u in self.units:
if spd < 1000:
break
spd /= 1000
return self.fmt % (spd, u+'/s')
class RotatingMarker(ProgressBarWidget):
"A rotating marker for filling the bar of progress."
def __init__(self, markers='|/-\\'):
self.markers = markers
self.curmark = -1
def update(self, pbar):
if pbar.finished:
return self.markers[0]
self.curmark = (self.curmark + 1)%len(self.markers)
return self.markers[self.curmark]
class Percentage(ProgressBarWidget):
"Just the percentage done."
def update(self, pbar):
return '%3d%%' % pbar.percentage()
class Bar(ProgressBarWidgetHFill):
"The bar of progress. It will strech to fill the line."
def __init__(self, marker='#', left='|', right='|'):
self.marker = marker
self.left = left
self.right = right
def _format_marker(self, pbar):
if isinstance(self.marker, (str, unicode)):
return self.marker
else:
return self.marker.update(pbar)
def update(self, pbar, width):
percent = pbar.percentage()
cwidth = width - len(self.left) - len(self.right)
marked_width = int(percent * cwidth / 100)
m = self._format_marker(pbar)
bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
return bar
class ReverseBar(Bar):
"The reverse bar of progress, or bar of regress. :)"
def update(self, pbar, width):
percent = pbar.percentage()
cwidth = width - len(self.left) - len(self.right)
marked_width = int(percent * cwidth / 100)
m = self._format_marker(pbar)
bar = (self.left + (m*marked_width).rjust(cwidth) + self.right)
return bar
default_widgets = [Percentage(), ' ', Bar()]
class ProgressBar(object):
"""This is the ProgressBar class, it updates and prints the bar.
The term_width parameter may be an integer. Or None, in which case
it will try to guess it, if it fails it will default to 80 columns.
The simple use is like this:
>>> pbar = ProgressBar().start()
>>> for i in xrange(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
But anything you want to do is possible (well, almost anything).
You can supply different widgets of any type in any order. And you
can even write your own widgets! There are many widgets already
shipped and you should experiment with them.
When implementing a widget update method you may access any
attribute or function of the ProgressBar object calling the
widget's update method. The most important attributes you would
like to access are:
- currval: current value of the progress, 0 <= currval <= maxval
- maxval: maximum (and final) value of the progress
    - finished: True if the bar has finished (reached 100%), False otherwise
- start_time: first time update() method of ProgressBar was called
- seconds_elapsed: seconds elapsed since start_time
- percentage(): percentage of the progress (this is a method)
"""
def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
fd=sys.stderr):
assert maxval > 0
self.maxval = maxval
self.widgets = widgets
self.fd = fd
self.signal_set = False
if term_width is None:
try:
self.handle_resize(None,None)
signal.signal(signal.SIGWINCH, self.handle_resize)
self.signal_set = True
except:
self.term_width = 79
else:
self.term_width = term_width
self.currval = 0
self.finished = False
self.prev_percentage = -1
self.start_time = None
self.seconds_elapsed = 0
self.update_lock = threading.Lock()
def handle_resize(self, signum, frame):
h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
self.term_width = w
def percentage(self):
"Returns the percentage of the progress."
return self.currval*100.0 / self.maxval
def _format_widgets(self):
r = []
hfill_inds = []
num_hfill = 0
currwidth = 0
for i, w in enumerate(self.widgets):
if isinstance(w, ProgressBarWidgetHFill):
r.append(w)
hfill_inds.append(i)
num_hfill += 1
elif isinstance(w, (str, unicode)):
r.append(w)
currwidth += len(w)
else:
weval = w.update(self)
currwidth += len(weval)
r.append(weval)
for iw in hfill_inds:
r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
return r
def _format_line(self):
return ''.join(self._format_widgets()).ljust(self.term_width)
def _need_update(self):
return int(self.percentage()) != int(self.prev_percentage)
def update(self, value, backwards_allowed = False):
"Updates the progress bar to a new value."
# make sure no two threads update the bar at the same time
#print("Acquiring ProgressBar.update lock")
self.update_lock.acquire()
assert 0 <= value <= self.maxval
if self.currval < value:
do_update = True
elif self.currval > value and backwards_allowed:
do_update = True
else:
do_update = False
if do_update:
self.currval = value
if not self._need_update() or self.finished:
#print("Releasing ProgressBar.update lock")
self.update_lock.release()
return
if not self.start_time:
self.start_time = time.time()
self.seconds_elapsed = time.time() - self.start_time
self.prev_percentage = self.percentage()
if value != self.maxval:
self.fd.write(self._format_line() + '\r')
else:
self.finished = True
self.fd.write(self._format_line() + '\n')
else:
# no update needed
pass
#print("Releasing ProgressBar.update lock")
self.update_lock.release()
return
def start(self):
"""Start measuring time, and prints the bar at 0%.
It returns self so you can use it like this:
>>> pbar = ProgressBar().start()
>>> for i in xrange(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
"""
self.update(0)
return self
def finish(self):
"""Used to tell the progress is finished."""
self.update(self.maxval)
if self.signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
class ProgressBarWithMessage():
def __init__(self, maxval, message=""):
self.bar_message = SimpleMessageWidget(message)
bar_widgets = [Percentage(), self.bar_message, Bar(), ETA()]
self.bar = ProgressBar(widgets = bar_widgets, maxval=maxval)
return
def start(self,):
return self.bar.start()
def finish(self):
return self.bar.finish()
def set_message(self, message):
return self.bar_message.update(message)
def update(self, value):
return self.bar.update(value)
# end of class ProgressBarWithMessage
if __name__=='__main__':
def example1():
widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
for i in range(1000000):
# do something
pbar.update(10*i+1)
pbar.finish()
print
def example2():
class CrazyFileTransferSpeed(FileTransferSpeed):
"It's bigger between 45 and 80 percent"
def update(self, pbar):
if 45 < pbar.percentage() < 80:
return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
else:
return FileTransferSpeed.update(self,pbar)
widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=10000000)
# maybe do something
pbar.start()
for i in range(2000000):
# do something
pbar.update(5*i+1)
pbar.finish()
print
def example3():
widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
for i in range(1000000):
# do something
pbar.update(10*i+1)
pbar.finish()
print
def example4():
widgets = ['Test: ', Percentage(), ' ',
Bar(marker='0',left='[',right=']'),
' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=500)
pbar.start()
for i in range(100,500+1,50):
time.sleep(0.2)
pbar.update(i)
pbar.finish()
print
example1()
example2()
example3()
example4()
|
|
import networkx as nx
import numpy as np
import scipy.sparse as sp
import logging
from collections import defaultdict
def stochastic_normalization(matrix):
logging.info('Creating google matrix...')
matrix = matrix.tolil()
try:
matrix.setdiag(0)
except TypeError:
matrix.setdiag(np.zeros(matrix.shape[0]))
matrix = matrix.tocsr()
d = matrix.sum(axis=1).getA1()
nzs = np.where(d > 0)
d[nzs] = 1 / d[nzs]
matrix = (sp.diags(d, 0).tocsc().dot(matrix)).transpose()
logging.info('Google matrix created.')
return matrix
def page_rank(matrix, start_nodes,
epsilon=1e-6,
max_steps=10000,
damping=0.85,
spread_step=10,
spread_percent=0.5,
try_shrink=True):
assert(len(start_nodes)) > 0
    # this method assumes that column sums are all equal to 1 (stochastic normalization!)
size = matrix.shape[0]
if start_nodes is None:
start_nodes = range(size)
nz = size
else:
nz = len(start_nodes)
start_vec = np.zeros((size, 1))
start_vec[start_nodes] = 1
start_rank = start_vec / len(start_nodes)
rank_vec = start_vec / len(start_nodes)
# calculate the max spread:
shrink = False
which = np.zeros(0)
if try_shrink:
v = start_vec / len(start_nodes)
steps = 0
while nz < size * spread_percent and steps < spread_step:
steps += 1
v += matrix.dot(v)
nz_new = np.count_nonzero(v)
if nz_new == nz:
shrink = True
break
nz = nz_new
rr = np.arange(matrix.shape[0])
which = (v[rr] > 0).reshape(size)
if shrink:
start_rank = start_rank[which]
rank_vec = rank_vec[which]
matrix = matrix[:, which][which, :]
diff = np.Inf
steps = 0
while diff > epsilon and steps < max_steps: # not converged yet
steps += 1
new_rank = matrix.dot(rank_vec)
rank_sum = np.sum(new_rank)
if rank_sum < 0.999999999:
new_rank += start_rank * (1 - rank_sum)
new_rank = damping * new_rank + (1 - damping) * start_rank
new_diff = np.linalg.norm(rank_vec - new_rank, 1)
diff = new_diff
rank_vec = new_rank
if try_shrink and shrink:
ret = np.zeros(size)
ret[which] = rank_vec.reshape(rank_vec.shape[0])
ret[start_nodes] = 0
return ret.flatten()
else:
rank_vec[start_nodes] = 0
return rank_vec.flatten()
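# Illustrative sketch (not part of the original module): personalized PageRank
# on a tiny 3-node cycle, restarting from node 0.
def _page_rank_sketch():
    adjacency = sp.csr_matrix(np.array([[0., 1., 0.],
                                        [0., 0., 1.],
                                        [1., 0., 0.]]))
    google_matrix = stochastic_normalization(adjacency)
    # start nodes are zeroed in the result, so only nodes 1 and 2 keep mass
    return page_rank(google_matrix, [0], epsilon=1e-9, damping=0.85)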
def nx_pagerank(network, node_list, enriched_nodes):
enriched_nodes = set(enriched_nodes)
matrix = nx.to_scipy_sparse_matrix(network, dtype=float, nodelist=node_list, format='csr')
enriched_indices = [i for i in range(len(node_list)) if node_list[i] in enriched_nodes]
normalized_matrix = stochastic_normalization(matrix)
pr = page_rank(normalized_matrix, enriched_indices, epsilon=1e-10, damping=0.99)
pr_dict = {}
for i in range(len(node_list)):
pr_dict[node_list[i]] = pr[i]
return pr, pr_dict
def shrink_by_pr(network, node_list, pr, percentage, enriched_symbols, interdependent_relations, naive_removal=False):
if percentage < 1:
new_node_list = []
for node_index, node in enumerate(node_list):
if node not in enriched_symbols:
new_node_list.append((node, pr[node_index]))
new_node_list.sort(key=lambda x: x[1], reverse=True)
# threshold = new_node_list[int(percentage * len(new_node_list))]
belows = defaultdict(set)
for x in network:
for node in network.edge[x]:
belows[node].add(x)
for node, score in new_node_list[int(percentage * len(new_node_list)):]:
if node not in enriched_symbols:
if naive_removal:
network.remove_node(node)
else:
remove_regular(network, node, belows, interdependent_relations)
def remove_regular(network, node, belows, interdependent_relations):
# below = [x for x in network if node in network.edge[x]]
# print set(below) == set(belows[node])
below = belows[node]
above = network.edge[node].keys()
relations = set([network.edge[x][node]['type'] for x in below])
relations.update([network.edge[node][x]['type'] for x in above])
if 'annotated_by' in relations:
relations.remove('annotated_by')
examples = [x for x in below if network.edge[x][node]['type'] == 'annotated_by']
for general_relation, specific_relation in interdependent_relations:
        # this takes care of compositions of relations, such as part_of and is_a, which compose into part_of.
        # in that context, part_of is more general and is_a is more specific.
relations.remove(general_relation)
relations.remove(specific_relation)
general_below = [x for x in below if network.edge[x][node]['type'] == general_relation]
        general_above = [x for x in above if network.edge[node][x]['type'] == general_relation]
specific_below = [x for x in below if network.edge[x][node]['type'] == specific_relation]
        specific_above = [x for x in above if network.edge[node][x]['type'] == specific_relation]
for upper in general_above:
for lower in general_below + specific_below:
network.add_edge(lower, upper, type=general_relation)
belows[upper].add(lower)
belows[upper].remove(node)
for upper in specific_above:
for lower in general_below:
network.add_edge(lower, upper, type=general_relation)
belows[upper].add(lower)
for lower in specific_below:
network.add_edge(lower, upper, type=specific_relation)
belows[upper].add(lower)
for example in examples:
network.add_edge(example, upper, type='annotated_by')
belows[upper].add(example)
belows[upper].remove(node)
for relation in relations:
r_below = [x for x in below if network.edge[x][node]['type'] == relation]
r_above = [x for x in above if network.edge[node][x]['type'] == relation]
for upper in r_above:
for lower in r_below:
network.add_edge(lower, upper, type=relation)
belows[upper].add(lower)
for example in examples:
network.add_edge(example, upper, type='annotated_by')
belows[upper].add(example)
belows[upper].remove(node)
network.remove_node(node)
def shrink_hyper_by_pr(network, node_list, pr, percentage, enriched_symbols):
if percentage < 1:
new_node_list = []
for node_index, node in enumerate(node_list):
if not node.startswith('r_') and not node.startswith('a_') and not network[node].values()[0]['type'] == 'predicate':
new_node_list.append((node, pr[node_index]))
new_node_list.sort(key=lambda x: x[1], reverse=True)
# threshold = new_node_list[int(percentage * len(new_node_list))]
for node, score in new_node_list[int(percentage * len(new_node_list)):]:
if node not in enriched_symbols:
remove_hyper(network, node)
# # network.remove_node(node)
# else:
# for node in enriched_symbols:
# if node in network:
# network.remove_node(node)
def remove_hyper(network, node):
relations = defaultdict(list)
annotations = []
to_delete = []
for edge in network.edge[node]:
if edge.startswith('r_'):
key = [y for y in network.edge[edge] if network.edge[edge][y]['type'] == 'predicate'].pop()
relations[key].append(edge)
elif edge.startswith('a_'):
to_delete.append(edge)
annotations.append([x for x in network.edge[edge] if network.edge[edge][x]['type'] == 'object'].pop())
for relation in relations:
subject_to = []
object_to = []
for edge in relations[relation]:
if network.edge[edge][node]['type'] == 'subject':
subject_to.append([x for x in network.edge[edge] if network.edge[edge][x]['type'] == 'object'].pop())
elif network.edge[edge][node]['type'] == 'object':
object_to.append([x for x in network.edge[edge] if network.edge[edge][x]['type'] == 'subject'].pop())
else:
raise Exception('This should not happen')
network.remove_node(edge)
for object in subject_to:
for subject in object_to:
relation_node = 'r_n_%s-%s' % (subject[-7:], object[-7:])
if relation_node in network:
assert object in network.edge[relation_node]
assert subject in network.edge[relation_node]
assert relation in network.edge[relation_node]
else:
network.add_node(relation_node)
network.add_edge(relation_node, object, type='object')
network.add_edge(relation_node, subject, type='subject')
network.add_edge(relation_node, relation, type='predicate')
for annotation in annotations:
annotation_node = 'a_n_%s-%s' % (annotation.split('#')[-1], object[-7:])
if annotation_node in network:
assert object in network.edge[annotation_node]
assert annotation in network.edge[annotation_node]
assert 'annotates' in network.edge[annotation_node]
else:
network.add_node(annotation_node)
network.add_edge(annotation_node, object, type='subject')
network.add_edge(annotation_node, annotation, type='object')
network.add_edge(annotation_node, 'annotates', type='predicate')
for x in to_delete:
network.remove_node(x)
network.remove_node(node)
def label_propagation_normalization(matrix):
matrix = matrix.tocsr()
try:
matrix.setdiag(0)
except TypeError:
matrix.setdiag(np.zeros(matrix.shape[0]))
d = matrix.sum(axis=1).getA1()
nzs = np.where(d > 0)
d[nzs] = 1 / np.sqrt(d[nzs])
dm = sp.diags(d, 0).tocsc()
return dm.dot(matrix).dot(dm)
def label_propagation(graph_matrix, class_matrix, alpha, epsilon=1e-12, max_steps=10000):
# This method assumes the label-propagation normalization and a symmetric matrix with no rank sinks.
steps = 0
diff = np.inf
current_labels = class_matrix
while diff > epsilon and steps < max_steps:
# print steps
# print diff
steps += 1
new_labels = alpha * graph_matrix.dot(current_labels) + (1 - alpha) * class_matrix
diff = np.linalg.norm(new_labels - current_labels) / np.linalg.norm(new_labels)
current_labels = new_labels
return current_labels
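# Illustrative sketch (not part of the original module): propagate two one-hot
# labels over a 4-node path graph using the two helpers above.
def _label_propagation_sketch():
    adjacency = sp.csr_matrix(np.array([[0., 1., 0., 0.],
                                        [1., 0., 1., 0.],
                                        [0., 1., 0., 1.],
                                        [0., 0., 1., 0.]]))
    graph_matrix = label_propagation_normalization(adjacency)
    class_matrix = np.zeros((4, 2))
    class_matrix[0, 0] = 1.0  # node 0 carries class 0
    class_matrix[3, 1] = 1.0  # node 3 carries class 1
    return label_propagation(graph_matrix, class_matrix, alpha=0.8)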
if __name__=='__main__':
while True:
g = nx.DiGraph()
g.add_edge(1, 2, type='is_a')
g.add_edge(2, 3, type='is_a')
g.add_edge(3, 4, type='is_a')
g.add_edge(5, 2, type='is_a')
g.add_edge(6, 3, type='is_a')
g.add_edge(7, 4, type='is_a')
g.add_edge(6, 7, type='is_a')
s = [0.9, 0.7, 0.6, 0.2, 0.8, 0.4, 0.5]
        shrink_by_pr(g, [1,2,3,4,5,6,7], s, 0.55, set(), [])
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
import datetime
import time
import json
import uuid
from mock import Mock
import six
from gremlin_python.statics import *
from gremlin_python.structure.graph import Vertex, Edge, Property, VertexProperty, Graph, Path
from gremlin_python.structure.io.graphsonV2d0 import GraphSONWriter, GraphSONReader, GraphSONUtil
import gremlin_python.structure.io.graphsonV2d0
from gremlin_python.process.traversal import P
from gremlin_python.process.strategies import SubgraphStrategy
from gremlin_python.process.graph_traversal import __
class TestGraphSONReader(object):
graphson_reader = GraphSONReader()
def test_number_input(self):
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Int32",
"@value": 31
}))
assert isinstance(x, int)
assert 31 == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Int64",
"@value": 31
}))
assert isinstance(x, long)
assert long(31) == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Float",
"@value": 31.3
}))
assert isinstance(x, float)
assert 31.3 == x
##
x = self.graphson_reader.readObject(json.dumps({
"@type": "g:Double",
"@value": 31.2
}))
assert isinstance(x, float)
assert 31.2 == x
def test_graph(self):
vertex = self.graphson_reader.readObject("""
{"@type":"g:Vertex", "@value":{"id":{"@type":"g:Int32","@value":1},"label":"person","outE":{"created":[{"id":{"@type":"g:Int32","@value":9},"inV":{"@type":"g:Int32","@value":3},"properties":{"weight":{"@type":"g:Double","@value":0.4}}}],"knows":[{"id":{"@type":"g:Int32","@value":7},"inV":{"@type":"g:Int32","@value":2},"properties":{"weight":{"@type":"g:Double","@value":0.5}}},{"id":{"@type":"g:Int32","@value":8},"inV":{"@type":"g:Int32","@value":4},"properties":{"weight":{"@type":"g:Double","@value":1.0}}}]},"properties":{"name":[{"id":{"@type":"g:Int64","@value":0},"value":"marko"}],"age":[{"id":{"@type":"g:Int64","@value":1},"value":{"@type":"g:Int32","@value":29}}]}}}""")
assert isinstance(vertex, Vertex)
assert "person" == vertex.label
assert 1 == vertex.id
assert isinstance(vertex.id, int)
assert vertex == Vertex(1)
##
vertex = self.graphson_reader.readObject("""
{"@type":"g:Vertex", "@value":{"id":{"@type":"g:Float","@value":45.23}}}""")
assert isinstance(vertex, Vertex)
assert 45.23 == vertex.id
assert isinstance(vertex.id, FloatType)
assert "vertex" == vertex.label
assert vertex == Vertex(45.23)
##
vertex_property = self.graphson_reader.readObject("""
{"@type":"g:VertexProperty", "@value":{"id":"anId","label":"aKey","value":true,"vertex":{"@type":"g:Int32","@value":9}}}""")
assert isinstance(vertex_property, VertexProperty)
assert "anId" == vertex_property.id
assert "aKey" == vertex_property.label
assert vertex_property.value
assert vertex_property.vertex == Vertex(9)
##
vertex_property = self.graphson_reader.readObject("""
{"@type":"g:VertexProperty", "@value":{"id":{"@type":"g:Int32","@value":1},"label":"name","value":"marko"}}""")
assert isinstance(vertex_property, VertexProperty)
assert 1 == vertex_property.id
assert "name" == vertex_property.label
assert "marko" == vertex_property.value
assert vertex_property.vertex is None
##
edge = self.graphson_reader.readObject("""
{"@type":"g:Edge", "@value":{"id":{"@type":"g:Int64","@value":17},"label":"knows","inV":"x","outV":"y","inVLabel":"xLab","properties":{"aKey":"aValue","bKey":true}}}""")
# print edge
assert isinstance(edge, Edge)
assert 17 == edge.id
assert "knows" == edge.label
assert edge.inV == Vertex("x", "xLabel")
assert edge.outV == Vertex("y", "vertex")
##
property = self.graphson_reader.readObject("""
{"@type":"g:Property", "@value":{"key":"aKey","value":{"@type":"g:Int64","@value":17},"element":{"@type":"g:Edge","@value":{"id":{"@type":"g:Int64","@value":122},"label":"knows","inV":"x","outV":"y","inVLabel":"xLab"}}}}""")
# print property
assert isinstance(property, Property)
assert "aKey" == property.key
assert 17 == property.value
assert Edge(122, Vertex("x"), "knows", Vertex("y")) == property.element
def test_path(self):
path = self.graphson_reader.readObject(
"""{"@type":"g:Path","@value":{"labels":[["a"],["b","c"],[]],"objects":[{"@type":"g:Vertex","@value":{"id":{"@type":"g:Int32","@value":1},"label":"person","properties":{"name":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":0},"value":"marko","label":"name"}}],"age":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":1},"value":{"@type":"g:Int32","@value":29},"label":"age"}}]}}},{"@type":"g:Vertex","@value":{"id":{"@type":"g:Int32","@value":3},"label":"software","properties":{"name":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":4},"value":"lop","label":"name"}}],"lang":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":5},"value":"java","label":"lang"}}]}}},"lop"]}}"""
)
assert isinstance(path, Path)
assert "path[v[1], v[3], lop]" == str(path)
assert Vertex(1) == path[0]
assert Vertex(1) == path["a"]
assert "lop" == path[2]
assert 3 == len(path)
def test_custom_mapping(self):
# extended mapping
class X(object):
pass
type_string = "test:Xtype"
override_string = "g:Int64"
serdes = Mock()
reader = GraphSONReader(deserializer_map={type_string: serdes})
assert type_string in reader.deserializers
# base dicts are not modified
assert type_string not in gremlin_python.structure.io.graphsonV2d0._deserializers
x = X()
o = reader.toObject({GraphSONUtil.TYPE_KEY: type_string, GraphSONUtil.VALUE_KEY: x})
serdes.objectify.assert_called_once_with(x, reader)
assert o is serdes.objectify()
# overridden mapping
type_string = "g:Int64"
serdes = Mock()
reader = GraphSONReader(deserializer_map={type_string: serdes, override_string: serdes})
assert gremlin_python.structure.io.graphsonV2d0._deserializers[type_string] is not reader.deserializers[type_string]
value = 3
o = reader.toObject({GraphSONUtil.TYPE_KEY: type_string, GraphSONUtil.VALUE_KEY: value})
serdes.objectify.assert_called_once_with(value, reader)
assert o is serdes.objectify()
def test_datetime(self):
expected = datetime.datetime(2016, 12, 14, 16, 14, 36, 295000)
pts = time.mktime((expected.year, expected.month, expected.day,
expected.hour, expected.minute, expected.second,
-1, -1, -1)) + expected.microsecond / 1e6
timestamp = int(round(pts * 1000))
dt = self.graphson_reader.readObject(json.dumps({"@type": "g:Date", "@value": timestamp}))
assert isinstance(dt, datetime.datetime)
# TINKERPOP-1848
assert dt == expected
def test_timestamp(self):
dt = self.graphson_reader.readObject(json.dumps({"@type": "g:Timestamp", "@value": 1481750076295}))
assert isinstance(dt, timestamp)
assert float(dt) == 1481750076.295
def test_uuid(self):
prop = self.graphson_reader.readObject(
json.dumps({'@type': 'g:UUID', '@value': "41d2e28a-20a4-4ab0-b379-d810dede3786"}))
assert isinstance(prop, uuid.UUID)
assert str(prop) == '41d2e28a-20a4-4ab0-b379-d810dede3786'
class TestGraphSONWriter(object):
graphson_writer = GraphSONWriter()
graphson_reader = GraphSONReader()
def test_number_output(self):
assert {"@type": "g:Int64", "@value": 2} == json.loads(self.graphson_writer.writeObject(long(2)))
assert {"@type": "g:Int32", "@value": 1} == json.loads(self.graphson_writer.writeObject(1))
assert {"@type": "g:Double", "@value": 3.2} == json.loads(self.graphson_writer.writeObject(3.2))
assert """true""" == self.graphson_writer.writeObject(True)
def test_numbers(self):
assert {"@type": "g:Int64", "@value": 2} == json.loads(self.graphson_writer.writeObject(long(2)))
assert {"@type": "g:Int32", "@value": 1} == json.loads(self.graphson_writer.writeObject(1))
assert {"@type": "g:Double", "@value": 3.2} == json.loads(self.graphson_writer.writeObject(3.2))
assert """true""" == self.graphson_writer.writeObject(True)
def test_P(self):
result = {'@type': 'g:P',
'@value': {
'predicate': 'and',
'value': [{
'@type': 'g:P',
'@value': {
'predicate': 'or',
'value': [{
'@type': 'g:P',
'@value': {'predicate': 'lt', 'value': 'b'}
},
{'@type': 'g:P', '@value': {'predicate': 'gt', 'value': 'c'}}
]
}
},
{'@type': 'g:P', '@value': {'predicate': 'neq', 'value': 'd'}}]}}
assert result == json.loads(
self.graphson_writer.writeObject(P.lt("b").or_(P.gt("c")).and_(P.neq("d"))))
result = {'@type': 'g:P', '@value': {'predicate':'within','value': [{"@type": "g:Int32", "@value": 1},{"@type": "g:Int32", "@value": 2}]}}
assert result == json.loads(self.graphson_writer.writeObject(P.within([1,2])))
def test_strategies(self):
# we have a proxy model for now given that we don't want to have to have g:XXX all registered on the Gremlin traversal machine (yet)
assert {"@type": "g:SubgraphStrategy", "@value": {}} == json.loads(
self.graphson_writer.writeObject(SubgraphStrategy))
assert {"@type": "g:SubgraphStrategy", "@value": {
"vertices": {"@type": "g:Bytecode", "@value": {"step": [["has", "name", "marko"]]}}}} == json.loads(
self.graphson_writer.writeObject(SubgraphStrategy(vertices=__.has("name", "marko"))))
def test_graph(self):
# TODO: this assert is not compatible with python 3 and now that we test with both 2 and 3 it fails
assert {"@type": "g:Vertex", "@value": {"id": {"@type": "g:Int64", "@value": 12}, "label": "person"}} == json.loads(self.graphson_writer.writeObject(Vertex(long(12), "person")))
assert {"@type": "g:Edge", "@value": {"id": {"@type": "g:Int32", "@value": 7},
"outV": {"@type": "g:Int32", "@value": 0},
"outVLabel": "person",
"label": "knows",
"inV": {"@type": "g:Int32", "@value": 1},
"inVLabel": "dog"}} == json.loads(
self.graphson_writer.writeObject(Edge(7, Vertex(0, "person"), "knows", Vertex(1, "dog"))))
assert {"@type": "g:VertexProperty", "@value": {"id": "blah", "label": "keyA", "value": True,
"vertex": "stephen"}} == json.loads(
self.graphson_writer.writeObject(VertexProperty("blah", "keyA", True, Vertex("stephen"))))
assert {"@type": "g:Property",
"@value": {"key": "name", "value": "marko", "element": {"@type": "g:VertexProperty",
"@value": {
"vertex": "vertexId",
"id": {"@type": "g:Int32", "@value": 1234},
"label": "aKey"}}}} == json.loads(
self.graphson_writer.writeObject(
Property("name", "marko", VertexProperty(1234, "aKey", 21345, Vertex("vertexId")))))
vertex = self.graphson_reader.readObject(self.graphson_writer.writeObject(Vertex(1, "person")))
assert 1 == vertex.id
assert "person" == vertex.label
edge = self.graphson_reader.readObject(
self.graphson_writer.writeObject(Edge(3, Vertex(1, "person"), "knows", Vertex(2, "dog"))))
assert "knows" == edge.label
assert 3 == edge.id
assert 1 == edge.outV.id
assert 2 == edge.inV.id
vertex_property = self.graphson_reader.readObject(
self.graphson_writer.writeObject(VertexProperty(1, "age", 32, Vertex(1))))
assert 1 == vertex_property.id
assert "age" == vertex_property.key
assert 32 == vertex_property.value
property = self.graphson_reader.readObject(self.graphson_writer.writeObject(Property("age", 32.2, Edge(1,Vertex(2),"knows",Vertex(3)))))
assert "age" == property.key
assert 32.2 == property.value
def test_custom_mapping(self):
# extended mapping
class X(object):
pass
serdes = Mock()
writer = GraphSONWriter(serializer_map={X: serdes})
assert X in writer.serializers
# base dicts are not modified
assert X not in gremlin_python.structure.io.graphsonV2d0._serializers
obj = X()
d = writer.toDict(obj)
serdes.dictify.assert_called_once_with(obj, writer)
assert d is serdes.dictify()
# overridden mapping
serdes = Mock()
writer = GraphSONWriter(serializer_map={int: serdes})
assert gremlin_python.structure.io.graphsonV2d0._serializers[int] is not writer.serializers[int]
value = 3
d = writer.toDict(value)
serdes.dictify.assert_called_once_with(value, writer)
assert d is serdes.dictify()
def test_write_long(self):
mapping = self.graphson_writer.toDict(1)
assert mapping['@type'] == 'g:Int32'
assert mapping['@value'] == 1
mapping = self.graphson_writer.toDict(long(1))
assert mapping['@type'] == 'g:Int64'
assert mapping['@value'] == 1
def test_datetime(self):
expected = json.dumps({"@type": "g:Date", "@value": 1481750076295}, separators=(',', ':'))
dt = datetime.datetime.fromtimestamp(1481750076295 / 1000.0)
output = self.graphson_writer.writeObject(dt)
assert expected == output
def test_timestamp(self):
expected = json.dumps({"@type": "g:Timestamp", "@value": 1481750076295}, separators=(',', ':'))
ts = timestamp(1481750076295 / 1000.0)
output = self.graphson_writer.writeObject(ts)
assert expected == output
def test_uuid(self):
expected = json.dumps({'@type': 'g:UUID', '@value': "41d2e28a-20a4-4ab0-b379-d810dede3786"}, separators=(',', ':'))
prop = uuid.UUID("41d2e28a-20a4-4ab0-b379-d810dede3786")
output = self.graphson_writer.writeObject(prop)
assert expected == output
class TestFunctionalGraphSONIO(object):
"""Functional IO tests"""
def test_timestamp(self, remote_connection_v2):
g = Graph().traversal().withRemote(remote_connection_v2)
ts = timestamp(1481750076295 / 1000)
resp = g.addV('test_vertex').property('ts', ts)
resp = resp.toList()
vid = resp[0].id
try:
ts_prop = g.V(vid).properties('ts').toList()[0]
assert isinstance(ts_prop.value, timestamp)
assert ts_prop.value == ts
finally:
g.V(vid).drop().iterate()
def test_datetime(self, remote_connection_v2):
g = Graph().traversal().withRemote(remote_connection_v2)
dt = datetime.datetime.fromtimestamp(1481750076295 / 1000)
resp = g.addV('test_vertex').property('dt', dt).toList()
vid = resp[0].id
try:
dt_prop = g.V(vid).properties('dt').toList()[0]
assert isinstance(dt_prop.value, datetime.datetime)
assert dt_prop.value == dt
finally:
g.V(vid).drop().iterate()
def test_uuid(self, remote_connection_v2):
g = Graph().traversal().withRemote(remote_connection_v2)
uid = uuid.UUID("41d2e28a-20a4-4ab0-b379-d810dede3786")
resp = g.addV('test_vertex').property('uuid', uid).toList()
vid = resp[0].id
try:
uid_prop = g.V(vid).properties('uuid').toList()[0]
assert isinstance(uid_prop.value, uuid.UUID)
assert uid_prop.value == uid
finally:
g.V(vid).drop().iterate()
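# Illustrative sketch (not part of the test suite above): registering a custom
# Python type with the V2 GraphSON writer/reader, mirroring the custom-mapping
# tests but without mocks. The "test:Point" type name and the _Point/_PointIO
# classes are made up for this example.
class _Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
class _PointIO(object):
    @classmethod
    def dictify(cls, obj, writer):
        return {GraphSONUtil.TYPE_KEY: "test:Point",
                GraphSONUtil.VALUE_KEY: {"x": obj.x, "y": obj.y}}
    @classmethod
    def objectify(cls, value, reader):
        return _Point(value["x"], value["y"])
def _custom_mapping_sketch():
    writer = GraphSONWriter(serializer_map={_Point: _PointIO})
    reader = GraphSONReader(deserializer_map={"test:Point": _PointIO})
    point = reader.readObject(writer.writeObject(_Point(1, 2)))
    assert (point.x, point.y) == (1, 2)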
|
|
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute.contrib import flavorextraspecs \
as flavorextraspecs_v2
from nova.api.openstack.compute.plugins.v3 import flavors_extraspecs \
as flavorextraspecs_v21
import nova.db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_flavor
def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
return stub_flavor_extra_specs()
def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
def return_flavor_extra_specs_item(context, flavor_id, key):
return {key: stub_flavor_extra_specs()[key]}
def return_empty_flavor_extra_specs(context, flavor_id):
return {}
def delete_flavor_extra_specs(context, flavor_id, key):
pass
def stub_flavor_extra_specs():
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
class FlavorsExtraSpecsTestV21(test.TestCase):
bad_request = exception.ValidationError
flavorextraspecs = flavorextraspecs_v21
def _get_request(self, url, use_admin_context=False):
req_url = '/flavors/' + url
return fakes.HTTPRequestV3.blank(req_url,
use_admin_context=use_admin_context)
def setUp(self):
super(FlavorsExtraSpecsTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.controller = self.flavorextraspecs.FlavorExtraSpecsController()
def test_index(self):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key1': 'value1'})
req = self._get_request('1/os-extra_specs')
with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
mock_get.return_value = flavor
res_dict = self.controller.index(req, 1)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(nova.db, 'flavor_extra_specs_get',
return_empty_flavor_extra_specs)
req = self._get_request('1/os-extra_specs')
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key5': 'value5'})
req = self._get_request('1/os-extra_specs/key5')
with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
mock_get.return_value = flavor
res_dict = self.controller.show(req, 1, 'key5')
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(nova.db, 'flavor_extra_specs_get',
return_empty_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key6')
def test_not_found_because_flavor(self):
req = self._get_request('1/os-extra_specs/key5',
use_admin_context=True)
with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key5')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, 1, 'key5', body={'key5': 'value5'})
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key5')
req = self._get_request('1/os-extra_specs', use_admin_context=True)
with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, 1, body={'extra_specs': {'key5': 'value5'}})
def test_delete(self):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key5': 'value5'})
self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
delete_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key5',
use_admin_context=True)
with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
mock_get.return_value = flavor
self.controller.delete(req, 1, 'key5')
def test_delete_no_admin(self):
self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
delete_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key5')
self.assertRaises(exception.Forbidden, self.controller.delete,
req, 1, 'key 5')
def test_delete_spec_not_found(self):
req = self._get_request('1/os-extra_specs/key6',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key6')
def test_create(self):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
res_dict = self.controller.create(req, 1, body=body)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
self.assertEqual(0.5, res_dict['extra_specs']['key2'])
self.assertEqual(5, res_dict['extra_specs']['key3'])
def test_create_no_admin(self):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs')
self.assertRaises(exception.Forbidden, self.controller.create,
req, 1, body=body)
def test_create_flavor_not_found(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorNotFound(flavor_id='')
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
fake_instance_type_extra_specs_update_or_create)
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, 1, body=body)
def test_create_flavor_db_duplicate(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
fake_instance_type_extra_specs_update_or_create)
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
req, 1, body=body)
def _test_create_bad_request(self, body):
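        # helper: create with the given body and expect the version-specific
        # bad-request error (ValidationError for v2.1, HTTPBadRequest for v2)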
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(self.bad_request, self.controller.create,
req, 1, body=body)
def test_create_empty_body(self):
self._test_create_bad_request('')
def test_create_non_dict_extra_specs(self):
self._test_create_bad_request({"extra_specs": "non_dict"})
def test_create_non_string_key(self):
self._test_create_bad_request({"extra_specs": {None: "value1"}})
def test_create_non_string_value(self):
self._test_create_bad_request({"extra_specs": {"key1": None}})
def test_create_zero_length_key(self):
self._test_create_bad_request({"extra_specs": {"": "value1"}})
def test_create_long_key(self):
key = "a" * 256
self._test_create_bad_request({"extra_specs": {key: "value1"}})
def test_create_long_value(self):
value = "a" * 256
self._test_create_bad_request({"extra_specs": {"key1": value}})
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_create_really_long_integer_value(self, mock_flavor_extra_specs):
value = 10 ** 1000
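        # an integer this large stringifies to well over the maximum
        # extra-spec value length (see test_create_long_value above)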
        mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, 1, body={"extra_specs": {"key1": value}})
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_create_invalid_specs_key(self, mock_flavor_extra_specs):
invalid_keys = ("key1/", "<key>", "$$akey$", "!akey", "")
        mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
for key in invalid_keys:
body = {"extra_specs": {key: "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(self.bad_request, self.controller.create,
req, 1, body=body)
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_create_valid_specs_key(self, mock_flavor_extra_specs):
valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
        mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
for key in valid_keys:
body = {"extra_specs": {key: "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
res_dict = self.controller.create(req, 1, body=body)
self.assertEqual('value1', res_dict['extra_specs'][key])
def test_update_item(self):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
res_dict = self.controller.update(req, 1, 'key1', body=body)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_no_admin(self):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1')
self.assertRaises(exception.Forbidden, self.controller.update,
req, 1, 'key1', body=body)
def _test_update_item_bad_request(self, body):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
self.assertRaises(self.bad_request, self.controller.update,
req, 1, 'key1', body=body)
def test_update_item_empty_body(self):
self._test_update_item_bad_request('')
def test_update_item_too_many_keys(self):
body = {"key1": "value1", "key2": "value2"}
self._test_update_item_bad_request(body)
def test_update_item_non_dict_extra_specs(self):
self._test_update_item_bad_request("non_dict")
def test_update_item_non_string_key(self):
self._test_update_item_bad_request({None: "value1"})
def test_update_item_non_string_value(self):
self._test_update_item_bad_request({"key1": None})
def test_update_item_zero_length_key(self):
self._test_update_item_bad_request({"": "value1"})
def test_update_item_long_key(self):
key = "a" * 256
self._test_update_item_bad_request({key: "value1"})
def test_update_item_long_value(self):
value = "a" * 256
self._test_update_item_bad_request({"key1": value})
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'bad', body=body)
def test_update_flavor_not_found(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorNotFound(flavor_id='')
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
fake_instance_type_extra_specs_update_or_create)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, 1, 'key1', body=body)
def test_update_flavor_db_duplicate(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
fake_instance_type_extra_specs_update_or_create)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
req, 1, 'key1', body=body)
def test_update_really_long_integer_value(self):
value = 10 ** 1000
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', body={"key1": value})
class FlavorsExtraSpecsTestV2(FlavorsExtraSpecsTestV21):
bad_request = webob.exc.HTTPBadRequest
flavorextraspecs = flavorextraspecs_v2
def _get_request(self, url, use_admin_context=False):
req_url = '/v2/fake/flavors/' + url
return fakes.HTTPRequest.blank(req_url,
use_admin_context=use_admin_context)
class FlavorsExtraSpecsXMLSerializerTest(test.TestCase):
def test_serializer(self):
serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_specs><key1>value1</key1></extra_specs>')
text = serializer.serialize(dict(extra_specs={"key1": "value1"}))
self.assertEqual(text, expected)
def test_show_update_serializer(self):
serializer = flavorextraspecs_v2.ExtraSpecTemplate()
expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_spec key="key1">value1</extra_spec>')
text = serializer.serialize(dict({"key1": "value1"}))
self.assertEqual(text, expected)
def test_serializer_with_colon_tagname(self):
# Our test object to serialize
obj = {'extra_specs': {'foo:bar': '999'}}
serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
'</extra_specs>'))
result = serializer.serialize(obj)
self.assertEqual(expected_xml, result)
|
|
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import md5
import unittest
import uuid
import random
import shutil
from collections import defaultdict
from test.probe.common import ECProbeTest, Body
from swift.common import direct_client
from swift.common.storage_policy import EC_POLICY
from swift.common.manager import Manager
from swift.obj import reconstructor
from swiftclient import client
class TestReconstructorRevert(ECProbeTest):
def setUp(self):
super(TestReconstructorRevert, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
# sanity
self.assertEqual(self.policy.policy_type, EC_POLICY)
self.reconstructor = Manager(["object-reconstructor"])
def proxy_get(self):
# GET object
headers, body = client.get_object(self.url, self.token,
self.container_name,
self.object_name,
resp_chunk_size=64 * 2 ** 10)
resp_checksum = md5()
for chunk in body:
resp_checksum.update(chunk)
return resp_checksum.hexdigest()
def direct_get(self, node, part):
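        # fetch the fragment archive straight from a single storage node and
        # return its md5, so tests can see exactly which nodes hold data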
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
headers, data = direct_client.direct_get_object(
node, part, self.account, self.container_name,
self.object_name, headers=req_headers,
resp_chunk_size=64 * 2 ** 20)
hasher = md5()
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def test_revert_object(self):
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
hnodes = self.object_ring.get_more_nodes(opart)
        # kill a parity count's worth (2) of primary nodes so we can
        # force data onto handoffs; we do that by renaming the dev dirs
        # to induce 507s
p_dev1 = self.device_dir('object', onodes[0])
p_dev2 = self.device_dir('object', onodes[1])
self.kill_drive(p_dev1)
self.kill_drive(p_dev2)
# PUT object
contents = Body()
headers = {'x-object-meta-foo': 'meta-foo'}
headers_post = {'x-object-meta-bar': 'meta-bar'}
client.put_object(self.url, self.token, self.container_name,
self.object_name, contents=contents,
headers=headers)
client.post_object(self.url, self.token, self.container_name,
self.object_name, headers=headers_post)
        # swiftclient's post_object adds the auth token to the headers dict we
        # passed in; drop it before comparing object metadata below
        del headers_post['X-Auth-Token']
        # these primaries can't serve the data any more; we expect 507
# here and not 404 because we're using mount_check to kill nodes
for onode in (onodes[0], onodes[1]):
try:
self.direct_get(onode, opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 507)
else:
                self.fail('Node data on %r was not fully destroyed!' %
(onode,))
# now take out another primary
p_dev3 = self.device_dir('object', onodes[2])
self.kill_drive(p_dev3)
        # this node can't serve the data any more
try:
self.direct_get(onodes[2], opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 507)
else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[2],))
        # make sure we can still GET the object and that it's correct;
# we're now pulling from handoffs and reconstructing
etag = self.proxy_get()
self.assertEqual(etag, contents.etag)
# rename the dev dirs so they don't 507 anymore
self.revive_drive(p_dev1)
self.revive_drive(p_dev2)
self.revive_drive(p_dev3)
# fire up reconstructor on handoff nodes only
for hnode in hnodes:
hnode_id = (hnode['port'] - 6000) / 10
self.reconstructor.once(number=hnode_id)
        # the reverted primaries have data again
for onode in (onodes[0], onodes[2]):
self.direct_get(onode, opart)
# check meta
meta = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
for key in headers_post:
self.assertTrue(key in meta)
self.assertEqual(meta[key], headers_post[key])
# handoffs are empty
for hnode in hnodes:
try:
self.direct_get(hnode, opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
else:
                self.fail('Node data on %r was not fully destroyed!' %
(hnode,))
    def test_delete_propagate(self):
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
hnodes = self.object_ring.get_more_nodes(opart)
p_dev2 = self.device_dir('object', onodes[1])
# PUT object
contents = Body()
client.put_object(self.url, self.token, self.container_name,
self.object_name, contents=contents)
        # now let's shut one down
self.kill_drive(p_dev2)
# delete on the ones that are left
client.delete_object(self.url, self.token,
self.container_name,
self.object_name)
# spot check a node
try:
self.direct_get(onodes[0], opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
else:
            self.fail('Node data on %r was not fully destroyed!' %
(onodes[0],))
# enable the first node again
self.revive_drive(p_dev2)
# propagate the delete...
# fire up reconstructor on handoff nodes only
for hnode in hnodes:
hnode_id = (hnode['port'] - 6000) / 10
self.reconstructor.once(number=hnode_id)
        # check the revived node to make sure the delete propagated and the
        # object is gone
try:
self.direct_get(onodes[1], opart)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[1],))
# make sure proxy get can't find it
try:
self.proxy_get()
except Exception as err:
self.assertEqual(err.http_status, 404)
else:
            self.fail('Deleted object was still available via proxy GET!')
def test_reconstruct_from_reverted_fragment_archive(self):
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
        # find a primary server that only has one of its devices in the
        # primary node list
group_nodes_by_config = defaultdict(list)
for n in onodes:
group_nodes_by_config[self.config_number(n)].append(n)
for config_number, node_list in group_nodes_by_config.items():
if len(node_list) == 1:
break
else:
self.fail('ring balancing did not use all available nodes')
primary_node = node_list[0]
        # ... and 507 its device
primary_device = self.device_dir('object', primary_node)
self.kill_drive(primary_device)
# PUT object
contents = Body()
etag = client.put_object(self.url, self.token, self.container_name,
self.object_name, contents=contents)
self.assertEqual(contents.etag, etag)
# fix the primary device and sanity GET
self.revive_drive(primary_device)
self.assertEqual(etag, self.proxy_get())
# find a handoff holding the fragment
for hnode in self.object_ring.get_more_nodes(opart):
try:
reverted_fragment_etag = self.direct_get(hnode, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
else:
break
else:
self.fail('Unable to find handoff fragment!')
# we'll force the handoff device to revert instead of potentially
# racing with rebuild by deleting any other fragments that may be on
# the same server
handoff_fragment_etag = None
for node in onodes:
if self.is_local_to(node, hnode):
# we'll keep track of the etag of this fragment we're removing
                # in case we need it later (cue foreshadowing music)...
try:
handoff_fragment_etag = self.direct_get(node, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
# this just means our handoff device was on the same
# machine as the primary!
continue
                # use the primary node's device - not the hnode device
part_dir = self.storage_dir('object', node, part=opart)
shutil.rmtree(part_dir, True)
# revert from handoff device with reconstructor
self.reconstructor.once(number=self.config_number(hnode))
# verify fragment reverted to primary server
self.assertEqual(reverted_fragment_etag,
self.direct_get(primary_node, opart))
# now we'll remove some data on one of the primary node's partners
partner = random.choice(reconstructor._get_partners(
primary_node['index'], onodes))
try:
rebuilt_fragment_etag = self.direct_get(partner, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
            # partner already had its fragment removed
if (handoff_fragment_etag is not None and
self.is_local_to(hnode, partner)):
# oh, well that makes sense then...
rebuilt_fragment_etag = handoff_fragment_etag
else:
# I wonder what happened?
self.fail('Partner inexplicably missing fragment!')
part_dir = self.storage_dir('object', partner, part=opart)
shutil.rmtree(part_dir, True)
# sanity, it's gone
try:
self.direct_get(partner, opart)
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
else:
self.fail('successful GET of removed partner fragment archive!?')
# and force the primary node to do a rebuild
self.reconstructor.once(number=self.config_number(primary_node))
# and validate the partners rebuilt_fragment_etag
try:
self.assertEqual(rebuilt_fragment_etag,
self.direct_get(partner, opart))
except direct_client.DirectClientException as err:
if err.http_status != 404:
raise
else:
self.fail('Did not find rebuilt fragment on partner node')
if __name__ == "__main__":
unittest.main()
|
|
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from functools import reduce
import pytz
from six import text_type
from six.moves.urllib.parse import unquote_plus
from datetime import datetime
from django.conf import settings
from graphite.carbonlink import CarbonLink
from graphite.compat import HttpResponse, HttpResponseBadRequest
from graphite.errors import InputParameterError, handleInputParameterError
from graphite.logger import log
from graphite.render.attime import parseATTime
from graphite.storage import STORE, extractForwardHeaders
from graphite.user_util import getProfile
from graphite.util import epoch, json, pickle, msgpack
def index_json(request):
queryParams = request.GET.copy()
queryParams.update(request.POST)
    jsonp = queryParams.get('jsonp', False)
    try:
requestContext = {
'localOnly': int( queryParams.get('local', 0) ),
'forwardHeaders': extractForwardHeaders(request),
}
matches = STORE.get_index(requestContext)
except Exception:
log.exception()
return json_response_for(request, [], jsonp=jsonp, status=500)
return json_response_for(request, matches, jsonp=jsonp)
def queryParamAsInt(queryParams, name, default):
if name not in queryParams:
return default
try:
return int(queryParams[name])
except Exception as e:
raise InputParameterError('Invalid int value {value} for param {name}: {err}'.format(
value=repr(queryParams[name]),
name=name,
err=str(e)))
@handleInputParameterError
def find_view(request):
"View for finding metrics matching a given pattern"
queryParams = request.GET.copy()
queryParams.update(request.POST)
format = queryParams.get('format', 'treejson')
leaves_only = queryParamAsInt(queryParams, 'leavesOnly', 0)
local_only = queryParamAsInt(queryParams, 'local', 0)
wildcards = queryParamAsInt(queryParams, 'wildcards', 0)
tzinfo = pytz.timezone(settings.TIME_ZONE)
if 'tz' in queryParams:
try:
value = queryParams['tz']
tzinfo = pytz.timezone(value)
except pytz.UnknownTimeZoneError:
pass
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param tz: {err}'
.format(value=repr(value), err=str(e)))
if 'now' in queryParams:
try:
value = queryParams['now']
now = parseATTime(value, tzinfo)
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param now: {err}'
.format(value=repr(value), err=str(e)))
else:
now = datetime.now(tzinfo)
if 'from' in queryParams and str(queryParams['from']) != '-1':
try:
value = queryParams['from']
fromTime = int(epoch(parseATTime(value, tzinfo, now)))
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param from: {err}'
.format(value=repr(value), err=str(e)))
else:
fromTime = -1
if 'until' in queryParams and str(queryParams['until']) != '-1':
try:
value = queryParams['until']
untilTime = int(epoch(parseATTime(value, tzinfo, now)))
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param until: {err}'
.format(value=repr(value), err=str(e)))
else:
untilTime = -1
nodePosition = queryParamAsInt(queryParams, 'position', -1)
jsonp = queryParams.get('jsonp', False)
forward_headers = extractForwardHeaders(request)
if fromTime == -1:
fromTime = None
if untilTime == -1:
untilTime = None
automatic_variants = queryParamAsInt(queryParams, 'automatic_variants', 0)
try:
query = str(queryParams['query'])
except KeyError:
raise InputParameterError('Missing required parameter \'query\'')
if query == '':
raise InputParameterError('Required parameter \'query\' is empty')
if '.' in query:
base_path = query.rsplit('.', 1)[0] + '.'
else:
base_path = ''
if format == 'completer':
query = query.replace('..', '*.')
if not query.endswith('*'):
query += '*'
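        # automatic_variants rewrites comma-separated parts into brace
        # expansions, e.g. foo,bar -> {foo,bar}, so either value matches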
if automatic_variants:
query_parts = query.split('.')
for i,part in enumerate(query_parts):
if ',' in part and '{' not in part:
query_parts[i] = '{%s}' % part
query = '.'.join(query_parts)
try:
matches = list(STORE.find(
query, fromTime, untilTime,
local=local_only,
headers=forward_headers,
leaves_only=leaves_only,
))
except Exception:
log.exception()
raise
log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
matches.sort(key=lambda node: node.name)
log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))
if format == 'treejson':
profile = getProfile(request)
content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
response = json_response_for(request, content, jsonp=jsonp)
elif format == 'nodelist':
content = nodes_by_position(matches, nodePosition)
response = json_response_for(request, content, jsonp=jsonp)
elif format == 'pickle':
content = pickle_nodes(matches)
response = HttpResponse(content, content_type='application/pickle')
elif format == 'msgpack':
content = msgpack_nodes(matches)
response = HttpResponse(content, content_type='application/x-msgpack')
elif format == 'json':
content = json_nodes(matches)
response = json_response_for(request, content, jsonp=jsonp)
elif format == 'completer':
results = []
for node in matches:
node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
if not node.is_leaf:
node_info['path'] += '.'
results.append(node_info)
if len(results) > 1 and wildcards:
wildcardNode = {'name' : '*'}
results.append(wildcardNode)
response = json_response_for(request, { 'metrics' : results }, jsonp=jsonp)
else:
return HttpResponseBadRequest(
content="Invalid value for 'format' parameter",
content_type='text/plain')
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def expand_view(request):
"View for expanding a pattern into matching metric paths"
queryParams = request.GET.copy()
queryParams.update(request.POST)
local_only = int( queryParams.get('local', 0) )
group_by_expr = int( queryParams.get('groupByExpr', 0) )
leaves_only = int( queryParams.get('leavesOnly', 0) )
jsonp = queryParams.get('jsonp', False)
forward_headers = extractForwardHeaders(request)
results = {}
for query in queryParams.getlist('query'):
results[query] = set()
for node in STORE.find(query, local=local_only, headers=forward_headers):
if node.is_leaf or not leaves_only:
results[query].add( node.path )
# Convert our results to sorted lists because sets aren't json-friendly
if group_by_expr:
for query, matches in results.items():
results[query] = sorted(matches)
else:
results = sorted( reduce(set.union, results.values(), set()) )
result = {
'results' : results
}
response = json_response_for(request, result, jsonp=jsonp)
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def get_metadata_view(request):
queryParams = request.GET.copy()
queryParams.update(request.POST)
key = queryParams.get('key')
metrics = queryParams.getlist('metric')
jsonp = queryParams.get('jsonp', False)
results = {}
for metric in metrics:
try:
results[metric] = CarbonLink.get_metadata(metric, key)
except Exception:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.get_metadata(%s, %s)" % (metric, key))
return json_response_for(request, results, jsonp=jsonp)
def set_metadata_view(request):
results = {}
if request.method == 'GET':
metric = request.GET['metric']
key = request.GET['key']
value = request.GET['value']
try:
results[metric] = CarbonLink.set_metadata(metric, key, value)
except Exception:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))
elif request.method == 'POST':
if request.META.get('CONTENT_TYPE') == 'application/json':
operations = json.loads( request.body )
else:
operations = json.loads( request.POST['operations'] )
for op in operations:
metric = None
try:
metric, key, value = op['metric'], op['key'], op['value']
results[metric] = CarbonLink.set_metadata(metric, key, value)
except Exception:
log.exception()
if metric:
results[metric] = dict(error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric)
else:
results = dict(error='Invalid request method')
return json_response_for(request, results)
def tree_json(nodes, base_path, wildcards=False):
results = []
branchNode = {
'allowChildren': 1,
'expandable': 1,
'leaf': 0,
}
leafNode = {
'allowChildren': 0,
'expandable': 0,
'leaf': 1,
}
#Add a wildcard node if appropriate
if len(nodes) > 1 and wildcards:
wildcardNode = {'text' : '*', 'id' : base_path + '*'}
if any(not n.is_leaf for n in nodes):
wildcardNode.update(branchNode)
else:
wildcardNode.update(leafNode)
results.append(wildcardNode)
found = set()
results_leaf = []
results_branch = []
for node in nodes: #Now let's add the matching children
if node.name in found:
continue
found.add(node.name)
resultNode = {
'text' : unquote_plus(str(node.name)),
'id' : base_path + str(node.name),
}
if node.is_leaf:
resultNode.update(leafNode)
results_leaf.append(resultNode)
else:
resultNode.update(branchNode)
results_branch.append(resultNode)
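    # branch nodes are listed before leaf nodes in the returned tree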
results.extend(results_branch)
results.extend(results_leaf)
return results
def nodes_by_position(matches, position):
found = set()
for metric in matches:
nodes = metric.path.split('.')
found.add(nodes[position])
results = { 'nodes' : sorted(found) }
return results
def pickle_nodes(nodes):
nodes_info = []
for node in nodes:
info = dict(path=node.path, is_leaf=node.is_leaf)
if node.is_leaf:
info['intervals'] = node.intervals
nodes_info.append(info)
return pickle.dumps(nodes_info, protocol=-1)
def msgpack_nodes(nodes):
nodes_info = []
# make sure everything is unicode in python 2.x and 3.x
for node in nodes:
info = {
text_type('path'): text_type(node.path),
text_type('is_leaf'): node.is_leaf,
}
if node.is_leaf:
info[text_type('intervals')] = [interval.tuple for interval in node.intervals]
nodes_info.append(info)
return msgpack.dumps(nodes_info, use_bin_type=True)
def json_nodes(nodes):
nodes_info = []
for node in nodes:
info = dict(path=node.path, is_leaf=node.is_leaf)
if node.is_leaf:
info['intervals'] = [{'start': i.start, 'end': i.end} for i in node.intervals]
nodes_info.append(info)
return sorted(nodes_info, key=lambda item: item['path'])
def json_response_for(request, data, content_type='application/json', jsonp=False, **kwargs):
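    # plain application/json responses stay ASCII-escaped; any other Accept
    # header gets UTF-8 output with an explicit charset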
accept = request.META.get('HTTP_ACCEPT', 'application/json')
ensure_ascii = accept == 'application/json'
pretty = bool(request.POST.get('pretty', request.GET.get('pretty')))
content = json.dumps(data, ensure_ascii=ensure_ascii, indent=(2 if pretty else None))
if jsonp:
content = "%s(%s)" % (jsonp, content)
content_type = 'text/javascript'
if not ensure_ascii:
content_type += ';charset=utf-8'
return HttpResponse(content, content_type=content_type, **kwargs)
|
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2009 Red Hat, Inc -
# written by seth vidal skvidal at fedoraproject.org
import os
import sys
import fnmatch
import time
import yumbased
import shutil
from bz2 import BZ2File
from urlgrabber import grabber
import tempfile
import stat
import fcntl
import subprocess
from select import select
from yum import misc, Errors
from yum.repoMDObject import RepoMD, RepoData
from yum.sqlutils import executeSQL
from yum.packageSack import MetaSack
from yum.packages import YumAvailablePackage
import rpmUtils.transaction
from utils import _, errorprint, MDError, lzma, _available_compression
import readMetadata
try:
import sqlite3 as sqlite
except ImportError:
import sqlite
try:
import sqlitecachec
except ImportError:
pass
from utils import _gzipOpen, compressFile, compressOpen, checkAndMakeDir, GzipFile, \
checksum_and_rename, split_list_into_equal_chunks
from utils import num_cpus_online
import deltarpms
__version__ = '0.9.9'
class MetaDataConfig(object):
def __init__(self):
self.quiet = False
self.verbose = False
self.profile = False
self.excludes = []
self.baseurl = None
self.groupfile = None
self.sumtype = 'sha256'
self.pretty = False
self.cachedir = None
self.use_cache = False
self.basedir = os.getcwd()
self.checkts = False
self.split = False
self.update = False
self.deltas = False # do the deltarpm thing
# where to put the .drpms - defaults to 'drpms' inside 'repodata'
self.deltadir = None
self.delta_relative = 'drpms/'
self.oldpackage_paths = [] # where to look for the old packages -
self.deltafile = 'prestodelta.xml'
        self.num_deltas = 1 # number of older versions to delta (max)
        self.workers = 1 # number of worker processes to fork off to grind data
self.max_delta_rpm_size = 100000000
self.update_md_path = None
self.skip_stat = False
self.database = True
self.outputdir = None
self.file_patterns = ['.*bin\/.*', '^\/etc\/.*', '^\/usr\/lib\/sendmail$']
self.dir_patterns = ['.*bin\/.*', '^\/etc\/.*']
self.skip_symlinks = False
self.pkglist = []
self.database_only = False
self.primaryfile = 'primary.xml'
self.filelistsfile = 'filelists.xml'
self.otherfile = 'other.xml'
self.repomdfile = 'repomd.xml'
self.tempdir = '.repodata'
self.finaldir = 'repodata'
self.olddir = '.olddata'
self.mdtimestamp = 0
self.directory = None
self.directories = []
self.changelog_limit = None # needs to be an int or None
self.unique_md_filenames = True
self.additional_metadata = {} # dict of 'type':'filename'
self.revision = str(int(time.time()))
self.content_tags = [] # flat list of strings (like web 2.0 tags)
        self.distro_tags = [] # [(cpeid (None allowed), human-readable-string)]
        self.repo_tags = [] # strings, for whatever they are worth
self.read_pkgs_list = None # filepath/name to write out list of pkgs
# read in this run of createrepo
self.collapse_glibc_requires = True
self.worker_cmd = '/usr/share/createrepo/worker.py'
#self.worker_cmd = './worker.py' # helpful when testing
self.retain_old_md = 0
self.compress_type = 'compat'
class SimpleMDCallBack(object):
def errorlog(self, thing):
print >> sys.stderr, thing
def log(self, thing):
print thing
def progress(self, item, current, total):
sys.stdout.write('\r' + ' ' * 80)
sys.stdout.write("\r%d/%d - %s" % (current, total, item))
sys.stdout.flush()
class MetaDataGenerator:
def __init__(self, config_obj=None, callback=None):
self.conf = config_obj
        if config_obj is None:
self.conf = MetaDataConfig()
if not callback:
self.callback = SimpleMDCallBack()
else:
self.callback = callback
self.ts = rpmUtils.transaction.initReadOnlyTransaction()
self.pkgcount = 0
self.current_pkg = 0
self.files = []
self.rpmlib_reqs = {}
self.read_pkgs = []
self.compat_compress = False
if not self.conf.directory and not self.conf.directories:
raise MDError, "No directory given on which to run."
if self.conf.compress_type == 'compat':
self.compat_compress = True
self.conf.compress_type = None
if not self.conf.compress_type:
self.conf.compress_type = 'gz'
        if self.conf.compress_type not in _available_compression:
            raise MDError, "Compression %s not available: Please choose from: %s" \
                % (self.conf.compress_type, ', '.join(_available_compression))
if not self.conf.directories: # just makes things easier later
self.conf.directories = [self.conf.directory]
if not self.conf.directory: # ensure we have both in the config object
self.conf.directory = self.conf.directories[0]
# the cachedir thing:
if self.conf.cachedir:
self.conf.use_cache = True
# this does the dir setup we need done
self._parse_directory()
self._test_setup_dirs()
def _parse_directory(self):
"""pick up the first directory given to us and make sure we know
where things should go"""
if os.path.isabs(self.conf.directory):
self.conf.basedir = os.path.dirname(self.conf.directory)
self.conf.relative_dir = os.path.basename(self.conf.directory)
else:
self.conf.basedir = os.path.realpath(self.conf.basedir)
self.conf.relative_dir = self.conf.directory
self.package_dir = os.path.join(self.conf.basedir,
self.conf.relative_dir)
if not self.conf.outputdir:
self.conf.outputdir = os.path.join(self.conf.basedir,
self.conf.relative_dir)
def _test_setup_dirs(self):
# start the sanity/stupidity checks
for mydir in self.conf.directories:
if os.path.isabs(mydir):
testdir = mydir
else:
if mydir.startswith('../'):
testdir = os.path.realpath(mydir)
else:
testdir = os.path.join(self.conf.basedir, mydir)
if not os.path.exists(testdir):
raise MDError, _('Directory %s must exist') % mydir
if not os.path.isdir(testdir):
raise MDError, _('%s must be a directory') % mydir
if not os.access(self.conf.outputdir, os.W_OK):
raise MDError, _('Directory %s must be writable.') % self.conf.outputdir
temp_output = os.path.join(self.conf.outputdir, self.conf.tempdir)
if not checkAndMakeDir(temp_output):
raise MDError, _('Cannot create/verify %s') % temp_output
temp_final = os.path.join(self.conf.outputdir, self.conf.finaldir)
if not checkAndMakeDir(temp_final):
raise MDError, _('Cannot create/verify %s') % temp_final
if self.conf.database:
# do flock test on temp_final, temp_output
# if it fails raise MDError
for direc in [temp_final, temp_output]:
f = open(direc + '/locktest', 'w')
try:
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
except (OSError, IOError), e:
raise MDError, _("Could not create exclusive lock in %s and sqlite database generation enabled. Is this path on nfs? Is your lockd running?") % direc
else:
os.unlink(direc + '/locktest')
if self.conf.deltas:
temp_delta = os.path.join(self.conf.outputdir,
self.conf.delta_relative)
if not checkAndMakeDir(temp_delta):
raise MDError, _('Cannot create/verify %s') % temp_delta
self.conf.deltadir = temp_delta
if os.path.exists(os.path.join(self.conf.outputdir, self.conf.olddir)):
raise MDError, _('Old data directory exists, please remove: %s') % self.conf.olddir
# make sure we can write to where we want to write to:
# and pickup the mdtimestamps while we're at it
direcs = ['tempdir' , 'finaldir']
if self.conf.deltas:
direcs.append('deltadir')
for direc in direcs:
filepath = os.path.join(self.conf.outputdir, getattr(self.conf,
direc))
if os.path.exists(filepath):
if not os.access(filepath, os.W_OK):
                    raise MDError, _('error: must be able to write to metadata dir:\n -> %s') % filepath
if self.conf.checkts:
# checking for repodata/repomd.xml - not just the data dir
rxml = filepath + '/repomd.xml'
if os.path.exists(rxml):
timestamp = os.path.getctime(rxml)
if timestamp > self.conf.mdtimestamp:
self.conf.mdtimestamp = timestamp
if self.conf.groupfile:
a = self.conf.groupfile
if self.conf.split:
a = os.path.join(self.package_dir, self.conf.groupfile)
elif not os.path.isabs(a):
a = os.path.join(self.package_dir, self.conf.groupfile)
if not os.path.exists(a):
raise MDError, _('Error: groupfile %s cannot be found.' % a)
self.conf.groupfile = a
if self.conf.cachedir:
a = self.conf.cachedir
if not os.path.isabs(a):
a = os.path.join(self.conf.outputdir, a)
if not checkAndMakeDir(a):
raise MDError, _('Error: cannot open/write to cache dir %s' % a)
self.conf.cachedir = a
def _os_path_walk(self, top, func, arg):
"""Directory tree walk with callback function.
copy of os.path.walk, fixes the link/stating problem
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = os.path.join(top, name)
if os.path.isdir(name):
self._os_path_walk(name, func, arg)
def getFileList(self, directory, ext):
"""Return all files in path matching ext, store them in filelist,
recurse dirs. Returns a list object"""
extlen = len(ext)
def extension_visitor(filelist, dirname, names):
for fn in names:
fn = os.path.join(dirname, fn)
if os.path.isdir(fn):
continue
if self.conf.skip_symlinks and os.path.islink(fn):
continue
                elif fn[-extlen:].lower() == ext:
filelist.append(fn[len(startdir):])
filelist = []
startdir = directory + '/'
self._os_path_walk(startdir, extension_visitor, filelist)
return filelist
def errorlog(self, thing):
"""subclass this if you want something different...."""
errorprint(thing)
def checkTimeStamps(self):
"""check the timestamp of our target dir. If it is not newer than
the repodata return False, else True"""
if self.conf.checkts and self.conf.mdtimestamp:
dn = os.path.join(self.conf.basedir, self.conf.directory)
files = self.getFileList(dn, '.rpm')
files = self.trimRpms(files)
for f in files:
fn = os.path.join(self.conf.basedir, self.conf.directory, f)
if not os.path.exists(fn):
self.callback.errorlog(_('cannot get to file: %s') % fn)
if os.path.getctime(fn) > self.conf.mdtimestamp:
return False
return True
return False
def trimRpms(self, files):
badrpms = []
for rpm_file in files:
for glob in self.conf.excludes:
if fnmatch.fnmatch(rpm_file, glob):
if rpm_file not in badrpms:
badrpms.append(rpm_file)
for rpm_file in badrpms:
if rpm_file in files:
files.remove(rpm_file)
return files
def _setup_old_metadata_lookup(self):
"""sets up the .oldData object for handling the --update call. Speeds
up generating updates for new metadata"""
#FIXME - this only actually works for single dirs. It will only
# function for the first dir passed to --split, not all of them
# this needs to be fixed by some magic in readMetadata.py
# using opts.pkgdirs as a list, I think.
if self.conf.update:
#build the paths
opts = {
'verbose' : self.conf.verbose,
'pkgdir' : os.path.normpath(self.package_dir)
}
if self.conf.skip_stat:
opts['do_stat'] = False
if self.conf.update_md_path:
norm_u_md_path = os.path.normpath(self.conf.update_md_path)
u_md_repodata_path = norm_u_md_path + '/repodata'
if not os.path.exists(u_md_repodata_path):
msg = _('Warning: could not open update_md_path: %s') % u_md_repodata_path
self.callback.errorlog(msg)
old_repo_path = os.path.normpath(norm_u_md_path)
else:
old_repo_path = self.conf.outputdir
#and scan the old repo
self.oldData = readMetadata.MetadataIndex(old_repo_path, opts)
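    # lazily create a single URLGrabber; the property below builds it on
    # first access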
def _setup_grabber(self):
if not hasattr(self, '_grabber'):
self._grabber = grabber.URLGrabber()
return self._grabber
grabber = property(fget = lambda self: self._setup_grabber())
def doPkgMetadata(self):
"""all the heavy lifting for the package metadata"""
if self.conf.update:
self._setup_old_metadata_lookup()
# rpms we're going to be dealing with
if self.conf.pkglist:
packages = self.conf.pkglist
else:
packages = self.getFileList(self.package_dir, '.rpm')
if not isinstance(packages, MetaSack):
packages = self.trimRpms(packages)
self.pkgcount = len(packages)
try:
self.openMetadataDocs()
self.writeMetadataDocs(packages)
self.closeMetadataDocs()
except (IOError, OSError), e:
raise MDError, _('Cannot access/write repodata files: %s') % e
def openMetadataDocs(self):
if self.conf.database_only:
self.setup_sqlite_dbs()
else:
self.primaryfile = self._setupPrimary()
self.flfile = self._setupFilelists()
self.otherfile = self._setupOther()
if self.conf.deltas:
self.deltafile = self._setupDelta()
def _setupPrimary(self):
# setup the primary metadata file
# FIXME - make this be conf.compress_type once y-m-p is fixed
fpz = self.conf.primaryfile + '.' + 'gz'
primaryfilepath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(primaryfilepath, 'w', 'gz')
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<metadata xmlns="http://linux.duke.edu/metadata/common"' \
' xmlns:rpm="http://linux.duke.edu/metadata/rpm" packages="%s">' %
self.pkgcount)
return fo
def _setupFilelists(self):
# setup the filelist file
# FIXME - make this be conf.compress_type once y-m-p is fixed
fpz = self.conf.filelistsfile + '.' + 'gz'
filelistpath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(filelistpath, 'w', 'gz')
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<filelists xmlns="http://linux.duke.edu/metadata/filelists"' \
' packages="%s">' % self.pkgcount)
return fo
def _setupOther(self):
# setup the other file
# FIXME - make this be conf.compress_type once y-m-p is fixed
fpz = self.conf.otherfile + '.' + 'gz'
otherfilepath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(otherfilepath, 'w', 'gz')
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<otherdata xmlns="http://linux.duke.edu/metadata/other"' \
' packages="%s">' %
self.pkgcount)
return fo
def _setupDelta(self):
# setup the other file
fpz = self.conf.deltafile + '.' + self.conf.compress_type
deltafilepath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(deltafilepath, 'w', self.conf.compress_type)
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<prestodelta>\n')
return fo
def read_in_package(self, rpmfile, pkgpath=None, reldir=None):
"""rpmfile == relative path to file from self.packge_dir"""
baseurl = self.conf.baseurl
if not pkgpath:
pkgpath = self.package_dir
if not rpmfile.strip():
raise MDError, "Blank filename passed in, skipping"
if rpmfile.find("://") != -1:
if not hasattr(self, 'tempdir'):
self.tempdir = tempfile.mkdtemp()
pkgname = os.path.basename(rpmfile)
baseurl = os.path.dirname(rpmfile)
reldir = self.tempdir
dest = os.path.join(self.tempdir, pkgname)
if not self.conf.quiet:
self.callback.log('\nDownloading %s' % rpmfile)
try:
rpmfile = self.grabber.urlgrab(rpmfile, dest)
except grabber.URLGrabError, e:
raise MDError, "Unable to retrieve remote package %s: %s" % (
rpmfile, e)
else:
rpmfile = '%s/%s' % (pkgpath, rpmfile)
external_data = { '_cachedir': self.conf.cachedir,
'_baseurl': baseurl,
'_reldir': reldir,
'_packagenumber': self.current_pkg,
'_collapse_libc_requires':self.conf.collapse_glibc_requires,
}
try:
po = yumbased.CreateRepoPackage(self.ts, rpmfile,
sumtype=self.conf.sumtype,
external_data = external_data)
except Errors.MiscError, e:
raise MDError, "Unable to open package: %s" % e
for r in po.requires_print:
if r.startswith('rpmlib('):
self.rpmlib_reqs[r] = 1
if po.checksum in (None, ""):
raise MDError, "No Package ID found for package %s, not going to" \
" add it" % po
return po
def writeMetadataDocs(self, pkglist=[], pkgpath=None):
if not pkglist:
pkglist = self.conf.pkglist
if not pkgpath:
directory = self.conf.directory
else:
directory = pkgpath
# for worker/forked model
# iterate the pkglist - see which ones are handled by --update and let them
# go on their merry way
newpkgs = []
keptpkgs = []
if self.conf.update:
# if we're in --update mode then only act on the new/changed pkgs
for pkg in pkglist:
self.current_pkg += 1
#see if we can pull the nodes from the old repo
#print self.oldData.basenodes.keys()
old_pkg = pkg
if pkg.find("://") != -1:
old_pkg = os.path.basename(pkg)
old_po = self.oldData.getNodes(old_pkg)
if old_po: # we have a match in the old metadata
if self.conf.verbose:
self.callback.log(_("Using data from old metadata for %s")
% pkg)
keptpkgs.append((pkg, old_po))
#FIXME - if we're in update and we have deltas enabled
# check the presto data for this pkg and write its info back out
# to our deltafile
continue
else:
newpkgs.append(pkg)
else:
newpkgs = pkglist
# setup our reldir
if not pkgpath:
reldir = os.path.join(self.conf.basedir, directory)
else:
reldir = pkgpath
# filter out those pkgs which are not files - but are pkgobjects
pkgfiles = []
for pkg in newpkgs:
po = None
if isinstance(pkg, YumAvailablePackage):
po = pkg
self.read_pkgs.append(po.localPkg())
# if we're dealing with remote pkgs - pitch it over to doing
# them one at a time, for now.
elif pkg.find('://') != -1:
po = self.read_in_package(pkg, pkgpath=pkgpath, reldir=reldir)
self.read_pkgs.append(pkg)
if po:
keptpkgs.append((pkg, po))
continue
pkgfiles.append(pkg)
keptpkgs.sort(reverse=True)
        # keptpkgs is a list of (filename, po); pkgfiles is a list of filenames.
        # Need to write them in sorted(filename) order. We loop over pkgfiles,
        # inserting keptpkgs in the right spots (using the upto argument).
def save_keptpkgs(upto):
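            # flush cached (unchanged) packages whose filename sorts before
            # 'upto'; upto=None flushes everything that is left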
while keptpkgs and (upto is None or keptpkgs[-1][0] < upto):
filename, po = keptpkgs.pop()
# reset baseurl in the old pkg
po.basepath = self.conf.baseurl
self.primaryfile.write(po.xml_dump_primary_metadata())
self.flfile.write(po.xml_dump_filelists_metadata())
self.otherfile.write(po.xml_dump_other_metadata(
clog_limit=self.conf.changelog_limit))
if pkgfiles:
# divide that list by the number of workers and fork off that many
# workers to tmpdirs
            # wait for the workers to finish and, as each one comes in,
# open the files they created and write them out to our metadata
# add up the total pkg counts and return that value
self._worker_tmp_path = tempfile.mkdtemp() # setting this in the base object so we can clean it up later
if self.conf.workers < 1:
self.conf.workers = min(num_cpus_online(), len(pkgfiles))
pkgfiles.sort()
worker_chunks = split_list_into_equal_chunks(pkgfiles, self.conf.workers)
worker_cmd_dict = {}
worker_jobs = {}
base_worker_cmdline = [self.conf.worker_cmd,
'--pkgoptions=_reldir=%s' % reldir,
'--pkgoptions=_collapse_libc_requires=%s' % self.conf.collapse_glibc_requires,
'--pkgoptions=_cachedir=%s' % self.conf.cachedir,
'--pkgoptions=_baseurl=%s' % self.conf.baseurl,
'--globalopts=clog_limit=%s' % self.conf.changelog_limit,
'--globalopts=sumtype=%s' % self.conf.sumtype, ]
if self.conf.quiet:
base_worker_cmdline.append('--quiet')
if self.conf.verbose:
base_worker_cmdline.append('--verbose')
for worker_num in range(self.conf.workers):
pkl = self._worker_tmp_path + '/pkglist-%s' % worker_num
f = open(pkl, 'w')
f.write('\n'.join(worker_chunks[worker_num]))
f.close()
workercmdline = []
workercmdline.extend(base_worker_cmdline)
workercmdline.append('--pkglist=%s/pkglist-%s' % (self._worker_tmp_path, worker_num))
worker_cmd_dict[worker_num] = workercmdline
for (num, cmdline) in worker_cmd_dict.items():
if not self.conf.quiet:
self.callback.log("Spawning worker %s with %s pkgs" % (num,
len(worker_chunks[num])))
job = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
worker_jobs[num] = job
files = self.primaryfile, self.flfile, self.otherfile
def log_messages(num):
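                # workers log plain lines on stdout/stderr; a stdout line
                # starting with '*** ' carries the byte counts of the
                # primary/filelists/other XML chunks that immediately follow
                # and are copied into our open metadata files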
job = worker_jobs[num]
while True:
# check stdout and stderr
for stream in select((job.stdout, job.stderr), (), ())[0]:
line = stream.readline()
if line: break
else:
return # EOF, EOF
if stream is job.stdout:
if line.startswith('*** '):
# get data, save to local files
for out, size in zip(files, line[4:].split()):
out.write(stream.read(int(size)))
return
self.callback.log('Worker %s: %s' % (num, line.rstrip()))
else:
self.callback.errorlog('Worker %s: %s' % (num, line.rstrip()))
for i, pkg in enumerate(pkgfiles):
# insert cached packages
save_keptpkgs(pkg)
# save output to local files
log_messages(i % self.conf.workers)
for (num, job) in worker_jobs.items():
# process remaining messages on stderr
log_messages(num)
if job.wait() != 0:
msg = "Worker exited with non-zero value: %s. Fatal." % job.returncode
self.callback.errorlog(msg)
raise MDError, msg
if not self.conf.quiet:
self.callback.log("Workers Finished")
for pkgfile in pkgfiles:
if self.conf.deltas:
try:
po = self.read_in_package(pkgfile, pkgpath=pkgpath, reldir=reldir)
self._do_delta_rpm_package(po)
except MDError, e:
errorprint(e)
continue
self.read_pkgs.append(pkgfile)
save_keptpkgs(None) # append anything left
return self.current_pkg
def closeMetadataDocs(self):
# save them up to the tmp locations:
if not self.conf.quiet:
self.callback.log(_('Saving Primary metadata'))
if self.conf.database_only:
self.md_sqlite.pri_cx.close()
else:
self.primaryfile.write('\n</metadata>')
self.primaryfile.close()
if not self.conf.quiet:
self.callback.log(_('Saving file lists metadata'))
if self.conf.database_only:
self.md_sqlite.file_cx.close()
else:
self.flfile.write('\n</filelists>')
self.flfile.close()
if not self.conf.quiet:
self.callback.log(_('Saving other metadata'))
if self.conf.database_only:
self.md_sqlite.other_cx.close()
else:
self.otherfile.write('\n</otherdata>')
self.otherfile.close()
if self.conf.deltas:
deltam_st = time.time()
if not self.conf.quiet:
self.callback.log(_('Saving delta metadata'))
self.deltafile.write(self.generate_delta_xml())
self.deltafile.write('\n</prestodelta>')
self.deltafile.close()
if self.conf.profile:
self.callback.log('deltam time: %0.3f' % (time.time() - deltam_st))
def _do_delta_rpm_package(self, pkg):
"""makes the drpms, if possible, for this package object.
returns the presto/delta xml metadata as a string
"""
drpm_pkg_time = time.time()
# duck and cover if the pkg.size is > whatever
if int(pkg.size) > self.conf.max_delta_rpm_size:
if not self.conf.quiet:
self.callback.log("Skipping %s package " \
"that is > max_delta_rpm_size" % pkg)
return
# generate a list of all the potential 'old rpms'
opd = self._get_old_package_dict()
# for each of our old_package_paths -
# make a drpm from the newest of that pkg
# get list of potential candidates which are likely to match
for d in self.conf.oldpackage_paths:
pot_cand = []
if d not in opd:
continue
for fn in opd[d]:
if os.path.basename(fn).startswith(pkg.name):
pot_cand.append(fn)
candidates = []
for fn in pot_cand:
try:
thispo = yumbased.CreateRepoPackage(self.ts, fn,
sumtype=self.conf.sumtype)
except Errors.MiscError, e:
continue
if (thispo.name, thispo.arch) != (pkg.name, pkg.arch):
# not the same, doesn't matter
continue
if thispo == pkg: #exactly the same, doesn't matter
continue
if thispo.EVR >= pkg.EVR: # greater or equal, doesn't matter
continue
candidates.append(thispo)
candidates.sort()
candidates.reverse()
for delta_p in candidates[0:self.conf.num_deltas]:
#make drpm of pkg and delta_p
dt_st = time.time()
drpmfn = deltarpms.create_drpm(delta_p, pkg, self.conf.deltadir)
if not self.conf.quiet or self.conf.profile:
self.callback.log('created drpm from %s to %s: %s in %0.3f' % (
delta_p, pkg, drpmfn, (time.time() - dt_st)))
if self.conf.profile:
self.callback.log('total drpm time for %s: %0.3f' % (pkg,
(time.time() - drpm_pkg_time)))
def _get_old_package_dict(self):
if hasattr(self, '_old_package_dict'):
return self._old_package_dict
self._old_package_dict = {}
for d in self.conf.oldpackage_paths:
for f in self.getFileList(d, '.rpm'):
fp = d + '/' + f
fpstat = os.stat(fp)
if int(fpstat[stat.ST_SIZE]) > self.conf.max_delta_rpm_size:
self.callback.log("Skipping %s package " \
"that is > max_delta_rpm_size" % f)
continue
if not self._old_package_dict.has_key(d):
self._old_package_dict[d] = []
self._old_package_dict[d].append(d + '/' + f)
return self._old_package_dict
def generate_delta_xml(self):
"""take the delta rpm output dir, process all the drpm files
produce the text output for the presto/delta xml metadata"""
# go through the drpm dir
# for each file -store the drpm info in a dict based on its target. Just
# appending the output. for each of the keys in the dict, return
# the tag for the target + each of the drpm infos + closure for the target
# tag
targets = {}
results = []
for drpm_fn in self.getFileList(self.conf.deltadir, '.drpm'):
drpm_rel_fn = os.path.normpath(self.conf.delta_relative +
'/' + drpm_fn) # this is annoying
drpm_po = yumbased.CreateRepoPackage(self.ts,
self.conf.deltadir + '/' + drpm_fn, sumtype=self.conf.sumtype)
drpm = deltarpms.DeltaRPMPackage(drpm_po, self.conf.outputdir,
drpm_rel_fn)
if not targets.has_key(drpm_po.pkgtup):
targets[drpm_po.pkgtup] = []
targets[drpm_po.pkgtup].append(drpm.xml_dump_metadata())
for (n, a, e, v, r) in targets.keys():
results.append(""" <newpackage name="%s" epoch="%s" version="%s" release="%s" arch="%s">\n""" % (
n, e, v, r, a))
results.extend(targets[(n,a,e,v,r)])
# for src in targets[(n, a, e, v, r)]:
# results.append(src)
results.append(" </newpackage>\n")
return ' '.join(results)
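# Illustrative shape of the fragment returned above (values made up; the
# per-drpm entries come from DeltaRPMPackage.xml_dump_metadata()):
#
#   <newpackage name="foo" epoch="0" version="1.2" release="3" arch="x86_64">
#     ... one delta entry per applicable drpm ...
#   </newpackage>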
def _createRepoDataObject(self, mdfile, mdtype, compress=True,
compress_type=None, attribs={}):
"""return random metadata as RepoData object to be added to RepoMD
mdfile = complete path to file
mdtype = the metadata type to use
compress = compress the file before including it
"""
# copy the file over here
sfile = os.path.basename(mdfile)
fo = open(mdfile, 'r')
outdir = os.path.join(self.conf.outputdir, self.conf.tempdir)
if not compress_type:
compress_type = self.conf.compress_type
if compress:
sfile = '%s.%s' % (sfile, compress_type)
outfn = os.path.join(outdir, sfile)
output = compressOpen(outfn, mode='wb', compress_type=compress_type)
else:
outfn = os.path.join(outdir, sfile)
output = open(outfn, 'w')
output.write(fo.read())
output.close()
fo.seek(0)
open_csum = misc.checksum(self.conf.sumtype, fo)
fo.close()
if self.conf.unique_md_filenames:
(csum, outfn) = checksum_and_rename(outfn, self.conf.sumtype)
sfile = os.path.basename(outfn)
else:
if compress:
csum = misc.checksum(self.conf.sumtype, outfn)
else:
csum = open_csum
thisdata = RepoData()
thisdata.type = mdtype
thisdata.location = (self.conf.baseurl, os.path.join(self.conf.finaldir, sfile))
thisdata.checksum = (self.conf.sumtype, csum)
if compress:
thisdata.openchecksum = (self.conf.sumtype, open_csum)
thisdata.size = str(os.stat(outfn).st_size)
thisdata.timestamp = str(int(os.stat(outfn).st_mtime))
for (k, v) in attribs.items():
setattr(thisdata, k, str(v))
return thisdata
def doRepoMetadata(self):
"""wrapper to generate the repomd.xml file that stores the info
on the other files"""
repomd = RepoMD('repoid')
repomd.revision = self.conf.revision
repopath = os.path.join(self.conf.outputdir, self.conf.tempdir)
repofilepath = os.path.join(repopath, self.conf.repomdfile)
if self.conf.content_tags:
repomd.tags['content'] = self.conf.content_tags
if self.conf.distro_tags:
repomd.tags['distro'] = self.conf.distro_tags
# NOTE - test out the cpeid silliness here
if self.conf.repo_tags:
repomd.tags['repo'] = self.conf.repo_tags
sumtype = self.conf.sumtype
workfiles = [(self.conf.otherfile, 'other',),
(self.conf.filelistsfile, 'filelists'),
(self.conf.primaryfile, 'primary')]
if self.conf.deltas:
workfiles.append((self.conf.deltafile, 'prestodelta'))
if self.conf.database:
if not self.conf.quiet: self.callback.log('Generating sqlite DBs')
try:
dbversion = str(sqlitecachec.DBVERSION)
except AttributeError:
dbversion = '9'
#FIXME - in theory some sort of try/except here
rp = sqlitecachec.RepodataParserSqlite(repopath, repomd.repoid, None)
for (rpm_file, ftype) in workfiles:
# when we fix y-m-p and non-gzipped xml files - then we can make this just add
# self.conf.compress_type
if ftype in ('other', 'filelists', 'primary'):
rpm_file = rpm_file + '.' + 'gz'
elif rpm_file.find('.') != -1 and rpm_file.split('.')[-1] not in _available_compression:
rpm_file = rpm_file + '.' + self.conf.compress_type
complete_path = os.path.join(repopath, rpm_file)
zfo = compressOpen(complete_path)
# This is misc.checksum() done locally so we can get the size too.
data = misc.Checksums([sumtype])
while data.read(zfo, 2**16):
pass
uncsum = data.hexdigest(sumtype)
unsize = len(data)
zfo.close()
csum = misc.checksum(sumtype, complete_path)
timestamp = os.stat(complete_path)[8]
db_csums = {}
db_compressed_sums = {}
if self.conf.database:
if ftype in ['primary', 'filelists', 'other']:
if self.conf.verbose:
self.callback.log("Starting %s db creation: %s" % (ftype,
time.ctime()))
if ftype == 'primary':
#FIXME - in theory some sort of try/except here
# TypeError appears to be raised, sometimes :(
rp.getPrimary(complete_path, csum)
elif ftype == 'filelists':
#FIXME and here
rp.getFilelists(complete_path, csum)
elif ftype == 'other':
#FIXME and here
rp.getOtherdata(complete_path, csum)
if ftype in ['primary', 'filelists', 'other']:
tmp_result_name = '%s.xml.gz.sqlite' % ftype
tmp_result_path = os.path.join(repopath, tmp_result_name)
good_name = '%s.sqlite' % ftype
resultpath = os.path.join(repopath, good_name)
# compat compression for rhel5 compatibility from fedora :(
compress_type = self.conf.compress_type
if self.compat_compress:
compress_type = 'bz2'
# rename from silly name to not silly name
os.rename(tmp_result_path, resultpath)
compressed_name = '%s.%s' % (good_name, compress_type)
result_compressed = os.path.join(repopath, compressed_name)
db_csums[ftype] = misc.checksum(sumtype, resultpath)
# compress the files
compressFile(resultpath, result_compressed, compress_type)
# csum the compressed file
db_compressed_sums[ftype] = misc.checksum(sumtype,
result_compressed)
# timestamp+size the uncompressed file
un_stat = os.stat(resultpath)
# remove the uncompressed file
os.unlink(resultpath)
if self.conf.unique_md_filenames:
csum_compressed_name = '%s-%s.%s' % (
db_compressed_sums[ftype], good_name, compress_type)
csum_result_compressed = os.path.join(repopath,
csum_compressed_name)
os.rename(result_compressed, csum_result_compressed)
result_compressed = csum_result_compressed
compressed_name = csum_compressed_name
# timestamp+size the compressed file
db_stat = os.stat(result_compressed)
# add this data as a section to the repomdxml
db_data_type = '%s_db' % ftype
data = RepoData()
data.type = db_data_type
data.location = (self.conf.baseurl,
os.path.join(self.conf.finaldir, compressed_name))
data.checksum = (sumtype, db_compressed_sums[ftype])
data.timestamp = str(int(db_stat.st_mtime))
data.size = str(db_stat.st_size)
data.opensize = str(un_stat.st_size)
data.openchecksum = (sumtype, db_csums[ftype])
data.dbversion = dbversion
if self.conf.verbose:
self.callback.log("Ending %s db creation: %s" % (ftype,
time.ctime()))
repomd.repoData[data.type] = data
data = RepoData()
data.type = ftype
data.checksum = (sumtype, csum)
data.timestamp = str(timestamp)
data.size = str(os.stat(os.path.join(repopath, rpm_file)).st_size)
data.opensize = str(unsize)
data.openchecksum = (sumtype, uncsum)
if self.conf.unique_md_filenames:
if ftype in ('primary', 'filelists', 'other'):
compress = 'gz'
else:
compress = self.conf.compress_type
main_name = '.'.join(rpm_file.split('.')[:-1])
res_file = '%s-%s.%s' % (csum, main_name, compress)
orig_file = os.path.join(repopath, rpm_file)
dest_file = os.path.join(repopath, res_file)
os.rename(orig_file, dest_file)
else:
res_file = rpm_file
rpm_file = res_file
href = os.path.join(self.conf.finaldir, rpm_file)
data.location = (self.conf.baseurl, href)
repomd.repoData[data.type] = data
if not self.conf.quiet and self.conf.database:
self.callback.log('Sqlite DBs complete')
if self.conf.groupfile is not None:
mdcontent = self._createRepoDataObject(self.conf.groupfile, 'group_gz')
repomd.repoData[mdcontent.type] = mdcontent
mdcontent = self._createRepoDataObject(self.conf.groupfile, 'group',
compress=False)
repomd.repoData[mdcontent.type] = mdcontent
if self.conf.additional_metadata:
for md_type, md_file in self.conf.additional_metadata.items():
mdcontent = self._createRepoDataObject(md_file, md_type)
repomd.repoData[mdcontent.type] = mdcontent
# FIXME - disabled until we decide how best to use this
#if self.rpmlib_reqs:
# rpmlib = reporoot.newChild(rpmns, 'lib', None)
# for r in self.rpmlib_reqs.keys():
# req = rpmlib.newChild(rpmns, 'requires', r)
# save it down
try:
fo = open(repofilepath, 'w')
fo.write(repomd.dump_xml())
fo.close()
except (IOError, OSError, TypeError), e:
self.callback.errorlog(
_('Error saving temp file for repomd.xml: %s') % repofilepath)
self.callback.errorlog('Error was: %s' % str(e))
fo.close()
raise MDError, 'Could not save temp file: %s' % repofilepath
def doFinalMove(self):
"""move the just-created repodata from .repodata to repodata
also make sure to preserve any files we didn't mess with in the
metadata dir"""
output_final_dir = os.path.join(self.conf.outputdir, self.conf.finaldir)
output_old_dir = os.path.join(self.conf.outputdir, self.conf.olddir)
if os.path.exists(output_final_dir):
try:
os.rename(output_final_dir, output_old_dir)
except:
raise MDError, _('Error moving final %s to old dir %s' % (
output_final_dir, output_old_dir))
output_temp_dir = os.path.join(self.conf.outputdir, self.conf.tempdir)
try:
os.rename(output_temp_dir, output_final_dir)
except:
# put the old stuff back
os.rename(output_old_dir, output_final_dir)
raise MDError, _('Error moving final metadata into place')
for f in ['primaryfile', 'filelistsfile', 'otherfile', 'repomdfile',
'groupfile']:
if getattr(self.conf, f):
fn = os.path.basename(getattr(self.conf, f))
else:
continue
oldfile = os.path.join(output_old_dir, fn)
if os.path.exists(oldfile):
try:
os.remove(oldfile)
except OSError, e:
raise MDError, _(
'Could not remove old metadata file: %s: %s') % (oldfile, e)
old_to_remove = []
old_pr = []
old_fl = []
old_ot = []
old_pr_db = []
old_fl_db = []
old_ot_db = []
for f in os.listdir(output_old_dir):
oldfile = os.path.join(output_old_dir, f)
finalfile = os.path.join(output_final_dir, f)
for (end,lst) in (('-primary.sqlite', old_pr_db), ('-primary.xml', old_pr),
('-filelists.sqlite', old_fl_db), ('-filelists.xml', old_fl),
('-other.sqlite', old_ot_db), ('-other.xml', old_ot)):
fn = '.'.join(f.split('.')[:-1])
if fn.endswith(end):
lst.append(oldfile)
break
# make a list of the old metadata files we don't want to remove.
for lst in (old_pr, old_fl, old_ot, old_pr_db, old_fl_db, old_ot_db):
sortlst = sorted(lst, key=lambda x: os.path.getmtime(x),
reverse=True)
for thisf in sortlst[self.conf.retain_old_md:]:
old_to_remove.append(thisf)
for f in os.listdir(output_old_dir):
oldfile = os.path.join(output_old_dir, f)
finalfile = os.path.join(output_final_dir, f)
fn = '.'.join(f.split('.')[:-1])
if fn in ('filelists.sqlite', 'other.sqlite',
'primary.sqlite') or oldfile in old_to_remove:
try:
os.remove(oldfile)
except (OSError, IOError), e:
raise MDError, _(
'Could not remove old metadata file: %s: %s') % (oldfile, e)
continue
if os.path.exists(finalfile):
# The final dir already has a file with this name; just drop the old copy.
try:
if os.path.isdir(oldfile):
shutil.rmtree(oldfile)
else:
os.remove(oldfile)
except OSError, e:
raise MDError, _(
'Could not remove old metadata file: %s: %s') % (oldfile, e)
else:
try:
os.rename(oldfile, finalfile)
except OSError, e:
msg = _('Could not restore old non-metadata file: %s -> %s') % (oldfile, finalfile)
msg += _('Error was %s') % e
raise MDError, msg
self._cleanup_tmp_repodata_dir()
self._cleanup_update_tmp_dir()
self._write_out_read_pkgs_list()
def _cleanup_update_tmp_dir(self):
if not self.conf.update:
return
shutil.rmtree(self.oldData._repo.basecachedir, ignore_errors=True)
shutil.rmtree(self.oldData._repo.base_persistdir, ignore_errors=True)
def _write_out_read_pkgs_list(self):
# write out the read_pkgs_list file with self.read_pkgs
if self.conf.read_pkgs_list:
try:
fo = open(self.conf.read_pkgs_list, 'w')
fo.write('\n'.join(self.read_pkgs))
fo.flush()
fo.close()
except (OSError, IOError), e:
self.errorlog(_('Could not write out readpkgs list: %s')
% self.conf.read_pkgs_list)
self.errorlog(_('Error was %s') % e)
def _cleanup_tmp_repodata_dir(self):
output_old_dir = os.path.join(self.conf.outputdir, self.conf.olddir)
output_temp_dir = os.path.join(self.conf.outputdir, self.conf.tempdir)
for dirbase in (self.conf.olddir, self.conf.tempdir):
dirpath = os.path.join(self.conf.outputdir, dirbase)
if os.path.exists(dirpath):
try:
os.rmdir(dirpath)
except OSError, e:
self.errorlog(_('Could not remove temp metadata dir: %s')
% dirbase)
self.errorlog(_('Error was %s') % e)
self.errorlog(_('Please clean up this directory manually.'))
# our worker tmp path
if hasattr(self, '_worker_tmp_path') and os.path.exists(self._worker_tmp_path):
shutil.rmtree(self._worker_tmp_path, ignore_errors=True)
def setup_sqlite_dbs(self, initdb=True):
"""sets up the sqlite dbs w/table schemas and db_infos"""
destdir = os.path.join(self.conf.outputdir, self.conf.tempdir)
try:
self.md_sqlite = MetaDataSqlite(destdir)
except sqlite.OperationalError, e:
raise MDError, _('Cannot create sqlite databases: %s.\n'\
'Maybe you need to clean up a .repodata dir?') % e
class SplitMetaDataGenerator(MetaDataGenerator):
"""takes a series of dirs and creates repodata for all of them
most commonly used with -u media:// - if no outputdir is specified
it will create the repodata in the first dir in the list of dirs
"""
def __init__(self, config_obj=None, callback=None):
MetaDataGenerator.__init__(self, config_obj=config_obj, callback=callback)
def _getFragmentUrl(self, url, fragment):
import urlparse
urlparse.uses_fragment.append('media')
if not url:
return url
(scheme, netloc, path, query, fragid) = urlparse.urlsplit(url)
return urlparse.urlunsplit((scheme, netloc, path, query, str(fragment)))
def doPkgMetadata(self):
"""all the heavy lifting for the package metadata"""
if len(self.conf.directories) == 1:
MetaDataGenerator.doPkgMetadata(self)
return
if self.conf.update:
self._setup_old_metadata_lookup()
filematrix = {}
for mydir in self.conf.directories:
if os.path.isabs(mydir):
thisdir = mydir
else:
if mydir.startswith('../'):
thisdir = os.path.realpath(mydir)
else:
thisdir = os.path.join(self.conf.basedir, mydir)
filematrix[mydir] = self.getFileList(thisdir, '.rpm')
# pkglist is a bit different for split media, as we have to know
# which directory each package belongs to. So we walk the directory
# and then filter; this could be faster if we skipped the walk.
if self.conf.pkglist:
pkglist = set(self.conf.pkglist)
pkgs = []
for fname in filematrix[mydir]:
if fname not in pkglist:
continue
pkgs.append(fname)
filematrix[mydir] = pkgs
self.trimRpms(filematrix[mydir])
self.pkgcount += len(filematrix[mydir])
mediano = 1
self.current_pkg = 0
self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, mediano)
try:
self.openMetadataDocs()
for mydir in self.conf.directories:
self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, mediano)
self.writeMetadataDocs(filematrix[mydir], mydir)
mediano += 1
self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, 1)
self.closeMetadataDocs()
except (IOError, OSError), e:
raise MDError, _('Cannot access/write repodata files: %s') % e
class MetaDataSqlite(object):
def __init__(self, destdir):
self.pri_sqlite_file = os.path.join(destdir, 'primary.sqlite')
self.pri_cx = sqlite.Connection(self.pri_sqlite_file)
self.file_sqlite_file = os.path.join(destdir, 'filelists.sqlite')
self.file_cx = sqlite.Connection(self.file_sqlite_file)
self.other_sqlite_file = os.path.join(destdir, 'other.sqlite')
self.other_cx = sqlite.Connection(self.other_sqlite_file)
self.primary_cursor = self.pri_cx.cursor()
self.filelists_cursor = self.file_cx.cursor()
self.other_cursor = self.other_cx.cursor()
self.create_primary_db()
self.create_filelists_db()
self.create_other_db()
def create_primary_db(self):
# make the tables
schema = [
"""PRAGMA synchronous="OFF";""",
"""pragma locking_mode="EXCLUSIVE";""",
"""CREATE TABLE conflicts ( name TEXT, flags TEXT, epoch TEXT, version TEXT, release TEXT, pkgKey INTEGER );""",
"""CREATE TABLE db_info (dbversion INTEGER, checksum TEXT);""",
"""CREATE TABLE files ( name TEXT, type TEXT, pkgKey INTEGER);""",
"""CREATE TABLE obsoletes ( name TEXT, flags TEXT, epoch TEXT, version TEXT, release TEXT, pkgKey INTEGER );""",
"""CREATE TABLE packages ( pkgKey INTEGER PRIMARY KEY, pkgId TEXT, name TEXT, arch TEXT, version TEXT, epoch TEXT, release TEXT, summary TEXT, description TEXT, url TEXT, time_file INTEGER, time_build INTEGER, rpm_license TEXT, rpm_vendor TEXT, rpm_group TEXT, rpm_buildhost TEXT, rpm_sourcerpm TEXT, rpm_header_start INTEGER, rpm_header_end INTEGER, rpm_packager TEXT, size_package INTEGER, size_installed INTEGER, size_archive INTEGER, location_href TEXT, location_base TEXT, checksum_type TEXT);""",
"""CREATE TABLE provides ( name TEXT, flags TEXT, epoch TEXT, version TEXT, release TEXT, pkgKey INTEGER );""",
"""CREATE TABLE requires ( name TEXT, flags TEXT, epoch TEXT, version TEXT, release TEXT, pkgKey INTEGER , pre BOOL DEFAULT FALSE);""",
"""CREATE INDEX filenames ON files (name);""",
"""CREATE INDEX packageId ON packages (pkgId);""",
"""CREATE INDEX packagename ON packages (name);""",
"""CREATE INDEX pkgconflicts on conflicts (pkgKey);""",
"""CREATE INDEX pkgobsoletes on obsoletes (pkgKey);""",
"""CREATE INDEX pkgprovides on provides (pkgKey);""",
"""CREATE INDEX pkgrequires on requires (pkgKey);""",
"""CREATE INDEX providesname ON provides (name);""",
"""CREATE INDEX requiresname ON requires (name);""",
"""CREATE TRIGGER removals AFTER DELETE ON packages
BEGIN
DELETE FROM files WHERE pkgKey = old.pkgKey;
DELETE FROM requires WHERE pkgKey = old.pkgKey;
DELETE FROM provides WHERE pkgKey = old.pkgKey;
DELETE FROM conflicts WHERE pkgKey = old.pkgKey;
DELETE FROM obsoletes WHERE pkgKey = old.pkgKey;
END;""",
"""INSERT into db_info values (%s, 'direct_create');""" % sqlitecachec.DBVERSION,
]
for cmd in schema:
executeSQL(self.primary_cursor, cmd)
def create_filelists_db(self):
schema = [
"""PRAGMA synchronous="OFF";""",
"""pragma locking_mode="EXCLUSIVE";""",
"""CREATE TABLE db_info (dbversion INTEGER, checksum TEXT);""",
"""CREATE TABLE filelist ( pkgKey INTEGER, dirname TEXT, filenames TEXT, filetypes TEXT);""",
"""CREATE TABLE packages ( pkgKey INTEGER PRIMARY KEY, pkgId TEXT);""",
"""CREATE INDEX dirnames ON filelist (dirname);""",
"""CREATE INDEX keyfile ON filelist (pkgKey);""",
"""CREATE INDEX pkgId ON packages (pkgId);""",
"""CREATE TRIGGER remove_filelist AFTER DELETE ON packages
BEGIN
DELETE FROM filelist WHERE pkgKey = old.pkgKey;
END;""",
"""INSERT into db_info values (%s, 'direct_create');""" % sqlitecachec.DBVERSION,
]
for cmd in schema:
executeSQL(self.filelists_cursor, cmd)
def create_other_db(self):
schema = [
"""PRAGMA synchronous="OFF";""",
"""pragma locking_mode="EXCLUSIVE";""",
"""CREATE TABLE changelog ( pkgKey INTEGER, author TEXT, date INTEGER, changelog TEXT);""",
"""CREATE TABLE db_info (dbversion INTEGER, checksum TEXT);""",
"""CREATE TABLE packages ( pkgKey INTEGER PRIMARY KEY, pkgId TEXT);""",
"""CREATE INDEX keychange ON changelog (pkgKey);""",
"""CREATE INDEX pkgId ON packages (pkgId);""",
"""CREATE TRIGGER remove_changelogs AFTER DELETE ON packages
BEGIN
DELETE FROM changelog WHERE pkgKey = old.pkgKey;
END;""",
"""INSERT into db_info values (%s, 'direct_create');""" % sqlitecachec.DBVERSION,
]
for cmd in schema:
executeSQL(self.other_cursor, cmd)
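# A minimal standalone sketch (stdlib sqlite3 only, not the sqlite/executeSQL
# helpers used above) of what the "removals" trigger in create_primary_db()
# buys us: deleting a row from packages cascades to the dependent tables for
# that pkgKey, so the per-package rows never go stale.
import sqlite3

cx = sqlite3.connect(":memory:")
cur = cx.cursor()
cur.execute("CREATE TABLE packages (pkgKey INTEGER PRIMARY KEY, pkgId TEXT)")
cur.execute("CREATE TABLE files (name TEXT, type TEXT, pkgKey INTEGER)")
cur.execute("""CREATE TRIGGER removals AFTER DELETE ON packages
               BEGIN
                   DELETE FROM files WHERE pkgKey = old.pkgKey;
               END;""")
cur.execute("INSERT INTO packages VALUES (1, 'abc')")
cur.execute("INSERT INTO files VALUES ('/usr/bin/foo', 'file', 1)")
cur.execute("DELETE FROM packages WHERE pkgKey = 1")
assert cur.execute("SELECT COUNT(*) FROM files").fetchone()[0] == 0
cx.close()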
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2014 Andy Stewart
#
# Author: Andy Stewart <[email protected]>
# Maintainer: Andy Stewart <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from PyQt5 import QtCore
from PyQt5.QtNetwork import QNetworkCookieJar, QNetworkCookie
from PyQt5.QtCore import QCoreApplication, QEvent
from PyQt5.QtNetwork import QNetworkProxy
if os.name == 'posix':
QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads, True)
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl, Qt
from PyQt5 import QtGui
import time
from epc.server import ThreadingEPCServer
import threading
from PyQt5.QtWidgets import QWidget
from PyQt5.QtGui import QPainter, QImage
import functools
from utils import get_parent_dir
from xutils import get_xlib_display, grab_focus, ActiveWindowWatcher, get_parent_window_id
from send_key import send_string
class postGui(QtCore.QObject):
throughThread = QtCore.pyqtSignal(object, object)
def __init__(self, inclass=True):
super(postGui, self).__init__()
self.throughThread.connect(self.onSignalReceived)
self.inclass = inclass
def __call__(self, func):
self._func = func
@functools.wraps(func)
def objCall(*args, **kwargs):
self.emitSignal(args, kwargs)
return objCall
def emitSignal(self, args, kwargs):
self.throughThread.emit(args, kwargs)
def onSignalReceived(self, args, kwargs):
if self.inclass:
obj, args = args[0], args[1:]
self._func(obj, *args, **kwargs)
else:
self._func(*args, **kwargs)
class WebPage(QWebPage):
def __init__(self):
super(WebPage, self).__init__()
def acceptNavigationRequest(self, frame, request, type):
# Handle myself if got user event.
if type == QWebPage.NavigationTypeLinkClicked:
if self.view().press_ctrl_flag:
call_method("open-url", [request.url().toString()])
else:
self.view().load(request.url())
# Return False to stop default behavior.
return False
# Otherwise, use default behavior.
return QWebPage.acceptNavigationRequest(self, frame, request, type)
def javaScriptConsoleMessage(self, msg, lineNumber, sourceID):
global print_console_info
if print_console_info:
call_message("JsConsole(%s:%d): %s" % (sourceID, lineNumber, msg))
class BrowserBuffer(QWebView):
redrawScreenshot = QtCore.pyqtSignal(object)
updateProgress = QtCore.pyqtSignal()
def __init__(self, buffer_id, buffer_width, buffer_height):
super(BrowserBuffer, self).__init__()
self.buffer_id = buffer_id
self.buffer_width = buffer_width
self.buffer_height = buffer_height
self.setPage(WebPage())
self.page().mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
cookie_jar.restore_cookies()
self.page().networkAccessManager().setCookieJar(cookie_jar)
self.page().userAgentForUrl = self.customize_user_agent
self.settings().setUserStyleSheetUrl(QUrl.fromLocalFile(os.path.join(get_parent_dir(__file__), "theme.css")))
self.settings().setAttribute(QWebSettings.PluginsEnabled, True)
self.settings().setAttribute(QWebSettings.JavascriptEnabled, True)
self.settings().setAttribute(QWebSettings.JavascriptCanOpenWindows, True)
self.settings().setFontFamily(QWebSettings.StandardFont, "Sans")
self.adjust_size(self.buffer_width, self.buffer_height)
self.view_dict = {}
self.titleChanged.connect(self.change_title)
self.press_ctrl_flag = False
self.loading_flag = False
self.loading_percent = 0
self.loadFinished.connect(self.handle_load_finished)
self.loadStarted.connect(self.handle_load_started)
self.loadProgress.connect(self.handle_load_progress)
def handle_load_started(self, *args):
self.loading_flag = True
self.loading_percent = 0
self.updateProgress.emit()
def handle_load_finished(self, *args):
self.loading_flag = False
self.loading_percent = 100
self.updateProgress.emit()
def handle_load_progress(self, percent):
self.loading_flag = True
self.loading_percent = percent
self.updateProgress.emit()
def customize_user_agent(self, url):
return "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.114 Safari/537.36"
def change_title(self, title):
call_method("change-buffer-title", [self.buffer_id, title])
def eventFilter(self, obj, event):
if event.type() in [QEvent.KeyPress, QEvent.KeyRelease,
QEvent.MouseButtonPress, QEvent.MouseButtonRelease,
QEvent.MouseMove, QEvent.MouseButtonDblClick, QEvent.Wheel,
QEvent.InputMethod, QEvent.InputMethodQuery, QEvent.ShortcutOverride,
QEvent.ActivationChange, QEvent.Enter, QEvent.WindowActivate,
]:
QApplication.sendEvent(self, event)
if event.type() == QEvent.KeyPress and event.key() == QtCore.Qt.Key_Control:
self.press_ctrl_flag = True
elif event.type() == QEvent.KeyRelease and event.key() == QtCore.Qt.Key_Control:
self.press_ctrl_flag = False
global emacs_xwindow_id
xlib_display = get_xlib_display()
xwindow = xlib_display.create_resource_object("window", emacs_xwindow_id)
mask = []
event_key = event.text()
if event.modifiers() & QtCore.Qt.AltModifier == QtCore.Qt.AltModifier:
mask.append("Alt")
elif event.modifiers() & QtCore.Qt.ControlModifier == QtCore.Qt.ControlModifier:
mask.append("Ctrl")
elif event.modifiers() & QtCore.Qt.ShiftModifier == QtCore.Qt.ShiftModifier:
mask.append("Shift")
elif event.modifiers() & QtCore.Qt.MetaModifier == QtCore.Qt.MetaModifier:
mask.append("Super")
send_string(xwindow, event_key, mask, event.type() == QEvent.KeyPress)
xlib_display.sync()
else:
# Skip Paint (12) and UpdateRequest (77) events to avoid flooding Emacs.
if event.type() not in [QEvent.Paint, QEvent.UpdateRequest]:
    call_message("%s %s" % (event.type(), event))
return False
def add_view(self, view_id, x, y, w, h):
view = BrowserView(self, view_id)
self.view_dict[view_id] = view
self.update_view(view_id, x, y, w, h)
view.show()
def remove_view(self, view_id):
if view_id in self.view_dict:
self.view_dict[view_id].remove()
self.view_dict.pop(view_id)
def update_view(self, view_id, x, y, w, h):
self.view_dict[view_id].moveresize(x, y, w, h)
def remove_all_views(self):
for view_id in self.view_dict.keys():
self.remove_view(view_id)
def adjust_size(self, width, height):
self.buffer_width = width
self.buffer_height = height
self.resize(self.buffer_width, self.buffer_height)
@postGui()
def redraw(self):
if len(self.view_dict) > 0:
qimage = QImage(self.buffer_width, self.buffer_height, QImage.Format_ARGB32)
self.render(qimage)
self.redrawScreenshot.emit(qimage)
@postGui()
def open_url(self, url):
self.load(QUrl(url))
class BrowserView(QWidget):
def __init__(self, browser_buffer, view_id):
super(BrowserView, self).__init__()
self.browser_buffer = browser_buffer
self.view_id = view_id
self.setWindowFlags(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground, True)
self.setContentsMargins(0, 0, 0, 0)
self.browser_buffer.redrawScreenshot.connect(self.updateView)
self.browser_buffer.updateProgress.connect(self.updateProgress)
self.qimage = None
self.installEventFilter(browser_buffer)
def remove(self):
self.browser_buffer.redrawScreenshot.disconnect(self.updateView)
self.destroy()
def paintEvent(self, event):
painter = QPainter(self)
if self.qimage:
painter.drawImage(QtCore.QRect(0, 0, self.width(), self.height()), self.qimage)
else:
painter.setBrush(QtGui.QColor(255, 255, 255, 255))
painter.drawRect(0, 0, self.browser_buffer.buffer_width, self.browser_buffer.buffer_height)
if self.browser_buffer.loading_flag:
painter.setPen(QtGui.QColor(10, 138, 255, 255))
painter.setBrush(QtGui.QColor(10, 138, 255, 255))
painter.drawRect(0, 0, self.browser_buffer.buffer_width * self.browser_buffer.loading_percent / 100, 1)
painter.end()
@postGui()
def updateView(self, qimage):
self.qimage = qimage
self.update()
@postGui()
def updateProgress(self):
self.update()
def moveresize(self, x, y, w, h):
self.resize(w, h)
self.reparent(x, y)
def adjust_size(self, x, y, w, h):
self.moveresize(x, y, w, h)
self.browser_buffer.adjust_size(w, h)
def reparent(self, x, y):
xlib_display = get_xlib_display()
browser_xwindow_id = self.winId().__int__()
browser_xwindow = xlib_display.create_resource_object("window", browser_xwindow_id)
emacs_xwindow = xlib_display.create_resource_object("window", emacs_xwindow_id)
browser_xwindow.reparent(emacs_xwindow, x, y)
xlib_display.sync()
class CookieJar(QNetworkCookieJar):
def __init__(self, parent = None):
QNetworkCookieJar.__init__(self, parent)
self.cookie_path = os.path.expanduser("~/.emacs.d/deepin-emacs/webkit-cookies")
def save_cookies(self):
allCookies = QNetworkCookieJar.allCookies(self)
cookie_dir = os.path.dirname(self.cookie_path)
if not os.path.exists(cookie_dir):
os.makedirs(cookie_dir)
with open(self.cookie_path, 'w') as f:
lines = ''
for cookie in allCookies:
lines = lines + cookie.toRawForm() + '\r\n'
f.writelines(lines)
def restore_cookies(self):
if os.path.exists(self.cookie_path):
with open(self.cookie_path, 'r') as f:
lines = ''
for line in f:
lines = lines + line
allCookies = QNetworkCookie.parseCookies(lines)
QNetworkCookieJar.setAllCookies(self, allCookies)
if __name__ == '__main__':
import sys
import signal
app = QApplication(sys.argv)
server = ThreadingEPCServer(('localhost', 0), log_traceback=True)
server_thread = threading.Thread(target=server.serve_forever)
server.allow_reuse_address = True
emacs_xwindow_id = 0
buffer_dict = {}
cookie_jar = CookieJar()
print_console_info = False
if len(sys.argv) >= 2 and sys.argv[1] == "--enable-proxy":
QNetworkProxy.setApplicationProxy(QNetworkProxy(QNetworkProxy.Socks5Proxy, "127.0.0.1", 7070))
def call_message(message):
call_method("message", [message])
def call_method(method_name, args):
handler = server.clients[0]
handler.call(method_name, args)
def handle_active_window(active_window_id):
global emacs_xwindow_id
emacs_real_id = get_parent_window_id(emacs_xwindow_id)
call_method("message", ["handle_active_window: %s %s %s" % (active_window_id, emacs_xwindow_id, emacs_real_id)])
if active_window_id == emacs_real_id:
call_method("focus-browser-view", [])
@postGui(False)
def init(emacs_xid):
global emacs_xwindow_id
emacs_xwindow_id = int(emacs_xid)
# NOTE: every epc method must be wrapped with postGui,
# because the epc server runs in a sub-thread.
@postGui(False)
def create_buffer(buffer_id, buffer_url, buffer_width, buffer_height):
if not buffer_id in buffer_dict:
buffer = BrowserBuffer(buffer_id, buffer_width, buffer_height)
buffer.open_url(buffer_url)
buffer_dict[buffer_id] = buffer
@postGui(False)
def remove_buffer(buffer_id):
if buffer_id in buffer_dict:
cookie_jar.save_cookies()
buffer = buffer_dict[buffer_id]
buffer.remove_all_views()
buffer_dict.pop(buffer_id)
buffer.destroy()
call_message("Remove buffer %s" % buffer_id)
@postGui(False)
def adjust_size(buffer_id, w, h):
if buffer_id in buffer_dict:
buffer_dict[buffer_id].adjust_size(w, h)
@postGui(False)
def update_views(view_infos):
buffer_view_dict = {}
for view_info in view_infos:
[buffer_id, x, y, w, h] = view_info
view_id = "%s_%s" % (x, y)
if buffer_id in buffer_dict:
if not buffer_id in buffer_view_dict:
buffer_view_dict[buffer_id] = {}
buffer_view_dict[buffer_id][view_id] = (x, y, w, h)
else:
call_message("Buffer id %s is not exist!" % buffer_id)
for buffer in buffer_dict.values():
if buffer.buffer_id in buffer_view_dict:
emacs_view_ids = buffer_view_dict[buffer.buffer_id].keys()
buffer_view_ids = buffer.view_dict.keys()
for emacs_view_id in emacs_view_ids:
(x, y, w, h) = buffer_view_dict[buffer.buffer_id][emacs_view_id]
# Update view.
if emacs_view_id in buffer_view_ids:
buffer.update_view(emacs_view_id, x, y, w, h)
# Create view.
else:
buffer.add_view(emacs_view_id, x, y, w, h)
for buffer_view_id in buffer_view_ids:
# Remove view.
if buffer_view_id not in emacs_view_ids:
buffer.remove_view(buffer_view_id)
else:
buffer.remove_all_views()
@postGui(False)
def focus_view(buffer_id, x, y, w, h):
if buffer_id in buffer_dict:
buffer = buffer_dict[buffer_id]
view_id = "%s_%s" % (x, y)
if view_id in buffer.view_dict:
view = buffer.view_dict[view_id]
view_xwindow_id = view.winId().__int__()
grab_focus(view_xwindow_id)
def toggle_console_info():
global print_console_info
print_console_info = not print_console_info
def update_buffer():
while True:
for buffer in buffer_dict.values():
buffer.redraw()
time.sleep(0.05)
server_thread.start()
server.print_port()
server.register_function(init)
server.register_function(create_buffer)
server.register_function(remove_buffer)
server.register_function(adjust_size)
server.register_function(update_views)
server.register_function(focus_view)
server.register_function(toggle_console_info)
threading.Thread(target=update_buffer).start()
active_window_watcher = ActiveWindowWatcher()
active_window_watcher.activeWindowChanged.connect(handle_active_window)
active_window_watcher.start()
signal.signal(signal.SIGINT, signal.SIG_DFL)
sys.exit(app.exec_())
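# A minimal standalone sketch of the postGui pattern used throughout this
# module (run it on its own; it assumes PyQt5 plus the postGui class defined
# above being importable). The decorated function is called from a worker
# thread, but its body runs in the Qt GUI thread because the call is routed
# through a queued pyqtSignal connection -- the reason every EPC-exposed
# function above is wrapped with @postGui(False).
import sys
import threading
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QLabel

app = QApplication(sys.argv)
label = QLabel("waiting for worker...")
label.show()

@postGui(False)
def set_label(text):
    label.setText(text)    # safe: executes in the GUI thread

threading.Thread(target=set_label, args=("updated from worker thread",)).start()
QTimer.singleShot(1000, app.quit)
sys.exit(app.exec_())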
|
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
ZoneManager is responsible to manage access control using FC zoning
when zoning mode is set as 'fabric'.
ZoneManager provides interfaces to add connection and remove connection
for given initiator and target list associated with a FC volume attach and
detach operation.
**Related Flags**
:zone_driver: Used by :class:`ZoneManager`.
Defaults to
`cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver`
:zoning_policy: Used by :class:`ZoneManager`. Defaults to 'initiator-target'
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
from cinder import exception
from cinder.i18n import _, _LI
from cinder.volume import configuration as config
from cinder.zonemanager import fc_common
LOG = logging.getLogger(__name__)
zone_manager_opts = [
cfg.StrOpt('zone_driver',
default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
'.BrcdFCZoneDriver',
help='FC Zone Driver responsible for zone management'),
cfg.StrOpt('zoning_policy',
default='initiator-target',
help='Zoning policy configured by user; valid values include '
'"initiator-target" or "initiator"'),
cfg.StrOpt('fc_fabric_names',
help='Comma separated list of Fibre Channel fabric names.'
' This list of names is used to retrieve other SAN credentials'
' for connecting to each SAN fabric'),
cfg.StrOpt('fc_san_lookup_service',
default='cinder.zonemanager.drivers.brocade'
'.brcd_fc_san_lookup_service.BrcdFCSanLookupService',
help='FC SAN Lookup Service'),
]
CONF = cfg.CONF
CONF.register_opts(zone_manager_opts, group='fc-zone-manager')
class ZoneManager(fc_common.FCCommon):
"""Manages Connection control during attach/detach.
Version History:
1.0 - Initial version
1.0.1 - Added __new__ for singleton
"""
VERSION = "1.0.1"
driver = None
fabric_names = []
def __new__(class_, *args, **kwargs):
if not hasattr(class_, "_instance"):
class_._instance = object.__new__(class_, *args, **kwargs)
return class_._instance
def __init__(self, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
super(ZoneManager, self).__init__(**kwargs)
self.configuration = config.Configuration(zone_manager_opts,
'fc-zone-manager')
self._build_driver()
def _build_driver(self):
zone_driver = self.configuration.zone_driver
LOG.debug("Zone Driver from config: {%s}", zone_driver)
# Initialize vendor specific implementation of FCZoneDriver
self.driver = importutils.import_object(
zone_driver,
configuration=self.configuration)
def get_zoning_state_ref_count(self, initiator_wwn, target_wwn):
"""Zone management state check.
Performs state check for given I-T pair to return the current count of
active attach for the pair.
"""
# TODO(sk): ref count state management
count = 0
# check the state for I-T pair
return count
def add_connection(self, initiator_target_map):
"""Add connection control.
Adds connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
try:
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.debug("Target List: %s", target_list)
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug("Fabric Map after context lookup: %s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
t_list = fabric_map[fabric]
# get valid I-T map to add connection control
i_t_map = {initiator: t_list}
valid_i_t_map = self.get_valid_initiator_target_map(
i_t_map, True)
LOG.info(_LI("Final filtered map for fabric: %s"),
valid_i_t_map)
# Call driver to add connection control
self.driver.add_connection(fabric, valid_i_t_map)
LOG.info(_LI("Add Connection: Finished iterating "
"over all target list"))
except Exception as e:
msg = _("Failed adding connection for fabric=%(fabric)s: "
"Error: %(err)s") % {'fabric': connected_fabric,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.ZoneManagerException(reason=msg)
def delete_connection(self, initiator_target_map):
"""Delete connection.
Updates/deletes connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
try:
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.info(_LI("Delete connection Target List: %s"),
target_list)
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug("Delete connection Fabric Map from SAN "
"context: %s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
t_list = fabric_map[fabric]
# get valid I-T map to add connection control
i_t_map = {initiator: t_list}
valid_i_t_map = self.get_valid_initiator_target_map(
i_t_map, False)
LOG.info(_LI("Final filtered map for delete "
"connection: %s"), valid_i_t_map)
# Call driver to delete connection control
if len(valid_i_t_map) > 0:
self.driver.delete_connection(fabric, valid_i_t_map)
LOG.debug("Delete Connection - Finished iterating over all"
" target list")
except Exception as e:
msg = _("Failed removing connection for fabric=%(fabric)s: "
"Error: %(err)s") % {'fabric': connected_fabric,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.ZoneManagerException(reason=msg)
def get_san_context(self, target_wwn_list):
"""SAN lookup for end devices.
Look up each SAN configured and return a map of SAN (fabric IP)
to list of target WWNs visible to the fabric.
"""
fabric_map = self.driver.get_san_context(target_wwn_list)
LOG.debug("Got SAN context: %s", fabric_map)
return fabric_map
def get_valid_initiator_target_map(self, initiator_target_map,
add_control):
"""Reference count check for end devices.
Looks up the reference count for each initiator-target pair from the
map and returns a filtered list based on the operation type
add_control - operation type can be true for add connection control
and false for remove connection control
"""
filtered_i_t_map = {}
for initiator in initiator_target_map.keys():
t_list = initiator_target_map[initiator]
for target in t_list:
count = self.get_zoning_state_ref_count(initiator, target)
if add_control:
if count > 0:
t_list.remove(target)
# update count = count + 1
else:
if count > 1:
t_list.remove(target)
# update count = count - 1
if t_list:
filtered_i_t_map[initiator] = t_list
else:
LOG.info(_LI("No targets to add or remove connection for "
"I: %s"), initiator)
return filtered_i_t_map
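# A self-contained sketch (plain Python, no Cinder imports) of the filtering
# rule documented in get_valid_initiator_target_map(): when adding a
# connection, targets whose I-T pair already has an active attach are dropped;
# when removing, targets still referenced by another attach are dropped.
# The ref_counts dict here is a stand-in for get_zoning_state_ref_count().
def filter_initiator_target_map(initiator_target_map, ref_counts, add_control):
    filtered = {}
    for initiator, targets in initiator_target_map.items():
        keep = []
        for target in targets:
            count = ref_counts.get((initiator, target), 0)
            if add_control and count > 0:
                continue          # already zoned for another attach
            if not add_control and count > 1:
                continue          # other attaches still need the zone
            keep.append(target)
        if keep:
            filtered[initiator] = keep
    return filtered

# Example using the I-T map shown in the add_connection docstring:
i_t_map = {'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']}
print(filter_initiator_target_map(
    i_t_map, {('10008c7cff523b01', '20240002ac000a50'): 1}, True))
# -> {'10008c7cff523b01': ['20240002ac000a40']}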
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from neutron.agent import firewall
from neutron.conf.agent import securitygroups_rpc as sc_cfg
LOG = logging.getLogger(__name__)
sc_cfg.register_securitygroups_opts()
def is_firewall_enabled():
return cfg.CONF.SECURITYGROUP.enable_security_group
def _disable_extension(extension, aliases):
if extension in aliases:
aliases.remove(extension)
def disable_security_group_extension_by_config(aliases):
if not is_firewall_enabled():
LOG.info('Disabled security-group extension.')
_disable_extension('security-group', aliases)
LOG.info('Disabled allowed-address-pairs extension.')
_disable_extension('allowed-address-pairs', aliases)
class SecurityGroupAgentRpc(object):
"""Enables SecurityGroup agent support in agent implementations."""
def __init__(self, context, plugin_rpc, local_vlan_map=None,
defer_refresh_firewall=False, integration_bridge=None):
self.context = context
self.plugin_rpc = plugin_rpc
self.init_firewall(defer_refresh_firewall, integration_bridge)
def _get_trusted_devices(self, device_ids, devices):
trusted_devices = []
# Devices which are already added in firewall ports should
# not be treated as trusted devices but as regular ports
all_devices = devices.copy()
all_devices.update(self.firewall.ports)
device_names = [
dev['device'] for dev in all_devices.values()]
for device_id in device_ids:
if (device_id not in all_devices.keys() and
device_id not in device_names):
trusted_devices.append(device_id)
return trusted_devices
def init_firewall(self, defer_refresh_firewall=False,
integration_bridge=None):
firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop'
LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
firewall_class = firewall.load_firewall_driver_class(firewall_driver)
try:
self.firewall = firewall_class(
integration_bridge=integration_bridge)
except TypeError:
self.firewall = firewall_class()
# The following flag will be set to true if port filter must not be
# applied as soon as a rule or membership notification is received
self.defer_refresh_firewall = defer_refresh_firewall
# Stores devices for which firewall should be refreshed when
# deferred refresh is enabled.
self.devices_to_refilter = set()
# Flag raised when a global refresh is needed
self.global_refresh_firewall = False
self._use_enhanced_rpc = None
@property
def use_enhanced_rpc(self):
if self._use_enhanced_rpc is None:
self._use_enhanced_rpc = (
self._check_enhanced_rpc_is_supported_by_server())
return self._use_enhanced_rpc
def _check_enhanced_rpc_is_supported_by_server(self):
try:
self.plugin_rpc.security_group_info_for_devices(
self.context, devices=[])
except oslo_messaging.UnsupportedVersion:
LOG.warning('security_group_info_for_devices rpc call not '
'supported by the server, falling back to old '
'security_group_rules_for_devices which scales '
'worse.')
return False
return True
def skip_if_noopfirewall_or_firewall_disabled(func):
@functools.wraps(func)
def decorated_function(self, *args, **kwargs):
if (isinstance(self.firewall, firewall.NoopFirewallDriver) or
not is_firewall_enabled()):
LOG.info("Skipping method %s as firewall is disabled "
"or configured as NoopFirewallDriver.",
func.__name__)
else:
return func(self, # pylint: disable=not-callable
*args, **kwargs)
return decorated_function
@skip_if_noopfirewall_or_firewall_disabled
def prepare_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info("Preparing filters for devices %s", device_ids)
self._apply_port_filter(device_ids)
def _apply_port_filter(self, device_ids, update_filter=False):
if self.use_enhanced_rpc:
devices_info = self.plugin_rpc.security_group_info_for_devices(
self.context, list(device_ids))
devices = devices_info['devices']
security_groups = devices_info['security_groups']
security_group_member_ips = devices_info['sg_member_ips']
else:
devices = self.plugin_rpc.security_group_rules_for_devices(
self.context, list(device_ids))
trusted_devices = self._get_trusted_devices(device_ids, devices)
with self.firewall.defer_apply():
if self.use_enhanced_rpc:
LOG.debug("Update security group information for ports %s",
devices.keys())
self._update_security_group_info(
security_groups, security_group_member_ips)
for device in devices.values():
if update_filter:
LOG.debug("Update port filter for %s", device['device'])
self.firewall.update_port_filter(device)
else:
LOG.debug("Prepare port filter for %s", device['device'])
self.firewall.prepare_port_filter(device)
self.firewall.process_trusted_ports(trusted_devices)
def _update_security_group_info(self, security_groups,
security_group_member_ips):
LOG.debug("Update security group information")
for sg_id, sg_rules in security_groups.items():
self.firewall.update_security_group_rules(sg_id, sg_rules)
for remote_sg_id, member_ips in security_group_member_ips.items():
self.firewall.update_security_group_members(
remote_sg_id, member_ips)
def security_groups_rule_updated(self, security_groups):
LOG.info("Security group "
"rule updated %r", security_groups)
self._security_group_updated(
security_groups,
'security_groups',
'sg_rule')
def security_groups_member_updated(self, security_groups):
LOG.info("Security group "
"member updated %r", security_groups)
self._security_group_updated(
security_groups,
'security_group_source_groups',
'sg_member')
def _security_group_updated(self, security_groups, attribute, action_type):
devices = []
sec_grp_set = set(security_groups)
for device in self.firewall.ports.values():
if sec_grp_set & set(device.get(attribute, [])):
devices.append(device['device'])
if devices:
if self.use_enhanced_rpc:
self.firewall.security_group_updated(action_type, sec_grp_set)
if self.defer_refresh_firewall:
LOG.debug("Adding %s devices to the list of devices "
"for which firewall needs to be refreshed",
devices)
self.devices_to_refilter |= set(devices)
else:
self.refresh_firewall(devices)
def remove_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info("Remove device filter for %r", device_ids)
with self.firewall.defer_apply():
for device_id in device_ids:
device = self.firewall.ports.get(device_id)
if device:
self.firewall.remove_port_filter(device)
else:
self.firewall.remove_trusted_ports([device_id])
@skip_if_noopfirewall_or_firewall_disabled
def refresh_firewall(self, device_ids=None):
LOG.info("Refresh firewall rules")
if not device_ids:
device_ids = self.firewall.ports.keys()
if not device_ids:
LOG.info("No ports here to refresh firewall")
return
self._apply_port_filter(device_ids, update_filter=True)
def firewall_refresh_needed(self):
return self.global_refresh_firewall or self.devices_to_refilter
def setup_port_filters(self, new_devices, updated_devices):
"""Configure port filters for devices.
This routine applies filters for new devices and refreshes firewall
rules when devices have been updated, or when there are changes in
security group membership or rules.
:param new_devices: set containing identifiers for new devices
:param updated_devices: set containing identifiers for
updated devices
"""
# These data structures are cleared here in order to avoid
# losing updates occurring during firewall refresh
devices_to_refilter = self.devices_to_refilter
global_refresh_firewall = self.global_refresh_firewall
self.devices_to_refilter = set()
self.global_refresh_firewall = False
# We must call prepare_devices_filter() after we've grabbed
# self.devices_to_refilter since an update for a new port
# could arrive while we're processing, and we need to make
# sure we don't skip it. It will get handled the next time.
if new_devices:
LOG.debug("Preparing device filters for %d new devices",
len(new_devices))
self.prepare_devices_filter(new_devices)
# TODO(salv-orlando): Avoid if possible ever performing the global
# refresh providing a precise list of devices for which firewall
# should be refreshed
if global_refresh_firewall:
LOG.debug("Refreshing firewall for all filtered devices")
self.refresh_firewall()
else:
if self.use_enhanced_rpc and updated_devices:
self.firewall.security_group_updated('sg_member', [],
updated_devices)
# If a device is both in new and updated devices
# avoid reprocessing it
updated_devices = ((updated_devices | devices_to_refilter) -
new_devices)
if updated_devices:
LOG.debug("Refreshing firewall for %d devices",
len(updated_devices))
self.refresh_firewall(updated_devices)
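# A tiny illustration (plain Python) of the set arithmetic used in
# setup_port_filters(): devices pending a deferred refresh are merged with the
# explicitly updated ones, and anything that is also a brand-new device is
# dropped because prepare_devices_filter() already handles it.
new_devices = {'port-a'}
updated_devices = {'port-b', 'port-a'}
devices_to_refilter = {'port-c'}
to_refresh = (updated_devices | devices_to_refilter) - new_devices
print(sorted(to_refresh))   # ['port-b', 'port-c']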
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA op wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class XlaOpsTest(xla_test.XLATestCase, parameterized.TestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected,
equality_fn=None):
with self.test_session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def testAdd(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.add,
args=(np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype)),
expected=np.array([5, 7, 9], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(0,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 9], [14, 15]], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(1,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 13], [10, 15]], dtype=dtype))
def testBroadcast(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.broadcast(x, (7, 42)),
args=(v,),
expected=np.tile(v, (7, 42, 1, 1)))
def testShiftRightLogical(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.uint32))
def testShiftRightArithmetic(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([-1, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0xFFFFFFFF, 1], dtype=np.uint32))
PRECISION_VALUES = (None, xla_data_pb2.PrecisionConfig.DEFAULT,
xla_data_pb2.PrecisionConfig.HIGH,
xla_data_pb2.PrecisionConfig.HIGHEST)
@parameterized.parameters(*PRECISION_VALUES)
def testConv(self, precision):
for dtype in set(self.float_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
def conv_1d_fn(lhs, rhs):
dnums = xla_data_pb2.ConvolutionDimensionNumbers()
num_spatial_dims = 1
dnums.input_batch_dimension = 0
dnums.input_feature_dimension = 1
dnums.output_batch_dimension = 0
dnums.output_feature_dimension = 1
dnums.kernel_output_feature_dimension = 0
dnums.kernel_input_feature_dimension = 1
dnums.input_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.kernel_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.output_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.conv(
    lhs,
    rhs,
    window_strides=(1,),
    padding=((2, 1),),
    lhs_dilation=(1,),
    rhs_dilation=(2,),
    dimension_numbers=dnums,
    precision_config=precision_config)
self._assertOpOutputMatchesExpected(
conv_1d_fn,
args=(
np.array([[[3, 4, 5, 6]]], dtype=dtype),
np.array([[[-2, -3]]], dtype=dtype),
),
expected=np.array([[[-9, -12, -21, -26, -10]]], dtype=dtype))
@parameterized.parameters(*PRECISION_VALUES)
def testDotGeneral(self, precision):
for dtype in self.float_types:
def dot_fn(lhs, rhs):
dnums = xla_data_pb2.DotDimensionNumbers()
dnums.lhs_contracting_dimensions.append(2)
dnums.rhs_contracting_dimensions.append(1)
dnums.lhs_batch_dimensions.append(0)
dnums.rhs_batch_dimensions.append(0)
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.dot_general(
lhs,
rhs,
dimension_numbers=dnums,
precision_config=precision_config)
lhs = np.array(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
], dtype=dtype)
rhs = np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype)
self._assertOpOutputMatchesExpected(
dot_fn,
args=(lhs, rhs),
expected=np.array(
[
[[9, 12, 15], [19, 26, 33]],
[[95, 106, 117], [129, 144, 159]],
],
dtype=dtype))
def testNeg(self):
for dtype in self.numeric_types - {np.uint8, np.int8}:
self._assertOpOutputMatchesExpected(
xla.neg,
args=(np.array([1, 2, 3], dtype=dtype),),
expected=np.array([-1, -2, -3], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
def pad_fn(x):
return xla.pad(
x,
padding_value=7,
padding_low=[2, 1],
padding_high=[1, 2],
padding_interior=[1, 0])
self._assertOpOutputMatchesExpected(
pad_fn,
args=(np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2]),),
expected=np.array(
[[7, 7, 7, 7, 7], [7, 7, 7, 7, 7], [7, 0, 1, 7, 7],
[7, 7, 7, 7, 7], [7, 2, 3, 7, 7], [7, 7, 7, 7, 7]],
dtype=dtype))
def testReduce(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
def sum_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=0, dimensions_to_reduce=dims, reducer=sum_reducer)
return fn
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([12, 15, 18, 21], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([6, 22, 38], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0, 1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=dtype(66))
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
def mul_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=1, dimensions_to_reduce=dims, reducer=mul_reducer)
return fn
self._assertOpOutputMatchesExpected(
mul_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([0, 45, 120, 231], dtype=dtype))
def testSelectAndScatter(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def add_scatter(x, y):
return x + y
@function.Defun(dtype, dtype)
def ge_select(x, y):
return x >= y
def test_fn(operand, source):
return xla.select_and_scatter(
operand,
window_dimensions=[2, 3, 1, 1],
window_strides=[2, 2, 1, 1],
padding=[[0, 0]] * 4,
source=source,
init_value=0,
select=ge_select,
scatter=add_scatter)
self._assertOpOutputMatchesExpected(
test_fn,
args=(np.array(
[[7, 2, 5, 3, 8], [3, 8, 9, 3, 4], [1, 5, 7, 5, 6],
[0, 6, 2, 10, 2]],
dtype=dtype).reshape((4, 5, 1, 1)),
np.array([[2, 6], [3, 1]], dtype=dtype).reshape((2, 2, 1, 1))),
expected=np.array(
[[0, 0, 0, 0, 0], [0, 0, 8, 0, 0], [0, 0, 3, 0, 0],
[0, 0, 0, 1, 0]],
dtype=dtype).reshape((4, 5, 1, 1)))
def testTranspose(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.transpose(x, [1, 0]), args=(v,), expected=v.T)
def testDynamicSlice(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.dynamic_slice,
args=(np.arange(1000,
dtype=np.int32).astype(dtype).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3, 2])),
expected=np.array(
np.array([[[573, 574], [583, 584], [593, 594]],
[[673, 674], [683, 684], [693, 694]]]),
dtype=dtype))
def testDynamicSliceWithIncorrectStartIndicesShape(self):
with self.test_session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7]), np.array([2, 3, 4]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'start_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and start_indices has shape \[2\].*'))
def testDynamicSliceWithIncorrectSizeIndicesShape(self):
with self.test_session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'size_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and size_indices has shape \[2\].*'))
if __name__ == '__main__':
googletest.main()
|
|
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabor Dozsa
# Andreas Sandberg
# This is an example configuration script for full system simulation of
# a generic ARM bigLITTLE system.
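#
# Example invocation (the gem5 binary name and the location of this script may
# differ in your checkout; the option names below come from the argument parser
# defined in main()):
#
#   gem5.opt fs_bigLITTLE.py --caches --big-cpus 2 --little-cpus 2 \
#       --kernel vmlinux4.3.aarch64 --disk aarch64-ubuntu-trusty-headless.img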
import argparse
import os
import sys
import m5
from m5.objects import *
m5.util.addToPath("../../")
from common import SysPaths
from common import CpuConfig
import devices
default_dtb = 'armv8_gem5_v1_big_little_2_2.dtb'
default_kernel = 'vmlinux4.3.aarch64'
default_disk = 'aarch64-ubuntu-trusty-headless.img'
default_rcs = 'bootscript.rcS'
default_mem_size = "2GB"
class BigCluster(devices.CpuCluster):
def __init__(self, system, num_cpus, cpu_clock,
cpu_voltage="1.0V"):
cpu_config = [ CpuConfig.get("arm_detailed"), devices.L1I, devices.L1D,
devices.WalkCache, devices.L2 ]
super(BigCluster, self).__init__(system, num_cpus, cpu_clock,
cpu_voltage, *cpu_config)
class LittleCluster(devices.CpuCluster):
def __init__(self, system, num_cpus, cpu_clock,
cpu_voltage="1.0V"):
cpu_config = [ CpuConfig.get("minor"), devices.L1I, devices.L1D,
devices.WalkCache, devices.L2 ]
super(LittleCluster, self).__init__(system, num_cpus, cpu_clock,
cpu_voltage, *cpu_config)
def createSystem(caches, kernel, bootscript, disks=[]):
sys = devices.SimpleSystem(caches, default_mem_size,
kernel=SysPaths.binary(kernel),
readfile=bootscript,
machine_type="DTOnly")
sys.mem_ctrls = SimpleMemory(range=sys._mem_range)
sys.mem_ctrls.port = sys.membus.master
sys.connect()
# Attach disk images
if disks:
def cow_disk(image_file):
image = CowDiskImage()
image.child.image_file = SysPaths.disk(image_file)
return image
sys.disk_images = [ cow_disk(f) for f in disks ]
sys.pci_vio_block = [ PciVirtIO(vio=VirtIOBlock(image=img))
for img in sys.disk_images ]
for dev in sys.pci_vio_block:
sys.attach_pci(dev)
sys.realview.setupBootLoader(sys.membus, sys, SysPaths.binary)
return sys
def main():
parser = argparse.ArgumentParser(
description="Generic ARM big.LITTLE configuration")
parser.add_argument("--restore-from", type=str, default=None,
help="Restore from checkpoint")
parser.add_argument("--dtb", type=str, default=default_dtb,
help="DTB file to load")
parser.add_argument("--kernel", type=str, default=default_kernel,
help="Linux kernel")
parser.add_argument("--disk", action="append", type=str, default=[],
help="Disks to instantiate")
parser.add_argument("--bootscript", type=str, default=default_rcs,
help="Linux bootscript")
parser.add_argument("--atomic", action="store_true", default=False,
help="Use atomic CPUs")
parser.add_argument("--kernel-init", type=str, default="/sbin/init",
help="Override init")
parser.add_argument("--big-cpus", type=int, default=1,
help="Number of big CPUs to instantiate")
parser.add_argument("--little-cpus", type=int, default=1,
help="Number of little CPUs to instantiate")
parser.add_argument("--caches", action="store_true", default=False,
help="Instantiate caches")
parser.add_argument("--last-cache-level", type=int, default=2,
help="Last level of caches (e.g. 3 for L3)")
parser.add_argument("--big-cpu-clock", type=str, default="2GHz",
help="Big CPU clock frequency")
parser.add_argument("--little-cpu-clock", type=str, default="1GHz",
help="Little CPU clock frequency")
m5.ticks.fixGlobalFrequency()
options = parser.parse_args()
kernel_cmd = [
"earlyprintk=pl011,0x1c090000",
"console=ttyAMA0",
"lpj=19988480",
"norandmaps",
"loglevel=8",
"mem=%s" % default_mem_size,
"root=/dev/vda1",
"rw",
"init=%s" % options.kernel_init,
"vmalloc=768MB",
]
root = Root(full_system=True)
disks = [default_disk] if len(options.disk) == 0 else options.disk
system = createSystem(options.caches,
options.kernel,
options.bootscript,
disks=disks)
root.system = system
system.boot_osflags = " ".join(kernel_cmd)
AtomicCluster = devices.AtomicCluster
if options.big_cpus + options.little_cpus == 0:
m5.util.panic("Empty CPU clusters")
# big cluster
if options.big_cpus > 0:
if options.atomic:
system.bigCluster = AtomicCluster(system, options.big_cpus,
options.big_cpu_clock)
else:
system.bigCluster = BigCluster(system, options.big_cpus,
options.big_cpu_clock)
mem_mode = system.bigCluster.memoryMode()
# little cluster
if options.little_cpus > 0:
if options.atomic:
system.littleCluster = AtomicCluster(system, options.little_cpus,
options.little_cpu_clock)
else:
system.littleCluster = LittleCluster(system, options.little_cpus,
options.little_cpu_clock)
mem_mode = system.littleCluster.memoryMode()
if options.big_cpus > 0 and options.little_cpus > 0:
if system.bigCluster.memoryMode() != system.littleCluster.memoryMode():
m5.util.panic("Memory mode missmatch among CPU clusters")
system.mem_mode = mem_mode
# create caches
system.addCaches(options.caches, options.last_cache_level)
if not options.caches:
if options.big_cpus > 0 and system.bigCluster.requireCaches():
m5.util.panic("Big CPU model requires caches")
if options.little_cpus > 0 and system.littleCluster.requireCaches():
m5.util.panic("Little CPU model requires caches")
# Linux device tree
system.dtb_filename = SysPaths.binary(options.dtb)
    # Load from the checkpoint or SimPoint checkpoint if one was given
if options.restore_from is not None:
m5.instantiate(options.restore_from)
else:
m5.instantiate()
# start simulation (and drop checkpoints when requested)
while True:
event = m5.simulate()
exit_msg = event.getCause()
if exit_msg == "checkpoint":
print "Dropping checkpoint at tick %d" % m5.curTick()
cpt_dir = os.path.join(m5.options.outdir, "cpt.%d" % m5.curTick())
m5.checkpoint(os.path.join(cpt_dir))
print "Checkpoint done."
else:
print exit_msg, " @ ", m5.curTick()
break
sys.exit(event.getCode())
if __name__ == "__m5_main__":
main()
|
|
# -*- coding: utf-8 -*-
import time
import logging
from datetime import datetime
import furl
import pymongo
import requests
from modularodm import fields, StoredObject
from box import CredentialsV2, refresh_v2_token, BoxClientException
from framework.auth import Auth
from framework.exceptions import HTTPError
from website.addons.base import exceptions
from website.addons.base import AddonUserSettingsBase, AddonNodeSettingsBase, GuidFile
from website.addons.box import settings
from website.addons.box.utils import BoxNodeLogger
from website.addons.box.client import get_client_from_user_settings
logger = logging.getLogger(__name__)
class BoxFile(GuidFile):
"""A Box file model with a GUID. Created lazily upon viewing a
file's detail page.
"""
__indices__ = [
{
'key_or_list': [
('node', pymongo.ASCENDING),
('path', pymongo.ASCENDING),
],
'unique': True,
}
]
path = fields.StringField(required=True, index=True)
@property
def waterbutler_path(self):
if not self.path.startswith('/'):
return '/{}'.format(self.path)
return self.path
@property
def provider(self):
return 'box'
@property
def version_identifier(self):
return 'revision'
@property
def unique_identifier(self):
return self._metadata_cache['extra'].get('etag') or self._metadata_cache['version']
class BoxOAuthSettings(StoredObject):
"""
    This model addresses the case where two OSF users are linked to the same
    Box account, which would otherwise cause their access tokens to conflict.
"""
# Box user id, for example, "4974056"
user_id = fields.StringField(primary=True, required=True)
    # Box username; this is the user's Box login
username = fields.StringField()
access_token = fields.StringField()
refresh_token = fields.StringField()
expires_at = fields.DateTimeField()
def fetch_access_token(self):
self.refresh_access_token()
return self.access_token
def get_credentialsv2(self):
return CredentialsV2(
self.access_token,
self.refresh_token,
settings.BOX_KEY,
settings.BOX_SECRET
)
def refresh_access_token(self, force=False):
# Ensure that most recent tokens are loaded from the database. Needed
# in case another concurrent request has already changed the tokens.
if self._is_loaded:
try:
self.reload()
            except Exception:
                pass
if self._needs_refresh() or force:
token = refresh_v2_token(settings.BOX_KEY, settings.BOX_SECRET, self.refresh_token)
self.access_token = token['access_token']
self.refresh_token = token.get('refresh_token', self.refresh_token)
self.expires_at = datetime.utcfromtimestamp(time.time() + token['expires_in'])
self.save()
def revoke_access_token(self):
        # If there is only one OSF user linked to this Box account, revoke the
        # token; otherwise just disconnect this OSF user from the BoxOAuthSettings.
if len(self.boxusersettings__accessed) <= 1:
url = furl.furl('https://www.box.com/api/oauth2/revoke/')
url.args = {
'token': self.access_token,
'client_id': settings.BOX_KEY,
'client_secret': settings.BOX_SECRET,
}
# no need to fail, revoke is opportunistic
requests.post(url.url)
            # Remove the object as it's the last instance.
BoxOAuthSettings.remove_one(self)
def _needs_refresh(self):
if self.expires_at is None:
return False
return (self.expires_at - datetime.utcnow()).total_seconds() < settings.REFRESH_TIME
class BoxUserSettings(AddonUserSettingsBase):
"""Stores user-specific box information, including the Oauth access
token.
"""
oauth_settings = fields.ForeignField(
'boxoauthsettings', backref='accessed'
)
@property
def user_id(self):
if self.oauth_settings:
return self.oauth_settings.user_id
return None
@user_id.setter
def user_id(self, val):
self.oauth_settings.user_id = val
@property
def username(self):
if self.oauth_settings:
return self.oauth_settings.username
return None
@username.setter
def username(self, val):
        self.oauth_settings.username = val
@property
def access_token(self):
if self.oauth_settings:
return self.oauth_settings.access_token
return None
@access_token.setter
def access_token(self, val):
self.oauth_settings.access_token = val
@property
def refresh_token(self):
if self.oauth_settings:
return self.oauth_settings.refresh_token
return None
@refresh_token.setter
def refresh_token(self, val):
self.oauth_settings.refresh_token = val
@property
def expires_at(self):
if self.oauth_settings:
return self.oauth_settings.expires_at
return None
@expires_at.setter
def expires_at(self, val):
self.oauth_settings.expires_at = val
@property
def has_auth(self):
if self.oauth_settings:
return self.oauth_settings.access_token is not None
return False
def fetch_access_token(self):
if self.oauth_settings:
return self.oauth_settings.fetch_access_token()
return None
def delete(self, save=True):
self.clear()
super(BoxUserSettings, self).delete(save)
def clear(self):
"""Clear settings and deauthorize any associated nodes."""
if self.oauth_settings:
self.oauth_settings.revoke_access_token()
self.oauth_settings = None
self.save()
for node_settings in self.boxnodesettings__authorized:
node_settings.deauthorize(Auth(self.owner))
node_settings.save()
def get_credentialsv2(self):
if not self.has_auth:
return None
return self.oauth_settings.get_credentialsv2()
def save(self, *args, **kwargs):
if self.oauth_settings:
self.oauth_settings.save()
return super(BoxUserSettings, self).save(*args, **kwargs)
def __repr__(self):
return u'<BoxUserSettings(user={self.owner.username!r})>'.format(self=self)
class BoxNodeSettings(AddonNodeSettingsBase):
user_settings = fields.ForeignField(
'boxusersettings', backref='authorized'
)
folder_id = fields.StringField(default=None)
folder_name = fields.StringField()
folder_path = fields.StringField()
_folder_data = None
@property
def display_name(self):
return '{0}: {1}'.format(self.config.full_name, self.folder_id)
@property
def has_auth(self):
"""Whether an access token is associated with this node."""
return bool(self.user_settings and self.user_settings.has_auth)
@property
def complete(self):
return self.has_auth and self.folder_id is not None
def fetch_folder_name(self):
self._update_folder_data()
return self.folder_name
def fetch_full_folder_path(self):
self._update_folder_data()
return self.folder_path
def _update_folder_data(self):
if self.folder_id is None:
return None
if not self._folder_data:
try:
client = get_client_from_user_settings(self.user_settings)
self._folder_data = client.get_folder(self.folder_id)
except BoxClientException:
return
self.folder_name = self._folder_data['name']
self.folder_path = '/'.join(
[x['name'] for x in self._folder_data['path_collection']['entries']]
+ [self.fetch_folder_name()]
)
self.save()
def set_folder(self, folder_id, auth):
self.folder_id = str(folder_id)
self._update_folder_data()
self.save()
# Add log to node
nodelogger = BoxNodeLogger(node=self.owner, auth=auth)
nodelogger.log(action="folder_selected", save=True)
def set_user_auth(self, user_settings):
"""Import a user's Box authentication and create a NodeLog.
:param BoxUserSettings user_settings: The user settings to link.
"""
self.user_settings = user_settings
nodelogger = BoxNodeLogger(node=self.owner, auth=Auth(user_settings.owner))
nodelogger.log(action="node_authorized", save=True)
def find_or_create_file_guid(self, path):
return BoxFile.get_or_create(node=self.owner, path=path)
# TODO: Is this used? If not, remove this and perhaps remove the 'deleted' field
def delete(self, save=True):
self.deauthorize(add_log=False)
super(BoxNodeSettings, self).delete(save)
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
node = self.owner
if add_log:
extra = {'folder_id': self.folder_id}
nodelogger = BoxNodeLogger(node=node, auth=auth)
nodelogger.log(action="node_deauthorized", extra=extra, save=True)
self.folder_id = None
self._update_folder_data()
self.user_settings = None
self.save()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
try:
return {'token': self.user_settings.fetch_access_token()}
except BoxClientException as error:
raise HTTPError(error.status_code, data={'message_long': error.message})
def serialize_waterbutler_settings(self):
if self.folder_id is None:
raise exceptions.AddonError('Folder is not configured')
return {'folder': self.folder_id}
def create_waterbutler_log(self, auth, action, metadata):
self.owner.add_log(
'box_{0}'.format(action),
auth=auth,
params={
'path': metadata['materialized'],
'project': self.owner.parent_id,
'node': self.owner._id,
'folder': self.folder_id,
'urls': {
'view': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='view', path=metadata['path']),
'download': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='download', path=metadata['path']),
},
},
)
##### Callback overrides #####
def before_register_message(self, node, user):
"""Return warning text to display if user auth will be copied to a
registration.
"""
category = node.project_or_component
if self.user_settings and self.user_settings.has_auth:
return (
u'The contents of Box add-ons cannot be registered at this time; '
u'the Box folder linked to this {category} will not be included '
u'as part of this registration.'
).format(**locals())
# backwards compatibility
before_register = before_register_message
def before_fork_message(self, node, user):
"""Return warning text to display if user auth will be copied to a
fork.
"""
category = node.project_or_component
if self.user_settings and self.user_settings.owner == user:
return (
u'Because you have authorized the Box add-on for this '
'{category}, forking it will also transfer your authentication token to '
'the forked {category}.'
).format(category=category)
else:
return (
u'Because the Box add-on has been authorized by a different '
'user, forking it will not transfer authentication token to the forked '
'{category}.'
).format(category=category)
# backwards compatibility
before_fork = before_fork_message
def before_remove_contributor_message(self, node, removed):
"""Return warning text to display if removed contributor is the user
who authorized the Box addon
"""
if self.user_settings and self.user_settings.owner == removed:
category = node.project_or_component
name = removed.fullname
return (
u'The Box add-on for this {category} is authenticated by {name}. '
'Removing this user will also remove write access to Box '
'unless another contributor re-authenticates the add-on.'
).format(**locals())
# backwards compatibility
before_remove_contributor = before_remove_contributor_message
def after_fork(self, node, fork, user, save=True):
"""After forking, copy user settings if the user is the one who authorized
the addon.
:return: A tuple of the form (cloned_settings, message)
"""
clone, _ = super(BoxNodeSettings, self).after_fork(
node=node, fork=fork, user=user, save=False
)
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
message = (
'Box authorization copied to forked {cat}.'
).format(cat=fork.project_or_component)
else:
message = (
u'Box authorization not copied to forked {cat}. You may '
'authorize this fork on the <a href="{url}">Settings</a> '
'page.'
).format(
url=fork.web_url_for('node_setting'),
cat=fork.project_or_component
)
if save:
clone.save()
return clone, message
def after_remove_contributor(self, node, removed, auth=None):
"""If the removed contributor was the user who authorized the Box
addon, remove the auth credentials from this node.
Return the message text that will be displayed to the user.
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the Box add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=node.category_display,
title=node.title,
user=removed.fullname
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <a href="{url}">Settings</a> page.'
).format(url=url)
return message
def after_delete(self, node, user):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
|
|
import re
import logging
from indra.resources import load_resource_json
logger = logging.getLogger(__name__)
identifiers_url = 'https://identifiers.org'
# These are just special cases of namespaces where the mapping from INDRA to
# identifiers.org is not a question of simple capitalization.
identifiers_mappings = {
'UP': 'uniprot',
'UPPRO': 'uniprot.chain',
'UPISO': 'uniprot.isoform',
'REFSEQ_PROT': 'refseq',
'PF': 'pfam',
'IP': 'interpro',
'ECCODE': 'ec-code',
'NONCODE': 'noncodev4.rna',
'LNCRNADB': 'rnacentral',
'MIRBASEM': 'mirbase.mature',
'EGID': 'ncbigene',
    'NCBI': 'ncbigene',
'HGNC_GROUP': 'hgnc.genefamily',
'LINCS': 'lincs.smallmolecule',
'PUBCHEM': 'pubchem.compound',
'CHEMBL': 'chembl.compound',
'CTD': 'ctd.chemical',
'CVCL': 'cellosaurus',
}
# These are namespaces used by INDRA that don't have corresponding
# identifiers.org entries
non_registry = {
'SDIS', 'SCHEM', 'SFAM', 'SCOMP', 'HMS-LINCS', 'NXPFA',
'OMIM', 'LSPCI', 'UPLOC', 'BFO', 'CCLE', 'CLO', 'GENBANK',
'DRUGBANK.SALT', 'SMILES',
}
# These are namespaces that can appear in db_refs but are actually not
# representing grounding.
non_grounding = {
'TEXT', 'TEXT_NORM'
}
# These are reverse mappings from identifiers.org namespaces to INDRA
# namespaces
identifiers_reverse = {
v: k for k, v in identifiers_mappings.items()
}
# We have to patch this one because it is ambiguous
identifiers_reverse['ncbigene'] = 'EGID'
# These are only the URLs that are strictly prefixes and not more complicated
# patterns. This is because some downstream code uses these as prefixes
# rather than arbitrary patterns.
url_prefixes = {
# Biology namespaces
'NXPFA': 'https://www.nextprot.org/term/FA-',
'SIGNOR': 'https://signor.uniroma2.it/relation_result.php?id=',
'LSPCI': 'https://labsyspharm.github.io/lspci/',
# WM namespaces
'UN': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'WDI': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'FAO': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'HUME': ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies'
'/hume_ontology/'),
'CWMS': 'http://trips.ihmc.us/',
'SOFIA': 'http://cs.cmu.edu/sofia/',
}
def get_ns_from_identifiers(identifiers_ns):
""""Return a namespace compatible with INDRA from an identifiers namespace.
For example, this function can be used to map 'uniprot' to 'UP'.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
Returns
-------
str or None
The namespace compatible with INDRA's internal representation or
None if the given namespace isn't an identifiers.org standard.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
if not reg_entry:
return None
mapping = identifiers_reverse.get(identifiers_ns.lower())
if mapping:
return mapping
else:
return identifiers_ns.upper()
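# Illustrative behaviour of get_ns_from_identifiers, assuming 'uniprot' and
# 'pubchem.compound' are present in the loaded identifiers registry:
#   get_ns_from_identifiers('uniprot')           # -> 'UP' (reverse mapping)
#   get_ns_from_identifiers('pubchem.compound')  # -> 'PUBCHEM'
#   get_ns_from_identifiers('not-a-namespace')   # -> None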
def get_ns_id_from_identifiers(identifiers_ns, identifiers_id):
"""Return a namespace/ID pair compatible with INDRA from identifiers.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
identifiers_id : str
An identifiers.org standard ID in the given namespace.
Returns
-------
(str, str)
A namespace and ID that are valid in INDRA db_refs.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
db_ns = get_ns_from_identifiers(identifiers_ns)
if db_ns is None:
return None, None
db_id = identifiers_id
if reg_entry['namespace_embedded']:
if not identifiers_id.startswith(identifiers_ns.upper()):
db_id = '%s:%s' % (identifiers_ns.upper(), identifiers_id)
return db_ns, db_id
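# Illustrative behaviour of get_ns_id_from_identifiers, assuming the registry
# marks 'go' as namespace-embedded and 'uniprot' as not:
#   get_ns_id_from_identifiers('uniprot', 'P04637')  # -> ('UP', 'P04637')
#   get_ns_id_from_identifiers('go', '0005634')      # -> ('GO', 'GO:0005634')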
def get_identifiers_ns(db_name):
"""Map an INDRA namespace to an identifiers.org namespace when possible.
Example: this can be used to map 'UP' to 'uniprot'.
Parameters
----------
db_name : str
An INDRA namespace to map to identifiers.org
Returns
-------
str or None
An identifiers.org namespace or None if not available.
"""
mapped_db_name = identifiers_mappings.get(db_name, db_name.lower())
if mapped_db_name not in identifiers_registry:
return None
return mapped_db_name
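# Illustrative behaviour of get_identifiers_ns, assuming 'uniprot' and 'hgnc'
# are present in the registry:
#   get_identifiers_ns('UP')     # -> 'uniprot' (special-case mapping)
#   get_identifiers_ns('HGNC')   # -> 'hgnc' (plain lower-casing)
#   get_identifiers_ns('SCHEM')  # -> None (no identifiers.org entry)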
def get_url_prefix(db_name):
"""Return the URL prefix for a given namespace."""
identifiers_ns = get_identifiers_ns(db_name)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if not identifiers_entry['namespace_embedded']:
return '%s/%s:' % (identifiers_url, identifiers_ns.lower())
else:
return '%s/' % identifiers_url
else:
if db_name in url_prefixes:
return url_prefixes[db_name]
return None
def get_identifiers_url(db_name, db_id):
"""Return an identifiers.org URL for a given database name and ID.
Parameters
----------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc.
db_id : str
An identifier in the given database.
Returns
-------
url : str
An identifiers.org URL corresponding to the given database name and ID.
"""
    # This is the case where we have a prefix to which we can simply attach
    # the db_id to get the desired URL.
if db_name == 'CHEMBL':
db_id = ensure_chembl_prefix(db_id)
elif db_name == 'CHEBI':
db_id = ensure_chebi_prefix(db_id)
prefix = get_url_prefix(db_name)
if prefix:
return '%s%s' % (prefix, db_id)
# Otherwise, we have to handle some special cases
bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
if db_name == 'LINCS':
if db_id.startswith('LSM-'): # Lincs Small Molecule ID
url = identifiers_url + '/lincs.smallmolecule:%s' % db_id
elif db_id.startswith('LCL-'): # Lincs Cell Line ID
url = identifiers_url + '/lincs.cell:%s' % db_id
else: # Assume LINCS Protein
url = identifiers_url + '/lincs.protein:%s' % db_id
elif db_name == 'CHEMBL':
if not db_id.startswith('CHEMBL'):
db_id = 'CHEMBL%s' % db_id
url = identifiers_url + '/chembl.compound:%s' % db_id
elif db_name == 'HMS-LINCS':
url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
# Special cases with no identifiers entry
elif db_name == 'SCHEM':
url = bel_scai_url + 'selventa-legacy-chemicals/' + \
'selventa-legacy-chemicals-20150601.belns'
elif db_name == 'SCOMP':
url = bel_scai_url + 'selventa-named-complexes/' + \
'selventa-named-complexes-20150601.belns'
elif db_name == 'SFAM':
url = bel_scai_url + 'selventa-protein-families/' + \
'selventa-protein-families-20150601.belns'
elif db_name == 'TEXT' or db_name == 'TEXT_NORM':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url
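# Illustrative behaviour of get_identifiers_url, assuming the registry marks
# 'uniprot' as a non-embedded and 'chebi' as an embedded namespace:
#   get_identifiers_url('UP', 'P04637')    # -> 'https://identifiers.org/uniprot:P04637'
#   get_identifiers_url('CHEBI', '15377')  # -> 'https://identifiers.org/CHEBI:15377'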
def parse_identifiers_url(url):
"""Retrieve database name and ID given the URL.
Parameters
----------
url : str
An identifiers.org URL to parse.
Returns
-------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc. corresponding to the
given URL.
db_id : str
An identifier in the database.
"""
# Try matching by string pattern
db_ns, db_id = None, None
url_pattern = \
r'(?:https?)://identifiers.org/([A-Za-z0-9.-]+)(/|:)([A-Za-z0-9:_.-]+)'
match = re.match(url_pattern, url)
if match is not None:
g = match.groups()
if len(g) == 3:
pattern_ns, pattern_id = g[0], g[2]
db_ns, db_id = get_ns_id_from_identifiers(pattern_ns, pattern_id)
if db_ns == 'HGNC':
if db_id.startswith('HGNC:'):
db_id = db_id[5:]
# If we got UP and UPPRO, return UPPRO
if db_ns == 'UP' and '#PRO_' in url:
db_ns = 'UPPRO'
db_id = url[url.find('PRO_'):]
if db_ns and db_id:
return db_ns, db_id
for ns, prefix in url_prefixes.items():
if url.startswith(prefix):
return ns, url[len(prefix):]
# Handle other special cases
for part in ['/lincs.smallmolecule', '/lincs.cell', '/lincs.protein']:
if part in url:
return 'LINCS', url[(url.find(part) + len(part) + 1):]
if '/chembl.compound' in url:
return 'CHEMBL', url[
(url.find('/chembl.compound') + len('/chembl.compound:')):]
if 'lincs.hms.harvard.edu' in url:
return 'HMS-LINCS', url[len('http://lincs.hms.harvard.edu/db/sm/'):-4]
if 'selventa-legacy-chemicals/' in url:
return 'SCHEM', None
if 'selventa-named-complexes/' in url:
return 'SCOMP', None
if 'selventa-protein-families/' in url:
return 'SFAM', None
else:
logger.warning('Could not parse URL %s' % url)
return None, None
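# Illustrative behaviour of parse_identifiers_url (the inverse of
# get_identifiers_url for the simple prefix-based cases), assuming 'uniprot'
# and 'hgnc' are present in the loaded registry:
#   parse_identifiers_url('https://identifiers.org/uniprot:P04637')   # -> ('UP', 'P04637')
#   parse_identifiers_url('https://identifiers.org/hgnc:HGNC:11998')  # -> ('HGNC', '11998')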
def namespace_embedded(db_ns: str) -> bool:
"""Return true if this namespace requires IDs to have namespace embedded.
This function first maps the given namespace to an identifiers.org
namespace and then checks the registry to see if namespaces need
to be embedded in IDs. If yes, it returns True. If not, or the ID can't
    be mapped to identifiers.org, it returns False.
Parameters
----------
db_ns :
The namespace to check.
Returns
-------
:
True if the namespace is known to be embedded in IDs of this
namespace. False if unknown or known not to be embedded.
"""
identifiers_ns = get_identifiers_ns(db_ns)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if identifiers_entry['namespace_embedded']:
return True
return False
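# Illustrative behaviour of namespace_embedded, assuming registry flags that
# mirror identifiers.org conventions:
#   namespace_embedded('CHEBI')  # -> True  (IDs look like 'CHEBI:15377')
#   namespace_embedded('UP')     # -> False (IDs look like 'P04637')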
def ensure_prefix_if_needed(db_ns: str, db_id: str) -> str:
"""Return an ID ensuring a namespace prefix if known to be needed.
Parameters
----------
db_ns :
The namespace associated with the identifier.
db_id :
The original identifier.
Returns
-------
:
The identifier with namespace embedded if needed.
"""
if namespace_embedded(db_ns):
return ensure_prefix(db_ns, db_id)
return db_id
def ensure_prefix(db_ns, db_id, with_colon=True):
"""Return a valid ID that has the given namespace embedded.
This is useful for namespaces such as CHEBI, GO or BTO that require
the namespace to be part of the ID. Note that this function always
ensures that the given db_ns is embedded in the ID and can handle the
    case when it's already present.
Parameters
----------
db_ns : str
A namespace.
db_id : str
An ID within that namespace which should have the namespace
as a prefix in it.
with_colon: Optional[bool]
If True, the namespace prefix is followed by a colon in the ID (e.g.,
CHEBI:12345). Otherwise, no colon is added (e.g., CHEMBL1234).
Default: True
"""
if db_id is None:
return None
colon = ':' if with_colon else ''
if not db_id.startswith(f'{db_ns}{colon}'):
return f'{db_ns}{colon}{db_id}'
return db_id
def ensure_chebi_prefix(chebi_id):
"""Return a valid CHEBI ID that has the appropriate CHEBI: prefix."""
return ensure_prefix('CHEBI', chebi_id)
def ensure_chembl_prefix(chembl_id):
"""Return a valid CHEMBL ID that has the appropriate CHEMBL prefix."""
return ensure_prefix('CHEMBL', chembl_id, with_colon=False)
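# Illustrative behaviour of the prefix helpers (follows directly from the
# code above):
#   ensure_prefix('CHEBI', '15377')        # -> 'CHEBI:15377'
#   ensure_prefix('CHEBI', 'CHEBI:15377')  # -> 'CHEBI:15377' (unchanged)
#   ensure_chembl_prefix('1234')           # -> 'CHEMBL1234'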
def _load_identifiers_registry():
identifiers_registry = load_resource_json('identifiers_patterns.json')
    # Override the pattern, otherwise partial EC codes like 1.1 can't be used
identifiers_registry['ec-code']['pattern'] = '^\\d{1,2}(\\.\\d{0,3}){0,3}$'
identifiers_registry['mondo'] = {
"pattern": "^\\d+$",
"namespace_embedded": False,
}
for value in identifiers_registry.values():
value["pattern_compiled"] = re.compile(value["pattern"])
return identifiers_registry
identifiers_registry = _load_identifiers_registry()
|
|
#-*- coding: utf-8 -*-
from __future__ import division
import os
import time
import tensorflow as tf
import numpy as np
from ops import *
from utils import *
class BEGAN(object):
model_name = "BEGAN" # name for checkpoint
def __init__(self, sess, epoch, batch_size, z_dim, dataset_name, checkpoint_dir, result_dir, log_dir):
self.sess = sess
self.dataset_name = dataset_name
self.checkpoint_dir = checkpoint_dir
self.result_dir = result_dir
self.log_dir = log_dir
self.epoch = epoch
self.batch_size = batch_size
if dataset_name == 'mnist' or dataset_name == 'fashion-mnist':
# parameters
self.input_height = 28
self.input_width = 28
self.output_height = 28
self.output_width = 28
self.z_dim = z_dim # dimension of noise-vector
self.c_dim = 1
# BEGAN Parameter
self.gamma = 0.75
self.lamda = 0.001
# train
self.learning_rate = 0.0002
self.beta1 = 0.5
# test
self.sample_num = 64 # number of generated images to be saved
# load mnist
self.data_X, self.data_y = load_mnist(self.dataset_name)
# get number of batches for a single epoch
self.num_batches = len(self.data_X) // self.batch_size
else:
raise NotImplementedError
def discriminator(self, x, is_training=True, reuse=False):
# It must be Auto-Encoder style architecture
# Architecture : (64)4c2s-FC32_BR-FC64*14*14_BR-(1)4dc2s_S
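        # The discriminator is itself an autoencoder: conv -> bottleneck code ->
        # deconv reconstruction. Its per-sample reconstruction error (recon_error
        # below) is the "energy" the BEGAN losses are built from.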
with tf.variable_scope("discriminator", reuse=reuse):
net = tf.nn.relu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1'))
net = tf.reshape(net, [self.batch_size, -1])
code = tf.nn.relu(bn(linear(net, 32, scope='d_fc6'), is_training=is_training, scope='d_bn6'))
net = tf.nn.relu(bn(linear(code, 64 * 14 * 14, scope='d_fc3'), is_training=is_training, scope='d_bn3'))
net = tf.reshape(net, [self.batch_size, 14, 14, 64])
out = tf.nn.sigmoid(deconv2d(net, [self.batch_size, 28, 28, 1], 4, 4, 2, 2, name='d_dc5'))
# recon loss
recon_error = tf.sqrt(2 * tf.nn.l2_loss(out - x)) / self.batch_size
return out, recon_error, code
def generator(self, z, is_training=True, reuse=False):
# Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
# Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
with tf.variable_scope("generator", reuse=reuse):
net = tf.nn.relu(bn(linear(z, 1024, scope='g_fc1'), is_training=is_training, scope='g_bn1'))
net = tf.nn.relu(bn(linear(net, 128 * 7 * 7, scope='g_fc2'), is_training=is_training, scope='g_bn2'))
net = tf.reshape(net, [self.batch_size, 7, 7, 128])
net = tf.nn.relu(
bn(deconv2d(net, [self.batch_size, 14, 14, 64], 4, 4, 2, 2, name='g_dc3'), is_training=is_training,
scope='g_bn3'))
out = tf.nn.sigmoid(deconv2d(net, [self.batch_size, 28, 28, 1], 4, 4, 2, 2, name='g_dc4'))
return out
def build_model(self):
# some parameters
image_dims = [self.input_height, self.input_width, self.c_dim]
bs = self.batch_size
""" BEGAN variable """
self.k = tf.Variable(0., trainable=False)
""" Graph Input """
# images
self.inputs = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images')
# noises
self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z')
""" Loss Function """
# output of D for real images
D_real_img, D_real_err, D_real_code = self.discriminator(self.inputs, is_training=True, reuse=False)
# output of D for fake images
G = self.generator(self.z, is_training=True, reuse=False)
D_fake_img, D_fake_err, D_fake_code = self.discriminator(G, is_training=True, reuse=True)
# get loss for discriminator
self.d_loss = D_real_err - self.k*D_fake_err
# get loss for generator
self.g_loss = D_fake_err
# convergence metric
self.M = D_real_err + tf.abs(self.gamma*D_real_err - D_fake_err)
# operation for updating k
self.update_k = self.k.assign(
tf.clip_by_value(self.k + self.lamda*(self.gamma*D_real_err - D_fake_err), 0, 1))
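        # Proportional control from the BEGAN paper: k is nudged by
        # lamda * (gamma * L(x) - L(G(z))) and clipped to [0, 1], balancing the
        # generator against the autoencoder; M above is the paper's convergence
        # measure L(x) + |gamma * L(x) - L(G(z))|.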
""" Training """
# divide trainable variables into a group for D and a group for G
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
# optimizers
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self.d_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
.minimize(self.d_loss, var_list=d_vars)
self.g_optim = tf.train.AdamOptimizer(self.learning_rate*5, beta1=self.beta1) \
.minimize(self.g_loss, var_list=g_vars)
"""" Testing """
# for test
self.fake_images = self.generator(self.z, is_training=False, reuse=True)
""" Summary """
d_loss_real_sum = tf.summary.scalar("d_error_real", D_real_err)
d_loss_fake_sum = tf.summary.scalar("d_error_fake", D_fake_err)
d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
M_sum = tf.summary.scalar("M", self.M)
k_sum = tf.summary.scalar("k", self.k)
# final summary operations
self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])
self.p_sum = tf.summary.merge([M_sum, k_sum])
def train(self):
# initialize all variables
tf.global_variables_initializer().run()
# graph inputs for visualize training results
        self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
# saver to save model
self.saver = tf.train.Saver()
# summary writer
self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_name, self.sess.graph)
        # restore check-point if it exists
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
start_epoch = (int)(checkpoint_counter / self.num_batches)
start_batch_id = checkpoint_counter - start_epoch * self.num_batches
counter = checkpoint_counter
print(" [*] Load SUCCESS")
else:
start_epoch = 0
start_batch_id = 0
counter = 1
print(" [!] Load failed...")
# loop for epoch
start_time = time.time()
for epoch in range(start_epoch, self.epoch):
# get batch data
for idx in range(start_batch_id, self.num_batches):
batch_images = self.data_X[idx*self.batch_size:(idx+1)*self.batch_size]
batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
# update D network
_, summary_str, d_loss = self.sess.run([self.d_optim, self.d_sum, self.d_loss],
feed_dict={self.inputs: batch_images, self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# update G network
_, summary_str, g_loss = self.sess.run([self.g_optim, self.g_sum, self.g_loss], feed_dict={self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# update k
_, summary_str, M_value, k_value = self.sess.run([self.update_k, self.p_sum, self.M, self.k], feed_dict={self.inputs: batch_images, self.z: batch_z})
self.writer.add_summary(summary_str, counter)
# display training status
counter += 1
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f, M: %.8f, k: %.8f" \
% (epoch, idx, self.num_batches, time.time() - start_time, d_loss, g_loss, M_value, k_value))
# save training results for every 300 steps
if np.mod(counter, 300) == 0:
samples = self.sess.run(self.fake_images, feed_dict={self.z: self.sample_z})
tot_num_samples = min(self.sample_num, self.batch_size)
manifold_h = int(np.floor(np.sqrt(tot_num_samples)))
manifold_w = int(np.floor(np.sqrt(tot_num_samples)))
save_images(samples[:manifold_h * manifold_w, :, :, :], [manifold_h, manifold_w],
'./' + check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_train_{:02d}_{:04d}.png'.format(
epoch, idx))
# After an epoch, start_batch_id is set to zero
# non-zero value is only for the first epoch after loading pre-trained model
start_batch_id = 0
# save model
self.save(self.checkpoint_dir, counter)
            # show intermediate results
self.visualize_results(epoch)
# save model for final step
self.save(self.checkpoint_dir, counter)
def visualize_results(self, epoch):
tot_num_samples = min(self.sample_num, self.batch_size)
image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
""" random condition, random noise """
z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
samples = self.sess.run(self.fake_images, feed_dict={self.z: z_sample})
save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
check_folder(self.result_dir + '/' + self.model_dir) + '/' + self.model_name + '_epoch%03d' % epoch + '_test_all_classes.png')
@property
def model_dir(self):
return "{}_{}_{}_{}".format(
self.model_name, self.dataset_name,
self.batch_size, self.z_dim)
def save(self, checkpoint_dir, step):
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,os.path.join(checkpoint_dir, self.model_name+'.model'), global_step=step)
def load(self, checkpoint_dir):
import re
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            counter = int(next(re.finditer(r"(\d+)(?!.*\d)", ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
|
|
#!/usr/bin/python
# coding=utf-8
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import random
import socket
import subprocess
import sys
import tempfile
sys.path.insert(0, '../')
import mongo_orchestration.process as process
from mongo_orchestration.errors import TimeoutError
from tests import unittest, SkipTest, HOSTNAME
class PortPoolTestCase(unittest.TestCase):
def setUp(self):
self.hostname = HOSTNAME
self.pp = process.PortPool()
self.pp.change_range(min_port=1025, max_port=1080)
self.sockets = {}
def tearDown(self):
for s in self.sockets:
self.sockets[s].close()
def listen_port(self, port, max_connection=0):
if self.sockets.get(port, None):
self.sockets[port].close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOSTNAME, port))
s.listen(max_connection)
self.sockets[port] = s
def test_singleton(self):
pp2 = process.PortPool(min_port=1025, max_port=1038)
self.assertEqual(id(self.pp), id(pp2))
def test_port_sequence(self):
ports = set([1025, 1026, 1027, 1028, 30, 28, 22, 45])
self.pp.change_range(port_sequence=ports)
_ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
self.assertEqual(ports, _ports)
def test_find_port(self):
port = self.pp.port()
self.pp.change_range(port, port)
port = self.pp.port()
self.assertTrue(port > 0)
self.listen_port(port)
self.assertRaises(IndexError, self.pp.port)
def test_port_with_check(self):
self.pp.change_range(min_port=1100, max_port=1200)
port1, port2 = self.pp.port(check=True), self.pp.port(check=True)
self.pp.change_range(port_sequence=[port1, port2])
self.listen_port(port1, 0)
self.assertTrue(port2 == self.pp.port(check=True))
def test_check_port(self):
port = self.pp.port(check=True)
self.assertTrue(self.pp._PortPool__check_port(port))
self.listen_port(port)
self.assertFalse(self.pp._PortPool__check_port(port))
def test_release_port(self):
port = self.pp.port(check=True)
self.assertTrue(port in self.pp._PortPool__closed)
self.pp.release_port(port)
self.assertFalse(port in self.pp._PortPool__closed)
def test_refresh(self):
ports = set([random.randint(1025, 2000) for i in range(15)])
self.pp.change_range(port_sequence=ports)
ports_opened = self.pp._PortPool__ports.copy()
test_port = ports_opened.pop()
self.assertTrue(test_port in self.pp._PortPool__ports)
self.assertTrue(len(self.pp._PortPool__ports) > 1)
for port in ports:
if port != test_port:
try:
self.listen_port(port)
except (socket.error):
pass
self.pp.refresh()
self.assertTrue(len(self.pp._PortPool__ports) == 1)
def test_refresh_only_closed(self):
ports = set([random.randint(1025, 2000) for _ in range(15)])
self.pp.change_range(port_sequence=ports)
closed_num = len(self.pp._PortPool__closed)
self.pp.port(), self.pp.port()
self.assertTrue(closed_num + 2 == len(self.pp._PortPool__closed))
ports_opened = self.pp._PortPool__ports.copy()
test_port = ports_opened.pop()
self.listen_port(test_port)
self.pp.refresh(only_closed=True)
self.assertTrue(closed_num == len(self.pp._PortPool__closed))
self.pp.refresh()
self.assertTrue(closed_num + 1 == len(self.pp._PortPool__closed))
def test_change_range(self):
self.pp.change_range(min_port=1025, max_port=1033)
ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
self.assertTrue(ports == set(range(1025, 1033 + 1)))
random_ports = set([random.randint(1025, 2000) for i in range(15)])
self.pp.change_range(port_sequence=random_ports)
ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
self.assertTrue(ports == random_ports)
class ProcessTestCase(unittest.TestCase):
def setUp(self):
self.hostname = HOSTNAME
self.s = None
self.executable = sys.executable
self.pp = process.PortPool(min_port=1025, max_port=2000)
self.sockets = {}
self.tmp_files = list()
self.bin_path = os.path.join(os.environ.get('MONGOBIN', ''), 'mongod')
self.db_path = tempfile.mkdtemp()
self.cfg = {"oplogSize": 10, 'dbpath': self.db_path}
def tearDown(self):
for s in self.sockets:
self.sockets[s].close()
if self.cfg:
process.cleanup_mprocess('', self.cfg)
for item in self.tmp_files:
if os.path.exists(item):
os.remove(item)
def listen_port(self, port, max_connection=0):
if self.sockets.get(port, None):
self.sockets[port].close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOSTNAME, port))
s.listen(max_connection)
self.sockets[port] = s
def test_wait_for(self):
port = self.pp.port(check=True)
self.listen_port(port, max_connection=1)
self.assertTrue(process.wait_for(port, 1))
self.sockets.pop(port).close()
self.assertFalse(process.wait_for(port, 1))
def test_repair(self):
port = self.pp.port(check=True)
# Assume we're testing on 64-bit machines.
self.cfg['nojournal'] = True
lock_file = os.path.join(self.cfg['dbpath'], 'mongod.lock')
config_path = process.write_config(self.cfg)
self.tmp_files.append(config_path)
proc, host = process.mprocess(self.bin_path, config_path, port=port, timeout=60)
self.assertTrue(os.path.exists(lock_file))
if platform.system() == 'Windows':
# mongod.lock cannot be read by any external process on Windows.
with self.assertRaises(IOError):
open(lock_file, 'r')
else:
with open(lock_file, 'r') as fd:
self.assertGreater(len(fd.read()), 0)
proc.terminate()
proc.communicate()
process.repair_mongo(self.bin_path, self.cfg['dbpath'])
with open(lock_file, 'r') as fd:
contents = fd.read()
self.assertEqual(len(contents), 0,
"lock_file contains: " + contents)
def test_mprocess_fail(self):
fd_cfg, config_path = tempfile.mkstemp()
os.close(fd_cfg)
self.tmp_files.append(config_path)
self.assertRaises(OSError, process.mprocess,
'fake-process_', config_path, None, 30)
process.write_config({"fake": True}, config_path)
self.assertRaises(TimeoutError, process.mprocess,
self.bin_path, config_path, None, 30)
def test_mprocess(self):
port = self.pp.port(check=True)
config_path = process.write_config(self.cfg)
self.tmp_files.append(config_path)
result = process.mprocess(self.bin_path, config_path, port=port)
self.assertTrue(isinstance(result, tuple))
proc, host = result
self.assertTrue(isinstance(proc, subprocess.Popen))
self.assertTrue(isinstance(host, str))
process.kill_mprocess(proc)
def test_mprocess_timeout(self):
port = self.pp.port()
cfg = self.cfg.copy()
cfg['journal'] = True
config_path = process.write_config(cfg)
self.tmp_files.append(config_path)
proc, host = process.mprocess(self.bin_path, config_path, port, 0)
self.assertTrue(isinstance(proc, subprocess.Popen))
self.assertTrue(isinstance(host, str))
process.kill_mprocess(proc)
if platform.system() == 'Windows':
raise SkipTest("Cannot test mongod startup timeout on Windows.")
with self.assertRaises(TimeoutError):
result = process.mprocess(self.bin_path, config_path, port, 0.1)
print(result)
def test_mprocess_busy_port(self):
config_path = process.write_config(self.cfg)
self.tmp_files.append(config_path)
port = self.pp.port()
self.listen_port(port, max_connection=0)
proc, host = process.mprocess(self.executable, config_path,
port=port, timeout=2)
self.assertTrue(proc.pid > 0)
self.assertEqual(host, self.hostname + ':' + str(port))
self.sockets.pop(port).close()
self.assertRaises(OSError, process.mprocess,
self.executable, '', port, 1)
def test_kill_mprocess(self):
p = subprocess.Popen([self.executable])
self.assertTrue(process.proc_alive(p))
process.kill_mprocess(p)
self.assertFalse(process.proc_alive(p))
def test_cleanup_process(self):
fd_cfg, config_path = tempfile.mkstemp()
fd_key, key_file = tempfile.mkstemp()
fd_log, log_path = tempfile.mkstemp()
db_path = tempfile.mkdtemp()
self.assertTrue(os.path.exists(config_path))
self.assertTrue(os.path.exists(key_file))
self.assertTrue(os.path.exists(log_path))
self.assertTrue(os.path.exists(db_path))
with os.fdopen(fd_cfg, 'w') as fd:
fd.write('keyFile={key_file}\n'
'logPath={log_path}\n'
'dbpath={db_path}'.format(**locals()))
for fd in (fd_cfg, fd_key, fd_log):
try:
os.close(fd)
except OSError:
# fd_cfg may be closed already if fdopen() didn't raise
pass
cfg = {'keyFile': key_file, 'logpath': log_path, 'dbpath': db_path}
process.cleanup_mprocess(config_path, cfg)
self.assertFalse(os.path.exists(config_path))
self.assertFalse(os.path.exists(key_file))
self.assertTrue(os.path.exists(log_path))
self.assertFalse(os.path.exists(db_path))
process.remove_path(log_path)
self.assertFalse(os.path.exists(log_path))
def test_remove_path(self):
fd, file_path = tempfile.mkstemp()
os.close(fd)
self.assertTrue(os.path.exists(file_path))
process.remove_path(file_path)
self.assertFalse(os.path.exists(file_path))
dir_path = tempfile.mkdtemp()
fd, file_path = tempfile.mkstemp(dir=dir_path)
os.close(fd)
process.remove_path(dir_path)
self.assertFalse(os.path.exists(file_path))
self.assertFalse(os.path.exists(dir_path))
def test_write_config(self):
cfg = {'port': 27017, 'objcheck': 'true'}
config_path = process.write_config(cfg)
self.assertTrue(os.path.exists(config_path))
with open(config_path, 'r') as fd:
config_data = fd.read()
self.assertTrue('port=27017' in config_data)
self.assertTrue('objcheck=true' in config_data)
process.cleanup_mprocess(config_path, cfg)
def test_write_config_with_specify_config_path(self):
cfg = {'port': 27017, 'objcheck': 'true'}
fd_key, file_path = tempfile.mkstemp()
os.close(fd_key)
config_path = process.write_config(cfg, file_path)
self.assertEqual(file_path, config_path)
process.cleanup_mprocess(config_path, cfg)
def test_proc_alive(self):
p = subprocess.Popen([self.executable])
self.assertTrue(process.proc_alive(p))
p.terminate()
p.wait()
self.assertFalse(process.proc_alive(p))
self.assertFalse(process.proc_alive(None))
def test_read_config(self):
cfg = {"oplogSize": 10, "other": "some string"}
config_path = process.write_config(cfg)
self.tmp_files.append(config_path)
self.assertEqual(process.read_config(config_path), cfg)
if __name__ == '__main__':
unittest.main()
|
|
"""Support for 1-Wire environment sensors."""
from __future__ import annotations
import asyncio
import copy
from dataclasses import dataclass
import logging
import os
from types import MappingProxyType
from typing import TYPE_CHECKING, Any
from pi1wire import InvalidCRCException, OneWireInterface, UnsupportResponseException
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_TYPE,
ELECTRIC_POTENTIAL_VOLT,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_CBAR,
PRESSURE_MBAR,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from .const import (
CONF_TYPE_OWSERVER,
CONF_TYPE_SYSBUS,
DEVICE_KEYS_0_3,
DEVICE_KEYS_A_B,
DOMAIN,
READ_MODE_FLOAT,
READ_MODE_INT,
)
from .model import OWDirectDeviceDescription, OWServerDeviceDescription
from .onewire_entities import (
OneWireBaseEntity,
OneWireEntityDescription,
OneWireProxyEntity,
)
from .onewirehub import OneWireHub
@dataclass
class OneWireSensorEntityDescription(OneWireEntityDescription, SensorEntityDescription):
"""Class describing OneWire sensor entities."""
SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION = OneWireSensorEntityDescription(
key="temperature",
device_class=SensorDeviceClass.TEMPERATURE,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
)
_LOGGER = logging.getLogger(__name__)
DEVICE_SENSORS: dict[str, tuple[OneWireSensorEntityDescription, ...]] = {
"10": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),
"12": (
OneWireSensorEntityDescription(
key="TAI8570/temperature",
device_class=SensorDeviceClass.TEMPERATURE,
entity_registry_enabled_default=False,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="TAI8570/pressure",
device_class=SensorDeviceClass.PRESSURE,
entity_registry_enabled_default=False,
name="Pressure",
native_unit_of_measurement=PRESSURE_MBAR,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
),
"22": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),
"26": (
SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,
OneWireSensorEntityDescription(
key="humidity",
device_class=SensorDeviceClass.HUMIDITY,
entity_registry_enabled_default=False,
name="Humidity",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="HIH3600/humidity",
device_class=SensorDeviceClass.HUMIDITY,
entity_registry_enabled_default=False,
name="Humidity HIH3600",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="HIH4000/humidity",
device_class=SensorDeviceClass.HUMIDITY,
entity_registry_enabled_default=False,
name="Humidity HIH4000",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="HIH5030/humidity",
device_class=SensorDeviceClass.HUMIDITY,
entity_registry_enabled_default=False,
name="Humidity HIH5030",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="HTM1735/humidity",
device_class=SensorDeviceClass.HUMIDITY,
entity_registry_enabled_default=False,
name="Humidity HTM1735",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="B1-R1-A/pressure",
device_class=SensorDeviceClass.PRESSURE,
entity_registry_enabled_default=False,
name="Pressure",
native_unit_of_measurement=PRESSURE_MBAR,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="S3-R1-A/illuminance",
device_class=SensorDeviceClass.ILLUMINANCE,
entity_registry_enabled_default=False,
name="Illuminance",
native_unit_of_measurement=LIGHT_LUX,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="VAD",
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
name="Voltage VAD",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="VDD",
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
name="Voltage VDD",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="vis",
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
name="vis",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
),
"28": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),
"3B": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),
"42": (SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION,),
"1D": tuple(
OneWireSensorEntityDescription(
key=f"counter.{id}",
name=f"Counter {id}",
native_unit_of_measurement="count",
read_mode=READ_MODE_INT,
state_class=SensorStateClass.TOTAL_INCREASING,
)
for id in DEVICE_KEYS_A_B
),
}
# EF sensors are usually HobbyBoards specialized sensors.
# These can only be read by OWFS. Currently this driver only supports them
# via owserver (network protocol).
HOBBYBOARD_EF: dict[str, tuple[OneWireSensorEntityDescription, ...]] = {
"HobbyBoards_EF": (
OneWireSensorEntityDescription(
key="humidity/humidity_corrected",
device_class=SensorDeviceClass.HUMIDITY,
name="Humidity",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="humidity/humidity_raw",
device_class=SensorDeviceClass.HUMIDITY,
name="Humidity Raw",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="humidity/temperature",
device_class=SensorDeviceClass.TEMPERATURE,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
),
"HB_MOISTURE_METER": tuple(
OneWireSensorEntityDescription(
key=f"moisture/sensor.{id}",
device_class=SensorDeviceClass.PRESSURE,
name=f"Moisture {id}",
native_unit_of_measurement=PRESSURE_CBAR,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
)
for id in DEVICE_KEYS_0_3
),
}
# 7E sensors are special sensors by Embedded Data Systems
EDS_SENSORS: dict[str, tuple[OneWireSensorEntityDescription, ...]] = {
"EDS0066": (
OneWireSensorEntityDescription(
key="EDS0066/temperature",
device_class=SensorDeviceClass.TEMPERATURE,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="EDS0066/pressure",
device_class=SensorDeviceClass.PRESSURE,
name="Pressure",
native_unit_of_measurement=PRESSURE_MBAR,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
),
"EDS0068": (
OneWireSensorEntityDescription(
key="EDS0068/temperature",
device_class=SensorDeviceClass.TEMPERATURE,
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="EDS0068/pressure",
device_class=SensorDeviceClass.PRESSURE,
name="Pressure",
native_unit_of_measurement=PRESSURE_MBAR,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="EDS0068/light",
device_class=SensorDeviceClass.ILLUMINANCE,
name="Illuminance",
native_unit_of_measurement=LIGHT_LUX,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
OneWireSensorEntityDescription(
key="EDS0068/humidity",
device_class=SensorDeviceClass.HUMIDITY,
name="Humidity",
native_unit_of_measurement=PERCENTAGE,
read_mode=READ_MODE_FLOAT,
state_class=SensorStateClass.MEASUREMENT,
),
),
}
def get_sensor_types(
device_sub_type: str,
) -> dict[str, tuple[OneWireSensorEntityDescription, ...]]:
"""Return the proper info array for the device type."""
if "HobbyBoard" in device_sub_type:
return HOBBYBOARD_EF
if "EDS" in device_sub_type:
return EDS_SENSORS
return DEVICE_SENSORS
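# Illustrative lookups (comment added for clarity; the sub-type strings are set
# in get_entities() below):
#   get_sensor_types("HobbyBoard")  # -> HOBBYBOARD_EF
#   get_sensor_types("EDS")         # -> EDS_SENSORS
#   get_sensor_types("std")         # -> DEVICE_SENSORS (default)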
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up 1-Wire platform."""
onewirehub = hass.data[DOMAIN][config_entry.entry_id]
entities = await hass.async_add_executor_job(
get_entities, onewirehub, config_entry.data
)
async_add_entities(entities, True)
def get_entities(
onewirehub: OneWireHub, config: MappingProxyType[str, Any]
) -> list[SensorEntity]:
"""Get a list of entities."""
if not onewirehub.devices:
return []
entities: list[SensorEntity] = []
conf_type = config[CONF_TYPE]
    # We have an owserver on a remote (or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
assert onewirehub.owproxy
for device in onewirehub.devices:
if TYPE_CHECKING:
assert isinstance(device, OWServerDeviceDescription)
family = device.family
device_type = device.type
device_id = device.id
device_info = device.device_info
device_sub_type = "std"
device_path = device.path
if "EF" in family:
device_sub_type = "HobbyBoard"
family = device_type
elif "7E" in family:
device_sub_type = "EDS"
family = device_type
if family not in get_sensor_types(device_sub_type):
continue
for description in get_sensor_types(device_sub_type)[family]:
if description.key.startswith("moisture/"):
s_id = description.key.split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device_path}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
description = copy.deepcopy(description)
description.device_class = SensorDeviceClass.HUMIDITY
description.native_unit_of_measurement = PERCENTAGE
description.name = f"Wetness {s_id}"
device_file = os.path.join(
os.path.split(device.path)[0], description.key
)
name = f"{device_id} {description.name}"
entities.append(
OneWireProxySensor(
description=description,
device_id=device_id,
device_file=device_file,
device_info=device_info,
name=name,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
for device in onewirehub.devices:
if TYPE_CHECKING:
assert isinstance(device, OWDirectDeviceDescription)
p1sensor: OneWireInterface = device.interface
family = p1sensor.mac_address[:2]
device_id = f"{family}-{p1sensor.mac_address[2:]}"
device_info = device.device_info
description = SIMPLE_TEMPERATURE_SENSOR_DESCRIPTION
device_file = f"/sys/bus/w1/devices/{device_id}/w1_slave"
name = f"{device_id} {description.name}"
entities.append(
OneWireDirectSensor(
description=description,
device_id=device_id,
device_file=device_file,
device_info=device_info,
name=name,
owsensor=p1sensor,
)
)
return entities
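# Illustrative device_file layouts produced above (example values, not from the
# source): an owserver device with path "/1D.F2D0C9000000/" and description key
# "counter.A" yields "/1D.F2D0C9000000/counter.A", while a sysbus device with id
# "28-0123456789ab" yields "/sys/bus/w1/devices/28-0123456789ab/w1_slave".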
class OneWireSensor(OneWireBaseEntity, SensorEntity):
"""Mixin for sensor specific attributes."""
entity_description: OneWireSensorEntityDescription
class OneWireProxySensor(OneWireProxyEntity, OneWireSensor):
"""Implementation of a 1-Wire sensor connected through owserver."""
entity_description: OneWireSensorEntityDescription
@property
def native_value(self) -> StateType:
"""Return the state of the entity."""
return self._state
class OneWireDirectSensor(OneWireSensor):
"""Implementation of a 1-Wire sensor directly connected to RPI GPIO."""
def __init__(
self,
description: OneWireSensorEntityDescription,
device_id: str,
device_info: DeviceInfo,
device_file: str,
name: str,
owsensor: OneWireInterface,
) -> None:
"""Initialize the sensor."""
super().__init__(
description=description,
device_id=device_id,
device_info=device_info,
device_file=device_file,
name=name,
)
self._attr_unique_id = device_file
self._owsensor = owsensor
@property
def native_value(self) -> StateType:
"""Return the state of the entity."""
return self._state
async def get_temperature(self) -> float:
"""Get the latest data from the device."""
attempts = 1
while True:
try:
return await self.hass.async_add_executor_job(
self._owsensor.get_temperature
)
except UnsupportResponseException as ex:
_LOGGER.debug(
"Cannot read from sensor %s (retry attempt %s): %s",
self._device_file,
attempts,
ex,
)
await asyncio.sleep(0.2)
attempts += 1
if attempts > 10:
raise
async def async_update(self) -> None:
"""Get the latest data from the device."""
try:
self._value_raw = await self.get_temperature()
self._state = round(self._value_raw, 1)
except (
FileNotFoundError,
InvalidCRCException,
UnsupportResponseException,
) as ex:
_LOGGER.warning(
"Cannot read from sensor %s: %s",
self._device_file,
ex,
)
self._state = None
|
|
import boto3
import sure # noqa # pylint: disable=unused-import
import pytest
from botocore.exceptions import ClientError
from moto import mock_ec2
from tests import EXAMPLE_AMI_ID
from .test_instances import retrieve_all_instances
from uuid import uuid4
@mock_ec2
def test_instance_create_tags():
ec2 = boto3.resource("ec2", "us-west-1")
client = boto3.client("ec2", "us-west-1")
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
with pytest.raises(ClientError) as ex:
instance.create_tags(
Tags=[{"Key": "a key", "Value": "some value"}], DryRun=True
)
ex.value.response["Error"]["Code"].should.equal("DryRunOperation")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(412)
ex.value.response["Error"]["Message"].should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.create_tags(Tags=[{"Key": "a key", "Value": "some value"}])
existing_instances = retrieve_all_instances(client)
ours = [i for i in existing_instances if i["InstanceId"] == instance.id][0]
ours["Tags"].should.equal([{"Key": "a key", "Value": "some value"}])
@mock_ec2
def test_instance_delete_tags():
ec2 = boto3.resource("ec2", region_name="us-east-1")
client = boto3.client("ec2", region_name="us-east-1")
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
instance.create_tags(Tags=[{"Key": "a key", "Value": "some value"}])
tags = client.describe_tags(
Filters=[{"Name": "resource-id", "Values": [instance.id]}]
)["Tags"]
tag = tags[0]
tag.should.have.key("Key").equal("a key")
tag.should.have.key("Value").equal("some value")
with pytest.raises(ClientError) as ex:
instance.delete_tags(DryRun=True)
ex.value.response["Error"]["Code"].should.equal("DryRunOperation")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(412)
ex.value.response["Error"]["Message"].should.equal(
"An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set"
)
# Specifying key only
instance.delete_tags(Tags=[{"Key": "a key"}])
client.describe_tags(Filters=[{"Name": "resource-id", "Values": [instance.id]}])[
"Tags"
].should.have.length_of(0)
instance.create_tags(Tags=[{"Key": "a key", "Value": "some value"}])
client.describe_tags(Filters=[{"Name": "resource-id", "Values": [instance.id]}])[
"Tags"
].should.have.length_of(1)
# Specifying key and value
instance.delete_tags(Tags=[{"Key": "a key", "Value": "some value"}])
client.describe_tags(Filters=[{"Name": "resource-id", "Values": [instance.id]}])[
"Tags"
].should.have.length_of(0)
@mock_ec2
def test_get_all_tags_with_special_characters_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
client = boto3.client("ec2", region_name="us-east-1")
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
tag_key = str(uuid4())
instance.create_tags(Tags=[{"Key": tag_key, "Value": "some<> value"}])
tag = client.describe_tags(Filters=[{"Name": "key", "Values": [tag_key]}])["Tags"][
0
]
tag.should.have.key("Key").equal(tag_key)
tag.should.have.key("Value").equal("some<> value")
@mock_ec2
def test_create_tags_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
client = boto3.client("ec2", region_name="us-east-1")
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
tag_list = [
{"Key": "a key", "Value": "some value"},
{"Key": "another key", "Value": "some other value"},
{"Key": "blank key", "Value": ""},
]
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[instance.id], Tags=tag_list, DryRun=True)
ex.value.response["Error"]["Code"].should.equal("DryRunOperation")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(412)
ex.value.response["Error"]["Message"].should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
client.create_tags(Resources=[instance.id], Tags=tag_list)
tags = client.describe_tags(
Filters=[{"Name": "resource-id", "Values": [instance.id]}]
)["Tags"]
tags.should.have.length_of(3)
for expected_tag in tag_list:
tags.should.contain(
{
"Key": expected_tag["Key"],
"ResourceId": instance.id,
"ResourceType": "instance",
"Value": expected_tag["Value"],
}
)
@mock_ec2
def test_tag_limit_exceeded_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
client = boto3.client("ec2", region_name="us-east-1")
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
tag_list = []
for i in range(51):
tag_list.append({"Key": "{0:02d}".format(i + 1), "Value": ""})
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[instance.id], Tags=tag_list)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["ResponseMetadata"].should.have.key("RequestId")
ex.value.response["Error"]["Code"].should.equal("TagLimitExceeded")
instance.create_tags(Tags=[{"Key": "a key", "Value": "a value"}])
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[instance.id], Tags=tag_list)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["ResponseMetadata"].should.have.key("RequestId")
ex.value.response["Error"]["Code"].should.equal("TagLimitExceeded")
tags = client.describe_tags(
Filters=[{"Name": "resource-id", "Values": [instance.id]}]
)["Tags"]
tags.should.have.length_of(1)
tags[0].should.have.key("Key").equal("a key")
tags[0].should.have.key("Value").equal("a value")
@mock_ec2
def test_invalid_id_boto3():
client = boto3.client("ec2", region_name="us-east-1")
with pytest.raises(ClientError) as ex:
client.create_tags(
Resources=["ami-blah"], Tags=[{"Key": "key", "Value": "tag"}]
)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["ResponseMetadata"].should.have.key("RequestId")
ex.value.response["Error"]["Code"].should.equal("InvalidID")
with pytest.raises(ClientError) as ex:
client.create_tags(
Resources=["blah-blah"], Tags=[{"Key": "key", "Value": "tag"}]
)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["ResponseMetadata"].should.have.key("RequestId")
ex.value.response["Error"]["Code"].should.equal("InvalidID")
@mock_ec2
def test_get_all_tags_resource_filter_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
client = boto3.client("ec2", region_name="us-east-1")
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
inst_tag_key = str(uuid4())[0:6]
client.create_tags(
Resources=[instance.id], Tags=[{"Key": inst_tag_key, "Value": "some value"}],
)
image = instance.create_image(Name="test-ami", Description="this is a test ami")
image.create_tags(Tags=[{"Key": "an image key", "Value": "some value"}])
expected = {
"Key": inst_tag_key,
"ResourceId": instance.id,
"ResourceType": "instance",
"Value": "some value",
}
our_tags = client.describe_tags(
Filters=[{"Name": "resource-id", "Values": [instance.id]}]
)["Tags"]
our_tags.should.equal([expected])
instances = client.describe_tags(
Filters=[{"Name": "resource-type", "Values": ["instance"]}]
)["Tags"]
instances.should.contain(expected)
tags = client.describe_tags(Filters=[{"Name": "key", "Values": [inst_tag_key]}])[
"Tags"
]
tags.should.equal([expected])
expected = {
"Key": "an image key",
"ResourceId": image.id,
"ResourceType": "image",
"Value": "some value",
}
my_image = client.describe_tags(
Filters=[{"Name": "resource-id", "Values": [image.id]}]
)["Tags"]
my_image.should.equal([expected])
all_images = client.describe_tags(
Filters=[{"Name": "resource-type", "Values": ["image"]}]
)["Tags"]
all_images.should.contain(expected)
tags = client.describe_tags(
Filters=[{"Name": "resource-type", "Values": ["unknown"]}]
)["Tags"]
tags.should.equal([])
@mock_ec2
def test_get_all_tags_value_filter_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
client = boto3.client("ec2", region_name="us-east-1")
def create_instance_with_tag(value):
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[
0
]
tag = {"Key": "an instance key", "Value": value}
client.create_tags(Resources=[instance.id], Tags=[tag])
return instance
instance_a = create_instance_with_tag("some value")
instance_b = create_instance_with_tag("some other value")
instance_c = create_instance_with_tag("other value*")
instance_d = create_instance_with_tag("other value**")
instance_e = create_instance_with_tag("other value*?")
image = instance_a.create_image(Name="test-ami", Description="this is a test ami")
image.create_tags(Tags=[{"Key": "an image key", "Value": "some value"}])
def filter_by_value(query, expected):
filters = [{"Name": "value", "Values": [query]}]
tags = retrieve_all_tagged(client, filters)
actual = set([t["ResourceId"] for t in tags])
for e in expected:
actual.should.contain(e)
filter_by_value("some value", [instance_a.id, image.id])
filter_by_value("some*value", [instance_a.id, instance_b.id, image.id])
filter_by_value("*some*value", [instance_a.id, instance_b.id, image.id])
filter_by_value("*some*value*", [instance_a.id, instance_b.id, image.id])
filter_by_value(r"*value\*", [instance_c.id])
filter_by_value(r"*value\*\*", [instance_d.id])
filter_by_value(r"*value\*\?", [instance_e.id])
@mock_ec2
def test_retrieved_instances_must_contain_their_tags_boto3():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {"Key": tag_key, "Value": tag_value}
ec2 = boto3.resource("ec2", region_name="us-east-1")
client = boto3.client("ec2", region_name="us-east-1")
instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]
all_instances = retrieve_all_instances(client)
ours = [i for i in all_instances if i["InstanceId"] == instance.id]
ours.should.have.length_of(1)
ours[0]["InstanceId"].should.equal(instance.id)
ours[0].shouldnt.have.key("Tags")
client.create_tags(Resources=[instance.id], Tags=[tags_to_be_set])
all_instances = retrieve_all_instances(client)
ours = [i for i in all_instances if i["InstanceId"] == instance.id]
retrieved_tags = ours[0]["Tags"]
# Check whether tag is present with correct value
retrieved_tags.should.equal([{"Key": tag_key, "Value": tag_value}])
@mock_ec2
def test_retrieved_volumes_must_contain_their_tags_boto3():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {"Key": tag_key, "Value": tag_value}
ec2 = boto3.resource("ec2", region_name="eu-west-1")
client = boto3.client("ec2", region_name="eu-west-1")
    volume = ec2.create_volume(Size=80, AvailabilityZone="eu-west-1a")
volume.tags.should.be.none
client.create_tags(Resources=[volume.id], Tags=[tags_to_be_set])
volume.reload()
volume.tags.should.equal([{"Key": tag_key, "Value": tag_value}])
@mock_ec2
def test_retrieved_snapshots_must_contain_their_tags_boto3():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {"Key": tag_key, "Value": tag_value}
ec2 = boto3.resource("ec2", region_name="eu-west-1")
client = boto3.client("ec2", region_name="eu-west-1")
volume = ec2.create_volume(Size=80, AvailabilityZone="eu-west-1a")
snapshot = ec2.create_snapshot(VolumeId=volume.id)
client.create_tags(Resources=[snapshot.id], Tags=[tags_to_be_set])
snapshot = client.describe_snapshots(SnapshotIds=[snapshot.id])["Snapshots"][0]
snapshot["Tags"].should.equal([{"Key": tag_key, "Value": tag_value}])
@mock_ec2
def test_filter_instances_by_wildcard_tags_boto3():
ec2 = boto3.resource("ec2", region_name="eu-west-1")
client = boto3.client("ec2", region_name="eu-west-1")
reservations = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
instance_a, instance_b = reservations
instance_a.create_tags(Tags=[{"Key": "Key1", "Value": "Value1"}])
instance_b.create_tags(Tags=[{"Key": "Key1", "Value": "Value2"}])
res = client.describe_instances(
Filters=[{"Name": "tag:Key1", "Values": ["Value*"]}]
)
res["Reservations"][0]["Instances"].should.have.length_of(2)
res = client.describe_instances(Filters=[{"Name": "tag-key", "Values": ["Key*"]}])
res["Reservations"][0]["Instances"].should.have.length_of(2)
res = client.describe_instances(
Filters=[{"Name": "tag-value", "Values": ["Value*"]}]
)
res["Reservations"][0]["Instances"].should.have.length_of(2)
res = client.describe_instances(
Filters=[{"Name": "tag-value", "Values": ["Value2*"]}]
)
res["Reservations"][0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_create_volume_with_tags():
client = boto3.client("ec2", "us-west-2")
response = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)
assert response["Tags"][0]["Key"] == "TEST_TAG"
@mock_ec2
def test_create_snapshot_with_tags():
client = boto3.client("ec2", "us-west-2")
volume_id = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)["VolumeId"]
snapshot = client.create_snapshot(
VolumeId=volume_id,
TagSpecifications=[
{
"ResourceType": "snapshot",
"Tags": [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}],
}
],
)
expected_tags = [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}]
assert snapshot["Tags"] == expected_tags
@mock_ec2
def test_create_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# create tag with empty resource
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_delete_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# delete tag with empty resource
with pytest.raises(ClientError) as ex:
client.delete_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_retrieve_resource_with_multiple_tags():
ec2 = boto3.resource("ec2", region_name="us-west-1")
blue, green = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
tag_val1 = str(uuid4())
ec2.create_tags(
Resources=[blue.instance_id],
Tags=[
{"Key": "environment", "Value": tag_val1},
{"Key": "application", "Value": "api"},
],
)
tag_val2 = str(uuid4())
ec2.create_tags(
Resources=[green.instance_id],
Tags=[
{"Key": "environment", "Value": tag_val2},
{"Key": "application", "Value": "api"},
],
)
green_instances = list(ec2.instances.filter(Filters=(get_filter(tag_val2))))
green_instances.should.equal([green])
blue_instances = list(ec2.instances.filter(Filters=(get_filter(tag_val1))))
blue_instances.should.equal([blue])
def get_filter(tag_val):
return [
{"Name": "tag-key", "Values": ["application"]},
{"Name": "tag-value", "Values": ["api"]},
{"Name": "tag-key", "Values": ["environment"]},
{"Name": "tag-value", "Values": [tag_val]},
]
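# Note (added for clarity, not in the original test): EC2 evaluates separate
# "tag-key" and "tag-value" filters independently rather than as key/value pairs,
# so the test above relies on tag_val1/tag_val2 being unique uuid4 strings. A
# paired match would instead use a "tag:environment" filter, e.g.:
#   [{"Name": "tag:environment", "Values": [tag_val]}]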
def retrieve_all_tagged(client, filters=[]): # pylint: disable=W0102
resp = client.describe_tags(Filters=filters)
tags = resp["Tags"]
token = resp.get("NextToken")
while token:
resp = client.describe_tags(Filters=filters, NextToken=token)
tags.extend(resp["Tags"])
token = resp.get("Token")
return tags
|
|
from bluetooth import *
from serial_stub import *
from serial_comminication import *
import json
# Explicit stdlib imports for names used below; these may also be re-exported by
# the star imports above, but importing them here makes the dependencies clear.
import sys
import time
from Queue import Queue
def goodbye(client_sock, server_sock):
if client_sock:
print_msg("goodbye", "Closing bluetooth client")
client_sock.close()
if server_sock:
print_msg("goodbye", "Closing bluetooth server")
server_sock.close()
class AndroidAPI(object):
def __init__(self, serial_api):
self.client_sock = None
self.server_sock = None
self.is_connected = False
#self.serial_api = SerialAPIStub()
self.serial_api = serial_api
self.map_outgoing = Queue()
self.name = "Android Commander"
# explore and run signal
self.explore_start = False
self.run_start = False
def is_connect(self):
return self.is_connected
def init_bluetooth(self, btport = 4):
"""
manual connect to nexus
channel port: 4
nexus address "08:60:6E:A5:90:50" #mac number
"""
self.server_sock = BluetoothSocket( RFCOMM )
self.server_sock.bind(("", btport))
self.server_sock.listen(1)
port = self.server_sock.getsockname()[1]
uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
advertise_service(self.server_sock, "SampleServer",
service_id=uuid,
service_classes=[uuid, SERIAL_PORT_CLASS],
profiles=[SERIAL_PORT_PROFILE],
)
import atexit
atexit.register(goodbye, None, self.server_sock)
#print "atexit registered 1"
print_msg(self.name, "waiting for connection on RFCOMM channel %d" % port)
self.client_sock, client_info = self.server_sock.accept() # blocking
atexit.register(goodbye, self.client_sock, self.server_sock)
#print "atexit registered 2"
print_msg(self.name, "Accepted connection from "+str(client_info))
self.is_connected = True
def disconnect(self):
print "disconnect"
self.server_sock.close()
def connect(self):
"""
auto connect to nexus
"""
print_msg(self.name, "Connecting...")
self.client_sock = BluetoothSocket( RFCOMM )
uuid = "00001101-0000-1000-8000-00805F9B34FB"
btaddr = "08:60:6E:A5:90:50"
service_match = find_service(uuid = uuid, address = btaddr)
while len(service_match) == 0:
service_match = find_service(uuid = uuid, address = btaddr)
first_match = service_match[0]
port = first_match["port"]
host = first_match["host"]
self.client_sock.connect((host,port))
print_msg(self.name, "Connected to "+str(host))
self.is_connected = True
def write(self,msg):
"""
write to nexus
:status + msg
"""
try:
while self.is_connect():
#while self.is_connect():
self.client_sock.send("status "+str(msg))
print_msg(self.name, "Write to Android: %s" % msg)
return True
except IOError:
print_msg(self.name, "disconnected")
self.is_connected = False
self.client_sock.close()
self.disconnect()
sys.exit(-1)
def write_map(self,msg):
"""
write to nexus
:status + msg
"""
try:
while self.is_connect():
#while self.is_connect():
#self.client_sock.send("GRID 5 5 2 2 3 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0")#str(msg))
self.client_sock.send("GRID 15 20 "+str(msg))
print_msg(self.name, "Write to Android: %s" % msg[:15])
return True
except IOError:
print_msg(self.name, "disconnected")
self.is_connected = False
self.client_sock.close()
self.disconnect()
sys.exit(-1)
def __translate_robot_location(self, x, y):
"""
:param x: int
:param y: int
:return: String
"""
# android starts from 1 1
x+=1
y+=1
coordinates = "%d %d %d %d"%(x, y, x+1, y)
return coordinates
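    # Illustrative only (derived from the method above): internal coordinates are
    # zero-based while Android's grid is one-based, so a robot at (0, 0) maps to
    # the cell pair "1 1 2 1".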
def update_android(self, map_coordinate, location_coordinate):
"""
Main Flow of Auto
Send: F1, L90, R90
"""
robot_x, robot_y = location_coordinate.split(",")
#map_x, map_y = map_coordinate.split(",")
robot_coordinate = self.__translate_robot_location(int(robot_x), int(robot_y))
#robot_coord = str(robot_coordinate)+str(map_coordinate)
final_coordinate = robot_coordinate+" "+" ".join(str(map_coordinate))
self.write_map(final_coordinate)
#return final_coordinate
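    # Example payload (assumed values, derived from write_map() above):
    # update_android("0101", "2,3") builds "3 4 4 4 0 1 0 1", which write_map()
    # sends as "GRID 15 20 3 4 4 4 0 1 0 1".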
def read_for_remote_control(self):
"""
listen from nexus
Main Flow of Remote Control
"""
try:
if self.is_connect():
print_msg(self.name, "Receiving socket package")
b_data = self.client_sock.recv(1024)
print "Received from Android: %s" % b_data
if len(b_data) != 0:
print_msg(self.name, "decoding")
message = self.__decode_n_execute(b_data)
self.write(message)
except IOError:
print_msg(self.name, "disconnected")
self.is_connected = False
self.client_sock.close()
self.disconnect()
sys.exit(-1)
def read_for_explore_run(self):
"""
listen from nexus
Main Flow of Auto mode
"""
b_data = self.client_sock.recv(1024)
        if b_data is not None and len(b_data) != 0:
if b_data!="GRID": # AUTO mode in android, to avoid flush cmd
print "Received from Android: %s" % b_data
if b_data=="explore":
print_msg(self.name, "Setting \"explore\" flag")
self.explore_start = True
elif b_data=="run":
print_msg(self.name, "Setting \"run\" flag")
self.run_start = True
else:
pass
def __execute_msg(self, function_code, parameter):
#self.write("Forward")
self.serial_api.command_put(function_code, parameter)
while True:
lst = self.serial_api.response_pop()
print_msg(self.name, "Waiting for response")
print lst
            if lst is None:
time.sleep(0.05)
continue
else:
ack, type_data, data = lst[0], lst[1], lst[2]
print ack, type_data, data
if ack==True:
break
else:
continue
def __decode_n_execute(self, msg):
if msg == "w":
self.__execute_msg(0, 10)
return "Robot moved Forward"
elif msg == "a":
self.__execute_msg(1, 90)
return "Robot Turned Left"
elif msg == "d":
self.__execute_msg(2, 90)
return "Robot Turned Right"
elif msg == "run":#shortest path
#self.write("run")
return ""
elif msg == "explore":#explore maze
#self.write("explore")
return ""
########################################################################################################################
def map_pop_n_exe(self):
if not self.map_outgoing.empty():
#self.ack = False
command_pair = self.map_outgoing.get()
self.update_android(str(command_pair[0]), str(command_pair[1]))
#self.write_map(str(command_pair[0])+str(command_pair[1]))
def map_put(self, map_grid, location):
self.map_outgoing.put([map_grid, location])
def is_map_empty(self):
return self.map_outgoing.empty()
class AndroidThread(AbstractThread):
@Override(AbstractThread)
def __init__(self, name, android_api, mode, production):
"""
:param name: name for the thread
:param serial_api: Shared resoureces
:param mode: either "auto" or "control"
:param production: Boolean, if false, use __test_run_pipeline_style rather than waiting for PC
"""
super(AndroidThread, self).__init__(name)
self.android_api = android_api
self.mode = mode
self.production = production
@Override(AbstractThread)
def run(self):
self.print_msg("Starting")
if self.mode=="control":
self.print_msg("In remote control mode")
self.__remote_control_mode()
else:
#self.production("In auto mode")
self.print_msg("In auto mode")
self.__auto_mode()
self.print_msg("Ending")
def __auto_mode(self):
"""
Auto run mode. Android update the map
"""
while True:
# establish connection
while True:
if self.android_api.is_connect():
break
self.android_api.init_bluetooth()
time.sleep(0.05)
if self.android_api.is_map_empty():
if self.production:
# self.print_msg("Waiting for map update")
time.sleep(0.05)
continue
else:
self.__test_run_pipeline_style()
else:
self.print_msg("Updating map")
self.android_api.map_pop_n_exe()
time.sleep(0.05)
def __remote_control_mode(self):
"""
Android remotely control the robot
"""
while True:
while True:
if self.android_api.is_connect():
break
self.android_api.init_bluetooth()
time.sleep(0.05)
self.android_api.read_for_remote_control()
time.sleep(0.05)
def __test_run_pipeline_style(self):
loc = ""
for i in range(300): #300
loc = loc+str(i%1)
msg_received = "{\"map\":\"%s\", \"location\":\"1,1\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
msg_received = "{\"map\":\"%s\", \"location\":\"2,1\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
msg_received = "{\"map\":\"%s\", \"location\":\"3,1\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
msg_received = "{\"map\":\"%s\", \"location\":\"4,1\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
msg_received = "{\"map\":\"%s\", \"location\":\"5,1\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
msg_received = "{\"map\":\"%s\", \"location\":\"6,1\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
msg_received = "{\"map\":\"%s\", \"location\":\"6,2\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
msg_received = "{\"map\":\"%s\", \"location\":\"6,3\"}"%loc
msg_json = json.loads(msg_received)
location = msg_json["location"]
map_grid = msg_json["map"]
self.android_api.map_put(map_grid, location)
class AndroidExploreRunThread(AbstractThread):
@Override(AbstractThread)
def __init__(self, name, android_api):
super(AndroidExploreRunThread, self).__init__(name, production=True)
self.android_api = android_api
self.setDaemon(True)
@Override(AbstractThread)
def run(self):
while True:
if self.android_api.is_connect():
break
time.sleep(0.05)
while not self.android_api.explore_start or not self.android_api.run_start:
self.android_api.read_for_explore_run()
# testing only bluetooth
if __name__=="__main__":
print "Executing main flow"
serial_api = SerialAPIStub()
android_api = AndroidAPI(serial_api)
android_thread = AndroidThread("android", android_api, mode="auto", production=False)
explore_run_thread = AndroidExploreRunThread("explore_run", android_api)
android_thread.start()
explore_run_thread.start()
|
|
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from cinder.api.contrib import types_extra_specs
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
import cinder.wsgi
def return_create_volume_type_extra_specs(context, volume_type_id,
extra_specs):
return stub_volume_type_extra_specs()
def return_volume_type_extra_specs(context, volume_type_id):
return stub_volume_type_extra_specs()
def return_empty_volume_type_extra_specs(context, volume_type_id):
return {}
def delete_volume_type_extra_specs(context, volume_type_id, key):
pass
def delete_volume_type_extra_specs_not_found(context, volume_type_id, key):
raise exception.VolumeTypeExtraSpecsNotFound("Not Found")
def stub_volume_type_extra_specs():
specs = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
def volume_type_get(context, id, inactive=False, expected_fields=None):
pass
class VolumeTypesExtraSpecsTest(test.TestCase):
def setUp(self):
super(VolumeTypesExtraSpecsTest, self).setUp()
self.flags(host='fake')
self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get)
self.api_path = '/v2/%s/os-volume-types/%s/extra_specs' % (
fake.PROJECT_ID, fake.VOLUME_TYPE_ID)
self.controller = types_extra_specs.VolumeTypeExtraSpecsController()
"""to reset notifier drivers left over from other api/contrib tests"""
def test_index(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.index(req, fake.VOLUME_TYPE_ID)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.index(req, fake.VOLUME_TYPE_ID)
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
res_dict = self.controller.show(req, fake.VOLUME_TYPE_ID, 'key5')
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, fake.VOLUME_ID, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete',
delete_volume_type_extra_specs)
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
self.controller.delete(req, fake.VOLUME_ID, 'key5')
self.assertEqual(1, len(self.notifier.notifications))
def test_delete_not_found(self):
self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete',
delete_volume_type_extra_specs_not_found)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, fake.VOLUME_ID, 'key6')
@mock.patch('cinder.utils.check_string_length')
def test_create(self, mock_check):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, fake.VOLUME_ID, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_check.called)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
@mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create')
@mock.patch('cinder.utils.check_string_length')
def test_create_key_allowed_chars(
self, mock_check, volume_type_extra_specs_update_or_create):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
volume_type_extra_specs_update_or_create.\
return_value = mock_return_value
body = {"extra_specs": {"other_alphanum.-_:": "value1"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, fake.VOLUME_ID, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_check.called)
self.assertEqual('value1',
res_dict['extra_specs']['other_alphanum.-_:'])
@mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create')
@mock.patch('cinder.utils.check_string_length')
def test_create_too_many_keys_allowed_chars(
self, mock_check, volume_type_extra_specs_update_or_create):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
volume_type_extra_specs_update_or_create.\
return_value = mock_return_value
body = {"extra_specs": {"other_alphanum.-_:": "value1",
"other2_alphanum.-_:": "value2",
"other3_alphanum.-_:": "value3"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, fake.VOLUME_ID, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_check.called)
self.assertEqual('value1',
res_dict['extra_specs']['other_alphanum.-_:'])
self.assertEqual('value2',
res_dict['extra_specs']['other2_alphanum.-_:'])
self.assertEqual('value3',
res_dict['extra_specs']['other3_alphanum.-_:'])
@mock.patch('cinder.utils.check_string_length')
def test_update_item(self, mock_check):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1"}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path + '/key1')
res_dict = self.controller.update(req, fake.VOLUME_ID, 'key1', body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_check.called)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1", "key2": "value2"}
req = fakes.HTTPRequest.blank(self.api_path + '/key1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, fake.VOLUME_ID, 'key1', body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1"}
req = fakes.HTTPRequest.blank(self.api_path + '/bad')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, fake.VOLUME_ID, 'bad', body)
def _extra_specs_empty_update(self, body):
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/extra_specs' % (
fake.PROJECT_ID, fake.VOLUME_TYPE_ID))
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, fake.VOLUME_ID, body)
def test_update_no_body(self):
self._extra_specs_empty_update(body=None)
def test_update_empty_body(self):
self._extra_specs_empty_update(body={})
def _extra_specs_create_bad_body(self, body):
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/extra_specs' % (
fake.PROJECT_ID, fake.VOLUME_TYPE_ID))
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, fake.VOLUME_ID, body)
def test_create_no_body(self):
self._extra_specs_create_bad_body(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._extra_specs_create_bad_body(body=body)
def test_create_malformed_entity(self):
body = {'extra_specs': 'string'}
self._extra_specs_create_bad_body(body=body)
def test_create_invalid_key(self):
body = {"extra_specs": {"ke/y1": "value1"}}
self._extra_specs_create_bad_body(body=body)
def test_create_invalid_too_many_key(self):
body = {"key1": "value1", "ke/y2": "value2", "key3": "value3"}
self._extra_specs_create_bad_body(body=body)
|
|
#!python
# Collect for each region the list of players by league
# Strategy: we go through the list of all the known players and check their games
# As a starting list, we can take master/challenger players
import os
import multiprocessing
import time
import pickle
import sys
from pickle import PickleError
from InterfaceAPI import InterfaceAPI, ApiError403, ApiError
import Modes
MAX_DAYS = 1 # up to how many days we look up
# Note it's not important that we get every single player, since we only need one participant for each game
MAX_DEPTH = 1000 * (time.time() - 86400 * MAX_DAYS)
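# MAX_DEPTH is an epoch cut-off in milliseconds (match timestamps returned by the
# API are in ms); games older than this are skipped during exploration.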
ATTEMPTS = 6
ATTEMPTS_WAIT = 300
SAVE_INTERVAL = 60 # save every minute
DATABASE_WAIT = 60 # if the database cannot be reached, wait
class PlayerListing:
def __init__(self, database, leagues, region, fast=False):
self.api = InterfaceAPI()
self.database = database
self.leagues = leagues
self.region = region
self.nextSave = time.time() + SAVE_INTERVAL
from_scratch = True
if not os.path.isdir(self.database):
raise FileNotFoundError(self.database)
if not os.path.isdir(os.path.join(self.database, 'player_listing', self.region)):
os.makedirs(os.path.join(self.database, 'player_listing', self.region))
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'players')):
self.players = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'players'), 'rb'))
for league in leagues:
if self.players[league]:
from_scratch = False
break
else:
self.players = {}
for league in leagues:
self.players[league] = []
        # to make sure we don't explore the same players/games several times
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'exploredPlayers')):
self.exploredPlayers = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'exploredPlayers'), 'rb'))
else:
self.exploredPlayers = []
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'exploredGames')):
self.exploredGames = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'exploredGames'), 'rb'))
else:
self.exploredGames = []
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'to_explore')):
self.to_explore = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'to_explore'), 'rb'))
else:
self.to_explore = []
if os.path.exists(os.path.join(database, 'player_listing', self.region, 'exploredLeagues')):
self.exploredLeagues = pickle.load(open(os.path.join(database, 'player_listing', self.region, 'exploredLeagues'), 'rb'))
else:
self.exploredLeagues = []
if from_scratch:
print(region, 'first time exploration, checking challenger and master leagues', file=sys.stderr)
# only the first time
if fast: # only the challenger and master league, no need to explore anything
if 'challenger' in self.players:
challLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/challengerleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in challLeague['entries']:
self.players['challenger'].append(e['summonerId'])
if 'grandmaster' in self.players:
grandmasterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/grandmasterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in grandmasterLeague['entries']:
self.players['grandmaster'].append(e['summonerId'])
if 'master' in self.players:
masterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/masterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in masterLeague['entries']:
self.players['master'].append(e['summonerId'])
else:
challLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/challengerleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in challLeague['entries']:
self.to_explore.append(e['summonerId'])
grandmasterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/grandmasterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in grandmasterLeague['entries']:
self.to_explore.append(e['summonerId'])
masterLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/masterleagues/by-queue/RANKED_SOLO_5x5' % self.region)
for e in masterLeague['entries']:
self.to_explore.append(e['summonerId'])
self.exploredPlayers = list(self.to_explore)
def explore(self):
print(self.region, len(self.to_explore), 'players left to explore', file=sys.stderr)
while self.to_explore:
if time.time() > self.nextSave:
print(self.region, len(self.to_explore), 'players left to explore', file=sys.stderr)
print(self.region, 'saving...', file=sys.stderr)
self.save()
self.nextSave = time.time() + SAVE_INTERVAL
sumID = self.to_explore.pop(0) # strongest players first
try:
accountID = self.api.getData('https://%s.api.riotgames.com/lol/summoner/v4/summoners/%s' % (self.region, sumID))['accountId']
games = \
self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matchlists/by-account/%s' % (self.region, accountID), {'queue': 420})[
'matches']
playerLeagueList = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/positions/by-summoner/%s' % (self.region, sumID))
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError as e:
print(e, file=sys.stderr)
continue
# we check that the summoner is in one of the leagues we want
playerSoloQLeague = None
for league in playerLeagueList:
if league['queueType'] == 'RANKED_SOLO_5x5':
playerSoloQLeague = league
break
if not playerSoloQLeague:
print('no soloQ rank: ', self.region, sumID)
continue
playerLeagueTier = playerSoloQLeague['tier'].lower()
playerLeagueId = playerSoloQLeague['leagueId']
if playerLeagueTier not in self.leagues:
print('refused tier:', self.region, sumID, playerLeagueTier)
continue
self.players[playerLeagueTier].append(sumID)
print('added:', self.region, sumID, playerLeagueTier)
# We add all the people in the same league for exploration
if playerLeagueId not in self.exploredLeagues:
self.exploredLeagues.append(playerLeagueId)
print('new league found:', self.region, playerLeagueTier, playerLeagueId)
try:
                    newLeague = self.api.getData('https://%s.api.riotgames.com/lol/league/v4/leagues/%s' % (self.region, playerLeagueId))
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError as e:
print(e, file=sys.stderr)
continue
for e in newLeague['entries']:
sumID = int(e['summonerId'])
if sumID not in self.exploredPlayers:
self.to_explore.append(sumID)
self.exploredPlayers.append(sumID)
# old API
# for e in playerSoloQLeague['entries']:
# sumID = int(e['summonerId'])
# if sumID not in self.exploredPlayers:
# self.to_explore.append(sumID)
# self.exploredPlayers.append(sumID)
# We have to explore some games to get to other leagues
# We hope that at least 1 player of each league has played within the time window
for game in games: # from most recent to oldest
# the same game can come up to 10 times, so it's better to not make useless API calls
if game['gameId'] in self.exploredGames:
continue
self.exploredGames.append(game['gameId'])
gameID = str(game['gameId'])
timestamp = game['timestamp']
if timestamp < MAX_DEPTH: # game is too old?
break
try:
gameData = self.api.getData('https://%s.api.riotgames.com/lol/match/v4/matches/%s' % (self.region, gameID))
except ApiError403 as e:
print(e, file=sys.stderr)
return e
except ApiError as e:
print(e, file=sys.stderr)
continue
# adding all the non explored players from the game
for participant in gameData['participantIdentities']:
sumID = participant['player']['summonerId']
if sumID not in self.exploredPlayers:
self.to_explore.append(sumID)
self.exploredPlayers.append(sumID)
return None # everything explored
def save(self):
while True:
if not os.path.isdir(self.database):
print(self.region, 'cannot access the local database', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
try:
pickle.dump(self.players, open(os.path.join(self.database, 'player_listing', self.region, 'players'), 'wb'))
pickle.dump(self.exploredPlayers, open(os.path.join(self.database, 'player_listing', self.region, 'exploredPlayers'), 'wb'))
pickle.dump(self.exploredLeagues, open(os.path.join(self.database, 'player_listing', self.region, 'exploredLeagues'), 'wb'))
pickle.dump(self.exploredGames, open(os.path.join(self.database, 'player_listing', self.region, 'exploredGames'), 'wb'))
pickle.dump(self.to_explore, open(os.path.join(self.database, 'player_listing', self.region, 'to_explore'), 'wb'))
except (PickleError, FileNotFoundError) as e:
print(e, file=sys.stderr)
print(self.region, 'saving failed', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
break
def keepExploring(database, leagues, region, attempts=ATTEMPTS):
print(region, 'starting player listing', file=sys.stderr)
pl = None
    if list(set(leagues) - {'challenger', 'grandmaster', 'master'}):  # check whether it is necessary to explore the lower leagues
while True:
if not pl:
try:
pl = PlayerListing(database, leagues, region)
except ApiError403 as e:
print('FATAL ERROR', region, e, file=sys.stderr)
break
except ApiError as e:
print(e, file=sys.stderr)
attempts -= 1
if attempts <= 0:
                        print(region, 'initial connection failed. No more connection attempts.', file=sys.stderr)
                        break
                    print(region, 'initial connection failed. Retrying in {} seconds. Attempts left: {}'.format(ATTEMPTS_WAIT, attempts), file=sys.stderr)
time.sleep(ATTEMPTS_WAIT)
continue
except (PickleError, FileNotFoundError) as e:
print(e, file=sys.stderr)
print(region, 'cannot access the local database', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
try:
e = pl.explore()
            except KeyError:  # happens sometimes; looks like some data is corrupted
continue
if e is not None:
print('FATAL ERROR', region, e, file=sys.stderr)
else:
                print(region, 'all players explored and downloaded', file=sys.stderr)
break
else: # only master/challenger league
while True:
if not pl:
try:
pl = PlayerListing(database, leagues, region, fast=True)
except ApiError403 as e:
print('FATAL ERROR', region, e, file=sys.stderr)
break
except ApiError as e:
print(e, file=sys.stderr)
attempts -= 1
if attempts <= 0:
                        print(region, 'initial connection failed. No more connection attempts.', file=sys.stderr)
                        break
                    print(region, 'initial connection failed. Retrying in {} seconds. Attempts left: {}'.format(ATTEMPTS_WAIT, attempts), file=sys.stderr)
time.sleep(ATTEMPTS_WAIT)
continue
except (PickleError, FileNotFoundError) as e:
print(e, file=sys.stderr)
print(region, 'cannot access the local database', file=sys.stderr)
time.sleep(DATABASE_WAIT)
continue
# No need to explore
            print(region, 'all players explored and downloaded', file=sys.stderr)
break
# we finally save the players list
if pl is not None:
print(region, 'Saving players list', file=sys.stderr)
pl.save()
def run(mode):
assert isinstance(mode, Modes.Base_Mode), 'Unrecognized mode {}'.format(mode)
keprocs = []
for region in mode.REGIONS:
keprocs.append(multiprocessing.Process(target=keepExploring, args=(mode.DATABASE, mode.LEAGUES, region)))
keprocs[-1].start()
for keproc in keprocs:
keproc.join()
print('-- Listing complete --')
if __name__ == '__main__':
m = Modes.Base_Mode()
run(m)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base template using which the apis_map.py is generated."""
class APIDef(object):
"""Struct for info required to instantiate clients/messages for API versions.
Attributes:
class_path: str, Path to the package containing api related modules.
client_classpath: str, Relative path to the client class for an API version.
messages_modulepath: str, Relative path to the messages module for an
API version.
default_version: bool, Whether this API version is the default version for
the API.
"""
def __init__(self,
class_path,
client_classpath,
messages_modulepath,
default_version=False):
self.class_path = class_path
self.client_classpath = client_classpath
self.messages_modulepath = messages_modulepath
self.default_version = default_version
@property
def client_full_classpath(self):
return self.class_path + '.' + self.client_classpath
@property
def messages_full_modulepath(self):
return self.class_path + '.' + self.messages_modulepath
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def get_init_source(self):
src_fmt = 'APIDef("{0}", "{1}", "{2}", {3})'
return src_fmt.format(self.class_path,
self.client_classpath,
self.messages_modulepath,
self.default_version)
def __repr__(self):
return self.get_init_source()
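# Illustrative example (not part of the generated map): for the 'bigquery' entry
# below, client_full_classpath resolves to
# 'googlecloudsdk.third_party.apis.bigquery.v2.bigquery_v2_client.BigqueryV2' and
# messages_full_modulepath to
# 'googlecloudsdk.third_party.apis.bigquery.v2.bigquery_v2_messages'.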
MAP = {
'apikeys': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.apikeys.v1',
client_classpath='apikeys_v1_client.ApikeysV1',
messages_modulepath='apikeys_v1_messages',
default_version=True
),
},
'appengine': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.appengine.v1',
client_classpath='appengine_v1_client.AppengineV1',
messages_modulepath='appengine_v1_messages',
default_version=True
),
},
'bigquery': {
'v2': APIDef(
class_path='googlecloudsdk.third_party.apis.bigquery.v2',
client_classpath='bigquery_v2_client.BigqueryV2',
messages_modulepath='bigquery_v2_messages',
default_version=True
),
},
'bigtableadmin': {
'v2': APIDef(
class_path='googlecloudsdk.third_party.apis.bigtableadmin.v2',
client_classpath='bigtableadmin_v2_client.BigtableadminV2',
messages_modulepath='bigtableadmin_v2_messages',
default_version=True
),
},
'bigtableclusteradmin': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.bigtableclusteradmin.v1',
client_classpath='bigtableclusteradmin_v1_client.BigtableclusteradminV1',
messages_modulepath='bigtableclusteradmin_v1_messages',
default_version=True
),
},
'bio': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.bio.v1',
client_classpath='bio_v1_client.BioV1',
messages_modulepath='bio_v1_messages',
default_version=True
),
},
'cloudbilling': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudbilling.v1',
client_classpath='cloudbilling_v1_client.CloudbillingV1',
messages_modulepath='cloudbilling_v1_messages',
default_version=True
),
},
'cloudbuild': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudbuild.v1',
client_classpath='cloudbuild_v1_client.CloudbuildV1',
messages_modulepath='cloudbuild_v1_messages',
default_version=True
),
},
'clouddebugger': {
'v2': APIDef(
class_path='googlecloudsdk.third_party.apis.clouddebugger.v2',
client_classpath='clouddebugger_v2_client.ClouddebuggerV2',
messages_modulepath='clouddebugger_v2_messages',
default_version=True
),
},
'clouderrorreporting': {
'v1beta1': APIDef(
class_path='googlecloudsdk.third_party.apis.clouderrorreporting.v1beta1',
client_classpath='clouderrorreporting_v1beta1_client.ClouderrorreportingV1beta1',
messages_modulepath='clouderrorreporting_v1beta1_messages',
default_version=True
),
},
'cloudfunctions': {
'v1beta2': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudfunctions.v1beta2',
client_classpath='cloudfunctions_v1beta2_client.CloudfunctionsV1beta2',
messages_modulepath='cloudfunctions_v1beta2_messages',
default_version=True
),
},
'cloudkms': {
'v1beta1': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudkms.v1beta1',
client_classpath='cloudkms_v1beta1_client.CloudkmsV1beta1',
messages_modulepath='cloudkms_v1beta1_messages',
default_version=True
),
},
'cloudresourcemanager': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudresourcemanager.v1',
client_classpath='cloudresourcemanager_v1_client.CloudresourcemanagerV1',
messages_modulepath='cloudresourcemanager_v1_messages',
default_version=True
),
'v1beta1': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudresourcemanager.v1beta1',
client_classpath='cloudresourcemanager_v1beta1_client.CloudresourcemanagerV1beta1',
messages_modulepath='cloudresourcemanager_v1beta1_messages',
default_version=False
),
'v2alpha1': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudresourcemanager.v2alpha1',
client_classpath='cloudresourcemanager_v2alpha1_client.CloudresourcemanagerV2alpha1',
messages_modulepath='cloudresourcemanager_v2alpha1_messages',
default_version=False
),
},
'cloudresourcesearch': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.cloudresourcesearch.v1',
client_classpath='cloudresourcesearch_v1_client.CloudresourcesearchV1',
messages_modulepath='cloudresourcesearch_v1_messages',
default_version=True
),
},
'clouduseraccounts': {
'alpha': APIDef(
class_path='googlecloudsdk.third_party.apis.clouduseraccounts.alpha',
client_classpath='clouduseraccounts_alpha_client.ClouduseraccountsAlpha',
messages_modulepath='clouduseraccounts_alpha_messages',
default_version=False
),
'beta': APIDef(
class_path='googlecloudsdk.third_party.apis.clouduseraccounts.beta',
client_classpath='clouduseraccounts_beta_client.ClouduseraccountsBeta',
messages_modulepath='clouduseraccounts_beta_messages',
default_version=True
),
},
'compute': {
'alpha': APIDef(
class_path='googlecloudsdk.third_party.apis.compute.alpha',
client_classpath='compute_alpha_client.ComputeAlpha',
messages_modulepath='compute_alpha_messages',
default_version=False
),
'beta': APIDef(
class_path='googlecloudsdk.third_party.apis.compute.beta',
client_classpath='compute_beta_client.ComputeBeta',
messages_modulepath='compute_beta_messages',
default_version=False
),
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.compute.v1',
client_classpath='compute_v1_client.ComputeV1',
messages_modulepath='compute_v1_messages',
default_version=True
),
},
'container': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.container.v1',
client_classpath='container_v1_client.ContainerV1',
messages_modulepath='container_v1_messages',
default_version=True
),
},
'containeranalysis': {
'v1alpha1': APIDef(
class_path='googlecloudsdk.third_party.apis.containeranalysis.v1alpha1',
client_classpath='containeranalysis_v1alpha1_client.ContaineranalysisV1alpha1',
messages_modulepath='containeranalysis_v1alpha1_messages',
default_version=True
),
},
'dataflow': {
'v1b3': APIDef(
class_path='googlecloudsdk.third_party.apis.dataflow.v1b3',
client_classpath='dataflow_v1b3_client.DataflowV1b3',
messages_modulepath='dataflow_v1b3_messages',
default_version=True
),
},
'dataproc': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.dataproc.v1',
client_classpath='dataproc_v1_client.DataprocV1',
messages_modulepath='dataproc_v1_messages',
default_version=True
),
},
'deploymentmanager': {
'alpha': APIDef(
class_path='googlecloudsdk.third_party.apis.deploymentmanager.alpha',
client_classpath='deploymentmanager_alpha_client.DeploymentmanagerAlpha',
messages_modulepath='deploymentmanager_alpha_messages',
default_version=False
),
'v2': APIDef(
class_path='googlecloudsdk.third_party.apis.deploymentmanager.v2',
client_classpath='deploymentmanager_v2_client.DeploymentmanagerV2',
messages_modulepath='deploymentmanager_v2_messages',
default_version=True
),
'v2beta': APIDef(
class_path='googlecloudsdk.third_party.apis.deploymentmanager.v2beta',
client_classpath='deploymentmanager_v2beta_client.DeploymentmanagerV2beta',
messages_modulepath='deploymentmanager_v2beta_messages',
default_version=False
),
},
'dns': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.dns.v1',
client_classpath='dns_v1_client.DnsV1',
messages_modulepath='dns_v1_messages',
default_version=True
),
'v1beta1': APIDef(
class_path='googlecloudsdk.third_party.apis.dns.v1beta1',
client_classpath='dns_v1beta1_client.DnsV1beta1',
messages_modulepath='dns_v1beta1_messages',
default_version=False
),
},
'genomics': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.genomics.v1',
client_classpath='genomics_v1_client.GenomicsV1',
messages_modulepath='genomics_v1_messages',
default_version=True
),
'v1alpha2': APIDef(
class_path='googlecloudsdk.third_party.apis.genomics.v1alpha2',
client_classpath='genomics_v1alpha2_client.GenomicsV1alpha2',
messages_modulepath='genomics_v1alpha2_messages',
default_version=False
),
},
'iam': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.iam.v1',
client_classpath='iam_v1_client.IamV1',
messages_modulepath='iam_v1_messages',
default_version=True
),
},
'logging': {
'v1beta3': APIDef(
class_path='googlecloudsdk.third_party.apis.logging.v1beta3',
client_classpath='logging_v1beta3_client.LoggingV1beta3',
messages_modulepath='logging_v1beta3_messages',
default_version=True
),
'v2': APIDef(
class_path='googlecloudsdk.third_party.apis.logging.v2',
client_classpath='logging_v2_client.LoggingV2',
messages_modulepath='logging_v2_messages',
default_version=False
),
},
'manager': {
'v1beta2': APIDef(
class_path='googlecloudsdk.third_party.apis.manager.v1beta2',
client_classpath='manager_v1beta2_client.ManagerV1beta2',
messages_modulepath='manager_v1beta2_messages',
default_version=True
),
},
'ml': {
'v1beta1': APIDef(
class_path='googlecloudsdk.third_party.apis.ml.v1beta1',
client_classpath='ml_v1beta1_client.MlV1beta1',
messages_modulepath='ml_v1beta1_messages',
default_version=True
),
},
'pubsub': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.pubsub.v1',
client_classpath='pubsub_v1_client.PubsubV1',
messages_modulepath='pubsub_v1_messages',
default_version=True
),
},
'replicapoolupdater': {
'v1beta1': APIDef(
class_path='googlecloudsdk.third_party.apis.replicapoolupdater.v1beta1',
client_classpath='replicapoolupdater_v1beta1_client.ReplicapoolupdaterV1beta1',
messages_modulepath='replicapoolupdater_v1beta1_messages',
default_version=True
),
},
'runtimeconfig': {
'v1beta1': APIDef(
class_path='googlecloudsdk.third_party.apis.runtimeconfig.v1beta1',
client_classpath='runtimeconfig_v1beta1_client.RuntimeconfigV1beta1',
messages_modulepath='runtimeconfig_v1beta1_messages',
default_version=True
),
},
'servicemanagement': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.servicemanagement.v1',
client_classpath='servicemanagement_v1_client.ServicemanagementV1',
messages_modulepath='servicemanagement_v1_messages',
default_version=True
),
},
'serviceregistry': {
'v1alpha': APIDef(
class_path='googlecloudsdk.third_party.apis.serviceregistry.v1alpha',
client_classpath='serviceregistry_v1alpha_client.ServiceregistryV1alpha',
messages_modulepath='serviceregistry_v1alpha_messages',
default_version=True
),
},
'source': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.source.v1',
client_classpath='source_v1_client.SourceV1',
messages_modulepath='source_v1_messages',
default_version=True
),
},
'sourcerepo': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.sourcerepo.v1',
client_classpath='sourcerepo_v1_client.SourcerepoV1',
messages_modulepath='sourcerepo_v1_messages',
default_version=True
),
},
'sqladmin': {
'v1beta3': APIDef(
class_path='googlecloudsdk.third_party.apis.sqladmin.v1beta3',
client_classpath='sqladmin_v1beta3_client.SqladminV1beta3',
messages_modulepath='sqladmin_v1beta3_messages',
default_version=True
),
'v1beta4': APIDef(
class_path='googlecloudsdk.third_party.apis.sqladmin.v1beta4',
client_classpath='sqladmin_v1beta4_client.SqladminV1beta4',
messages_modulepath='sqladmin_v1beta4_messages',
default_version=False
),
},
'storage': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.storage.v1',
client_classpath='storage_v1_client.StorageV1',
messages_modulepath='storage_v1_messages',
default_version=True
),
},
'testing': {
'v1': APIDef(
class_path='googlecloudsdk.third_party.apis.testing.v1',
client_classpath='testing_v1_client.TestingV1',
messages_modulepath='testing_v1_messages',
default_version=True
),
},
'toolresults': {
'v1beta3': APIDef(
class_path='googlecloudsdk.third_party.apis.toolresults.v1beta3',
client_classpath='toolresults_v1beta3_client.ToolresultsV1beta3',
messages_modulepath='toolresults_v1beta3_messages',
default_version=True
),
},
}
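# Illustrative sketch only (not part of the generated mapping): one way a caller
# might resolve the default APIDef for an API family from MAP and derive its full
# client classpath, using only the APIDef/MAP definitions above. The helper name
# is a hypothetical example, not an existing function of this module.
def _default_api_def(api_name):
    """Return (version, APIDef) for the default version of ``api_name``."""
    for version, api_def in MAP.get(api_name, {}).items():
        if api_def.default_version:
            return version, api_def
    raise KeyError('No default version registered for API: %s' % api_name)
# For example, _default_api_def('compute') would yield ('v1', <APIDef ...>), and
# _default_api_def('compute')[1].client_full_classpath gives the dotted path to
# the ComputeV1 client class.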
|
|
# Django & Python
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static
from django.conf import settings
import json
# Models
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Policy
# View
from teacher.views import policy
# Constants
TEST_USER_EMAIL = "[email protected]"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "ContinentalUnion"
TEST_USER_EMAIL2 = "[email protected]"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "Evolvers"
# Create your tests here.
class PolicyTestCase(TestCase):
def tearDown(self):
policies = Policy.objects.all()
for policy in policies:
policy.delete()
courses = Course.objects.all()
for course in courses:
course.delete()
User.objects.all().delete()
def setUp(self):
# Create our Trudy user.
User.objects.create_user(
email=TEST_USER_EMAIL2,
username=TEST_USER_USERNAME2,
password=TEST_USER_PASSWORD2
)
user = User.objects.get(email=TEST_USER_EMAIL2)
teacher = Teacher.objects.create(user=user)
# Create our main Teacher user.
User.objects.create_user(
email=TEST_USER_EMAIL,
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD,
)
user = User.objects.get(email=TEST_USER_EMAIL)
teacher = Teacher.objects.create(user=user)
# Create a test course.
Course.objects.create(
id=1,
title="Comics Book Course",
sub_title="The definitive course on comics!",
category="",
teacher=teacher,
)
def get_logged_in_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME,
password=TEST_USER_PASSWORD
)
return client
def get_logged_in_trudy_client(self):
client = Client()
client.login(
username=TEST_USER_USERNAME2,
password=TEST_USER_PASSWORD2
)
return client
def test_url_resolves_to_policy_page_view(self):
found = resolve('/teacher/course/1/policy')
self.assertEqual(found.func, policy.policy_page)
def test_policy_page_without_pdf_file(self):
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/policy')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course',response.content)
self.assertIn(b'<h1>Upload',response.content)
def test_policy_modal(self):
client = self.get_logged_in_client()
response = client.post('/teacher/course/1/policy_modal')
self.assertEqual(response.status_code, 200)
self.assertIn(b'policy_modal',response.content)
def test_policy_page_with_pdf_file(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_policy',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
response = client.post('/teacher/course/1/policy')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course',response.content)
self.assertIn(b'ajax_delete_policy',response.content)
self.assertIn(b'PDF RESULT',response.content)
try:
Policy.objects.get(policy_id=1).delete()
except Policy.DoesNotExist:
pass
try:
Policy.objects.get(policy_id=2).delete()
except Policy.DoesNotExist:
pass
try:
Policy.objects.get(policy_id=3).delete()
except Policy.DoesNotExist:
pass
def test_save_policy(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_policy',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
try:
Policy.objects.get(policy_id=1).delete()
except Policy.DoesNotExist:
pass
try:
Policy.objects.get(policy_id=2).delete()
except Policy.DoesNotExist:
pass
try:
Policy.objects.get(policy_id=3).delete()
except Policy.DoesNotExist:
pass
def test_delete_policy_with_submission_and_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_policy',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
response = client.post('/teacher/course/1/delete_policy',{
'policy_id': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'deleted')
self.assertEqual(array['status'], 'success')
def test_delete_policy_with_submission_and_incorrect_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
client = self.get_logged_in_client()
file_path = settings.MEDIA_ROOT + '/sample.pdf'
with open(file_path, 'rb') as fp:
self.assertTrue(fp is not None)
response = client.post('/teacher/course/1/save_policy',{
'file': fp,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
client.logout()
client = self.get_logged_in_trudy_client()
response = client.post('/teacher/course/1/delete_policy',{
'policy_id': 1,
}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'unauthorized deletion')
self.assertEqual(array['status'], 'failed')
|
|
from __future__ import unicode_literals
from django.contrib.admin import TabularInline, ModelAdmin
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings, RequestFactory
# local test models
from .admin import InnerInline, site as admin_site
from .models import (Holder, Inner, Holder2, Inner2, Holder3, Inner3, Person,
OutfitItem, Fashionista, Teacher, Parent, Child, Author, Book, Profile,
ProfileCollection, ParentModelWithCustomPk, ChildModel1, ChildModel2,
Sighting, Novel, Chapter, FootNote, BinaryTree, SomeParentModel,
SomeChildModel)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class TestInline(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
self.change_url = '/admin/admin_inlines/holder/%i/' % holder.id
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
self.factory = RequestFactory()
def tearDown(self):
self.client.logout()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
response = self.client.get(self.change_url)
inner_formset = response.context['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/'
% holder.id)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get('/admin/admin_inlines/author/add/')
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author-book relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
# Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post('/admin/admin_inlines/fashionista/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post('/admin/admin_inlines/titlecollection/add/', data)
# Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist nonfield"><li>The two titles must be the same</li></ul></td></tr>')
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get('/admin/admin_inlines/novel/add/')
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="chapter_set-group">')
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get('/admin/admin_inlines/poll/add/')
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="question_set-group">')
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get('/admin/admin_inlines/holder4/add/')
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Awesome tabular help text is awesome.)" title="Awesome tabular help text is awesome." />', 1)
# ReadOnly fields
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Help text for ReadOnlyInline)" title="Help text for ReadOnlyInline" />', 1)
def test_inline_hidden_field_no_column(self):
"""#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
parent = SomeParentModel.objects.create(name='a')
SomeChildModel.objects.create(name='b', position='0', parent=parent)
SomeChildModel.objects.create(name='c', position='1', parent=parent)
response = self.client.get('/admin/admin_inlines/someparentmodel/%s/' % parent.pk)
self.assertNotContains(response, '<td class="field-position">')
self.assertContains(response, (
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1" />'))
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response,
'<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-1-0-name" type="text" class="vTextField" '
'name="-1-0-name" maxlength="100" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-2-0-name" type="text" class="vTextField" '
'name="-2-0-name" maxlength="100" />', html=True)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for locales that use
thousand separators
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/' % holder.id)
inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for models with a
custom primary key field. Bug #18433.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get('/admin/admin_inlines/parentmodelwithcustompk/foo/')
child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
Ensure that an object can be created with inlines when it inherits
another class. Bug #19524.
"""
data = {
'name': 'Martian',
'sighting_set-TOTAL_FORMS': 1,
'sighting_set-INITIAL_FORMS': 0,
'sighting_set-MAX_NUM_FORMS': 0,
'sighting_set-0-place': 'Zone 51',
'_save': 'Save',
}
response = self.client.post('/admin/admin_inlines/extraterrestrial/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = '<input id="id_binarytree_set-MAX_NUM_FORMS" name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
# The total number of forms will remain the same in either case
total_forms_hidden = '<input id="id_binarytree_set-TOTAL_FORMS" name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
response = self.client.get('/admin/admin_inlines/binarytree/add/')
self.assertContains(response, max_forms_input % 3)
self.assertContains(response, total_forms_hidden)
response = self.client.get("/admin/admin_inlines/binarytree/%d/" % bt_head.id)
self.assertContains(response, max_forms_input % 2)
self.assertContains(response, total_forms_hidden)
def test_min_num(self):
"""
Ensure that min_num and extra determine number of forms.
"""
class MinNumInline(TabularInline):
model = BinaryTree
min_num = 2
extra = 3
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = '<input id="id_binarytree_set-MIN_NUM_FORMS" name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2" />'
total_forms = '<input id="id_binarytree_set-TOTAL_FORMS" name="binarytree_set-TOTAL_FORMS" type="hidden" value="5" />'
request = self.factory.get('/admin/admin_inlines/binarytree/add/')
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms)
self.assertContains(response, total_forms)
def test_custom_min_num(self):
"""
Ensure that get_min_num is called and used correctly.
"""
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
class MinNumInline(TabularInline):
model = BinaryTree
extra = 3
def get_min_num(self, request, obj=None, **kwargs):
if obj:
return 5
return 2
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = '<input id="id_binarytree_set-MIN_NUM_FORMS" name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d" />'
total_forms = '<input id="id_binarytree_set-TOTAL_FORMS" name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d" />'
request = self.factory.get('/admin/admin_inlines/binarytree/add/')
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms % 2)
self.assertContains(response, total_forms % 5)
request = self.factory.get("/admin/admin_inlines/binarytree/%d/" % bt_head.id)
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
self.assertContains(response, min_forms % 5)
self.assertContains(response, total_forms % 8)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input id="id_nonautopkbook_set-0-rand_pk" name="nonautopkbook_set-0-rand_pk" type="hidden" />',
html=True)
self.assertContains(response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
html=True)
def test_inline_editable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" name="editablepkbook_set-0-manual_pk" type="text" />',
html=True, count=1)
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" name="editablepkbook_set-2-0-manual_pk" type="text" />',
html=True, count=1)
def test_stacked_inline_edit_form_contains_has_original_class(self):
holder = Holder.objects.create(dummy=1)
holder.inner_set.create(dummy=1)
response = self.client.get('/admin/admin_inlines/holder/%s/' % holder.pk)
self.assertContains(
response,
'<div class="inline-related has_original" id="inner_set-0">',
count=1
)
self.assertContains(
response,
'<div class="inline-related" id="inner_set-1">',
count=1
)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class TestInlineMedia(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder3/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineAdminForm(TestCase):
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class TestInlineProtectedOnDelete(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name='Lord of the rings')
chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
change_url = '/admin/admin_inlines/novel/%i/' % lotr.id
response = self.client.get(change_url)
data = {
'name': lotr.name,
'chapter_set-TOTAL_FORMS': 1,
'chapter_set-INITIAL_FORMS': 1,
'chapter_set-MAX_NUM_FORMS': 1000,
'_save': 'Save',
'chapter_set-0-id': chapter.id,
'chapter_set-0-name': chapter.name,
'chapter_set-0-novel': lotr.id,
'chapter_set-0-DELETE': 'on'
}
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note))
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('secret')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name='The Author')
book = author.books.create(name='The inline Book')
self.author_change_url = '/admin/admin_inlines/author/%i/' % author.id
# Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
self.inner2_id = inner2.id
self.assertEqual(
self.client.login(username='admin', password='secret'),
True)
def tearDown(self):
self.client.logout()
def test_inline_add_m2m_noperm(self):
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get('/admin/admin_inlines/holder2/add/')
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/holder2/add/')
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-book relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_Author_books-0-id" '
'value="%i" name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id, html=True)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
# max-num 0 means we can't add new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" '
'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, three for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-views-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_add_stackeds(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector(
'%s .inline-deletelink' % inline_id):
delete_link.click()
self.assertEqual(rows_length(), 3)
def test_add_inlines(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
inline form.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Check that there's only one inline to start with and that it has the
# correct ID.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 1)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[0].get_attribute('id'),
'profile_set-0')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
# Add an inline
self.selenium.find_element_by_link_text('Add another Profile').click()
# Check that the inline has been added, that it has the right id, and
# that it contains the right fields.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 2)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
# Let's add another one to be sure
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 3)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
# Enter some data and click 'Save'
self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
# Check that the objects have been created in the database
self.assertEqual(ProfileCollection.objects.all().count(), 1)
self.assertEqual(Profile.objects.all().count(), 3)
def test_delete_inlines(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 5)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
# Click on a few delete buttons
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
# Verify that they're gone and that the IDs have been re-sequenced
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 3)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_alternating_rows(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
|
"""Onboarding views."""
import asyncio
from http import HTTPStatus
from aiohttp.web_exceptions import HTTPUnauthorized
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.auth import indieauth
from homeassistant.components.http.const import KEY_HASS_REFRESH_TOKEN_ID
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.core import callback
from homeassistant.helpers.system_info import async_get_system_info
from .const import (
DEFAULT_AREAS,
DOMAIN,
STEP_ANALYTICS,
STEP_CORE_CONFIG,
STEP_INTEGRATION,
STEP_USER,
STEPS,
)
async def async_setup(hass, data, store):
"""Set up the onboarding view."""
hass.http.register_view(OnboardingView(data, store))
hass.http.register_view(InstallationTypeOnboardingView(data))
hass.http.register_view(UserOnboardingView(data, store))
hass.http.register_view(CoreConfigOnboardingView(data, store))
hass.http.register_view(IntegrationOnboardingView(data, store))
hass.http.register_view(AnalyticsOnboardingView(data, store))
class OnboardingView(HomeAssistantView):
"""Return the onboarding status."""
requires_auth = False
url = "/api/onboarding"
name = "api:onboarding"
def __init__(self, data, store):
"""Initialize the onboarding view."""
self._store = store
self._data = data
async def get(self, request):
"""Return the onboarding status."""
return self.json(
[{"step": key, "done": key in self._data["done"]} for key in STEPS]
)
class InstallationTypeOnboardingView(HomeAssistantView):
"""Return the installation type during onboarding."""
requires_auth = False
url = "/api/onboarding/installation_type"
name = "api:onboarding:installation_type"
def __init__(self, data):
"""Initialize the onboarding installation type view."""
self._data = data
async def get(self, request):
"""Return the onboarding status."""
if self._data["done"]:
raise HTTPUnauthorized()
hass = request.app["hass"]
info = await async_get_system_info(hass)
return self.json({"installation_type": info["installation_type"]})
class _BaseOnboardingView(HomeAssistantView):
"""Base class for onboarding."""
step = None
def __init__(self, data, store):
"""Initialize the onboarding view."""
self._store = store
self._data = data
self._lock = asyncio.Lock()
@callback
def _async_is_done(self):
"""Return if this step is done."""
return self.step in self._data["done"]
async def _async_mark_done(self, hass):
"""Mark step as done."""
self._data["done"].append(self.step)
await self._store.async_save(self._data)
if set(self._data["done"]) == set(STEPS):
hass.data[DOMAIN] = True
class UserOnboardingView(_BaseOnboardingView):
"""View to handle create user onboarding step."""
url = "/api/onboarding/users"
name = "api:onboarding:users"
requires_auth = False
step = STEP_USER
@RequestDataValidator(
vol.Schema(
{
vol.Required("name"): str,
vol.Required("username"): str,
vol.Required("password"): str,
vol.Required("client_id"): str,
vol.Required("language"): str,
}
)
)
async def post(self, request, data):
"""Handle user creation, area creation."""
hass = request.app["hass"]
async with self._lock:
if self._async_is_done():
return self.json_message("User step already done", HTTPStatus.FORBIDDEN)
provider = _async_get_hass_provider(hass)
await provider.async_initialize()
user = await hass.auth.async_create_user(
data["name"], group_ids=[GROUP_ID_ADMIN]
)
await hass.async_add_executor_job(
provider.data.add_auth, data["username"], data["password"]
)
credentials = await provider.async_get_or_create_credentials(
{"username": data["username"]}
)
await provider.data.async_save()
await hass.auth.async_link_user(user, credentials)
if "person" in hass.config.components:
await hass.components.person.async_create_person(
data["name"], user_id=user.id
)
# Create default areas using the user's supplied language.
translations = await hass.helpers.translation.async_get_translations(
data["language"], "area", DOMAIN
)
area_registry = await hass.helpers.area_registry.async_get_registry()
for area in DEFAULT_AREAS:
area_registry.async_create(
translations[f"component.onboarding.area.{area}"]
)
await self._async_mark_done(hass)
# Return authorization code for fetching tokens and connect
# during onboarding.
auth_code = hass.components.auth.create_auth_code(
data["client_id"], credentials
)
return self.json({"auth_code": auth_code})
class CoreConfigOnboardingView(_BaseOnboardingView):
"""View to finish core config onboarding step."""
url = "/api/onboarding/core_config"
name = "api:onboarding:core_config"
step = STEP_CORE_CONFIG
async def post(self, request):
"""Handle finishing core config step."""
hass = request.app["hass"]
async with self._lock:
if self._async_is_done():
return self.json_message(
"Core config step already done", HTTPStatus.FORBIDDEN
)
await self._async_mark_done(hass)
await hass.config_entries.flow.async_init(
"met", context={"source": "onboarding"}
)
if (
hass.components.hassio.is_hassio()
and "raspberrypi" in hass.components.hassio.get_core_info()["machine"]
):
await hass.config_entries.flow.async_init(
"rpi_power", context={"source": "onboarding"}
)
return self.json({})
class IntegrationOnboardingView(_BaseOnboardingView):
"""View to finish integration onboarding step."""
url = "/api/onboarding/integration"
name = "api:onboarding:integration"
step = STEP_INTEGRATION
@RequestDataValidator(
vol.Schema({vol.Required("client_id"): str, vol.Required("redirect_uri"): str})
)
async def post(self, request, data):
"""Handle token creation."""
hass = request.app["hass"]
refresh_token_id = request[KEY_HASS_REFRESH_TOKEN_ID]
async with self._lock:
if self._async_is_done():
return self.json_message(
"Integration step already done", HTTPStatus.FORBIDDEN
)
await self._async_mark_done(hass)
# Validate client ID and redirect uri
if not await indieauth.verify_redirect_uri(
request.app["hass"], data["client_id"], data["redirect_uri"]
):
return self.json_message(
"invalid client id or redirect uri", HTTPStatus.BAD_REQUEST
)
refresh_token = await hass.auth.async_get_refresh_token(refresh_token_id)
if refresh_token is None or refresh_token.credential is None:
return self.json_message(
"Credentials for user not available", HTTPStatus.FORBIDDEN
)
# Return authorization code so we can redirect user and log them in
auth_code = hass.components.auth.create_auth_code(
data["client_id"], refresh_token.credential
)
return self.json({"auth_code": auth_code})
class AnalyticsOnboardingView(_BaseOnboardingView):
"""View to finish analytics onboarding step."""
url = "/api/onboarding/analytics"
name = "api:onboarding:analytics"
step = STEP_ANALYTICS
async def post(self, request):
"""Handle finishing analytics step."""
hass = request.app["hass"]
async with self._lock:
if self._async_is_done():
return self.json_message(
"Analytics config step already done", HTTPStatus.FORBIDDEN
)
await self._async_mark_done(hass)
return self.json({})
@callback
def _async_get_hass_provider(hass):
"""Get the Home Assistant auth provider."""
for prv in hass.auth.auth_providers:
if prv.type == "homeassistant":
return prv
raise RuntimeError("No Home Assistant provider found")
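# Illustrative sketch only (not part of this module): the views above assume
# ``data`` is a dict shaped like {"done": [<step name>, ...]} and that ``store``
# exposes an awaitable ``async_save(data)``. A minimal in-memory stand-in for
# exercising the views in isolation might look like this; the class name is
# hypothetical.
class _InMemoryOnboardingStore:
    """Store stub that records the last payload passed to async_save."""
    def __init__(self):
        self.saved = None
    async def async_save(self, data):
        self.saved = data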
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_gallery_request(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class GalleryApplicationsOperations(object):
"""GalleryApplicationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplication",
**kwargs: Any
) -> "_models.GalleryApplication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_application, 'GalleryApplication')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplication",
**kwargs: Any
) -> LROPoller["_models.GalleryApplication"]:
"""Create or update a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be created.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be created
or updated. The allowed characters are alphabets and numbers with dots, dashes, and periods
allowed in the middle. The maximum length is 80 characters.
:type gallery_application_name: str
:param gallery_application: Parameters supplied to the create or update gallery Application
operation.
:type gallery_application: ~azure.mgmt.compute.v2020_09_30.models.GalleryApplication
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryApplication or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_09_30.models.GalleryApplication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application=gallery_application,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplicationUpdate",
**kwargs: Any
) -> "_models.GalleryApplication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_application, 'GalleryApplicationUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplicationUpdate",
**kwargs: Any
) -> LROPoller["_models.GalleryApplication"]:
"""Update a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be updated.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be updated.
The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
middle. The maximum length is 80 characters.
:type gallery_application_name: str
:param gallery_application: Parameters supplied to the update gallery Application operation.
:type gallery_application: ~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryApplication or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_09_30.models.GalleryApplication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application=gallery_application,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> "_models.GalleryApplication":
"""Retrieves information about a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery from which the Application
Definitions are to be retrieved.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be
retrieved.
:type gallery_application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryApplication, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.GalleryApplication
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Delete a gallery Application.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be deleted.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be deleted.
:type gallery_application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def list_by_gallery(
self,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> Iterable["_models.GalleryApplicationList"]:
"""List gallery Application Definitions in a gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery from which Application
Definitions are to be listed.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryApplicationList or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_09_30.models.GalleryApplicationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=self.list_by_gallery.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("GalleryApplicationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications'} # type: ignore
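# Illustrative usage sketch (not part of the generated file): driving the long-running
# create_or_update operation defined above through the compute management client.
# The attribute name "gallery_applications", the GalleryApplication field
# "supported_os_type" and the resource names are assumptions for this API version;
# verify them against the installed SDK before use.
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.models import GalleryApplication

def create_gallery_application_example(subscription_id="<subscription-id>"):
    client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)
    poller = client.gallery_applications.begin_create_or_update(
        resource_group_name="my-rg",
        gallery_name="myGallery",
        gallery_application_name="myApp",
        gallery_application=GalleryApplication(location="eastus", supported_os_type="Linux"),
    )
    # LROPoller.result() blocks until ARMPolling reports that the PUT has completed.
    return poller.result()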
|
|
"""
Contains tests that assert the compliance with the
JSON RPC 2.0 specifications (http://www.jsonrpc.org/specification)
"""
import logging
import simplejson as json
import pytest
from tests.services.service_jsonrpc_specs import ServiceJsonRpcSpecs
@pytest.fixture
def app():
service = ServiceJsonRpcSpecs()
service._initial_setup()
return service.make_tornado_app()
@pytest.mark.gen_test
def test_incomplete_json(http_client, base_url):
body = json.dumps({"jsonrpc": "2.0", "method": "subtract"})[:15]
result = yield http_client.fetch(base_url + "/api", method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["result"] is None
assert response_body["jsonrpc"] == "2.0"
assert response_body["error"] == {"code": -32700, "message": "Parse error"}
# examples from http://www.jsonrpc.org/specification
@pytest.mark.gen_test
def test_rpc_call_with_positional_parameters(http_client, base_url):
base_url += "/api"
body = json.dumps({"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": 1})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] == 19
assert response_body["id"] == 1
body = json.dumps({"jsonrpc": "2.0", "method": "subtract", "params": [23, 42], "id": 2})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] == -19
assert response_body["id"] == 2
@pytest.mark.gen_test
def test_rpc_call_with_named_parameters(http_client, base_url):
base_url += "/api"
body = json.dumps(
{"jsonrpc": "2.0", "method": "subtract", "params": {"a": 42, "b": 23}, "id": 3})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] == 19
assert response_body["id"] == 3
body = json.dumps(
{"jsonrpc": "2.0", "method": "subtract", "params": {"b": 42, "a": 23}, "id": 4})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] == -19
assert response_body["id"] == 4
@pytest.mark.gen_test
def test_a_notification(http_client, base_url):
base_url += "/api"
body = json.dumps({"jsonrpc": "2.0", "method": "update", "params": {"a": 23}})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] is None
assert response_body["error"] is None
@pytest.mark.gen_test
def test_a_notification_inexistent_method(http_client, base_url):
base_url += "/api"
body = json.dumps({"jsonrpc": "2.0", "method": "does_not_exist", "params": {"a": 23}})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] is None
assert response_body["error"] is None
@pytest.mark.gen_test
def test_rpc_call_of_non_existent_method(http_client, base_url):
base_url += "/api"
body = json.dumps({"jsonrpc": "2.0", "method": "does_not_exist", "params": {"a": 23}, "id": 1})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] is None
assert response_body["error"] == {"code": -32601, "message": "Method not found"}
assert response_body["id"] == 1
@pytest.mark.gen_test
def test_rpc_call_with_invalid_json(http_client, base_url):
base_url += "/api"
body = '{"jsonrpc": "2.0", "method": "foobar, "params": "bar", "baz]'
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] is None
assert response_body["error"] == {"code": -32700, "message": "Parse error"}
@pytest.mark.gen_test
def test_rpc_call_with_invalid_request_object(http_client, base_url):
base_url += "/api"
body = json.dumps({"jsonrpc": "2.0", "method": "subtract", "params": "foobar"})
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] is None
assert response_body["error"] == {"code": -32600, "message": "Invalid Request"}
assert response_body["id"] is None
@pytest.mark.gen_test
def test_batch_call_with_invalid_json(http_client, base_url):
base_url += "/api"
body = '[{"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},{"jsonrpc": "2.0", "method"]'
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
print(response_body)
assert isinstance(response_body, dict)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] is None
assert response_body["error"] == {"code": -32700, "message": "Parse error"}
@pytest.mark.gen_test
def test_batch_call_empty_array(http_client, base_url):
base_url += "/api"
body = "[]"
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert response_body["jsonrpc"] == "2.0"
assert response_body["result"] is None
assert response_body["error"] == {"code": -32600, "message": "Invalid Request"}
assert response_body["id"] is None
@pytest.mark.gen_test
def test_batch_call_invalid_batch_but_not_empty(http_client, base_url):
base_url += "/api"
body = "[1]"
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert isinstance(response_body, list)
assert len(response_body) == 1
assert response_body[0]["jsonrpc"] == "2.0"
assert response_body[0]["id"] is None
assert response_body[0]["result"] is None
assert response_body[0]["error"] == {"code": -32600, "message": "Invalid Request"}
@pytest.mark.gen_test
def test_batch_call_invalid_batch(http_client, base_url):
base_url += "/api"
body = "[1,2,3]"
result = yield http_client.fetch(base_url, method="POST", body=body,
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert isinstance(response_body, list)
assert len(response_body) == 3
for i in range(3):
assert response_body[i]["jsonrpc"] == "2.0"
assert response_body[i]["id"] is None
assert response_body[i]["result"] is None
assert response_body[i]["error"] == {"code": -32600, "message": "Invalid Request"}
@pytest.mark.gen_test
def test_batch_big_batch(http_client, base_url):
base_url += "/api"
body = [
{"jsonrpc": "2.0", "method": "sum", "params": [1, 2, 4], "id": "1"}, # valid
{"jsonrpc": "2.0", "method": "notify_hello", "params": [7]}, # notification
{"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": "2"}, # valid
{"foo": "boo"}, # invalid request
{"jsonrpc": "2.0", "method": "foo.get", "params": {"name": "myself"}, "id": "5"},
# method not found
{"jsonrpc": "2.0", "method": "get_data", "id": "9"} # valid no params
]
expected_results = {
"1": {"jsonrpc": "2.0", "result": 7, "id": "1", "error": None},
"2": {"jsonrpc": "2.0", "result": 19, "id": "2", "error": None},
"5": {"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found"}, "id": "5",
"result": None},
"9": {"jsonrpc": "2.0", "result": ["hello", 5], "id": "9", "error": None}
}
result = yield http_client.fetch(base_url, method="POST", body=json.dumps(body),
headers={"content-type": "application/json"})
assert result.code == 200
response_body = json.loads(result.body)
assert isinstance(response_body, list)
print(response_body)
for request in body:
req_id = request.get("id")
if not req_id:
continue
expected = expected_results.get(req_id)
actual_response = [x for x in response_body if x["id"] == req_id][0]
assert expected == actual_response
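# Illustrative helper (not part of the test suite): a minimal synchronous JSON-RPC 2.0
# call against the same "/api" endpoint the tests above exercise. The base URL is an
# assumption; the request/response shapes follow the specification the tests assert.
import itertools
import requests

_ids = itertools.count(1)

def jsonrpc_call(method, params, base_url="http://localhost:8888/api"):
    payload = {"jsonrpc": "2.0", "method": method, "params": params, "id": next(_ids)}
    response = requests.post(base_url, json=payload,
                             headers={"content-type": "application/json"})
    body = response.json()
    if body.get("error"):
        raise RuntimeError("JSON-RPC error %(code)s: %(message)s" % body["error"])
    return body["result"]

# e.g. jsonrpc_call("subtract", [42, 23]) should return 19, mirroring the tests above.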
|
|
#!/usr/bin/env python
'''
@package ion.processes.data.transforms
@file ion/processes/data/transforms/notification_worker.py
@author Brian McKenna <[email protected]>
@brief NotificationWorker class processes real-time notifications
'''
from datetime import datetime
from email.mime.text import MIMEText
import smtplib
from pyon.event.event import EventPublisher, EventSubscriber
from pyon.public import log, RT, OT, PRED, CFG
from interface.objects import DeliveryModeEnum, NotificationFrequencyEnum
from ion.core.process.transform import TransformEventListener
from ion.services.dm.utility.uns_utility_methods import load_notifications
from jinja2 import Environment, FileSystemLoader
class NotificationWorker(TransformEventListener):
"""
    Instances of this class act as Notification Workers.
"""
def on_init(self):
# clients
self.resource_registry = self.container.resource_registry
self.event_publisher = EventPublisher(OT.NotificationSentEvent)
# SMTP client configurations
self.smtp_from = CFG.get_safe('server.smtp.from', '[email protected]')
self.smtp_host = CFG.get_safe('server.smtp.host', 'localhost')
self.smtp_port = CFG.get_safe('server.smtp.port', 25)
# Jinja2 template environment
self.jinja_env = Environment(loader=FileSystemLoader('res/templates'), trim_blocks=True, lstrip_blocks=True)
super(NotificationWorker, self).on_init()
def on_start(self):
super(NotificationWorker,self).on_start()
self.notifications = load_notifications()
def _load_notifications_callback(msg, headers):
""" local callback method so this can be used as callback in EventSubscribers """
self.notifications = load_notifications() # from uns_utility_methods
# the subscriber for the ReloadUserInfoEvent (new subscriber, subscription deleted, notifications changed, etc)
self.reload_user_info_subscriber = EventSubscriber(
event_type=OT.ReloadUserInfoEvent,
#origin='UserNotificationService',
callback=_load_notifications_callback
)
self.add_endpoint(self.reload_user_info_subscriber)
# the subscriber for the UserInfo resource update events
self.userinfo_rsc_mod_subscriber = EventSubscriber(
event_type=OT.ResourceModifiedEvent,
sub_type="UPDATE",
origin_type="UserInfo",
callback=_load_notifications_callback
)
self.add_endpoint(self.userinfo_rsc_mod_subscriber)
def process_event(self, event, headers):
"""
callback for the subscriber listening for all events
"""
# create tuple key (origin,origin_type,event_type,event_subtype) for incoming event
# use key to match against known notifications, keyed by same tuple (or combination of this tuple)
origin = event.origin
origin_type = event.origin_type
event_type = event.type_
event_subtype = event.sub_type
key = (origin,origin_type,event_type,event_subtype)
# users to notify with a list of the notifications that have been triggered by this Event
users = {} # users to be notified
# loop the combinations of keys (see _key_combinations below for explanation)
# set() to eliminate duplicates when '' values exist in tuple
for k in set(self._key_combinations(key)):
if k in self.notifications:
for (notification, user) in self.notifications.get(k, []):
# notification has been triggered
if user not in users:
users[user] = []
users[user].append(notification)
# we now have a dict, keyed by users that will be notified, each user has a list of notifications triggered by this event
# send email
if users:
# message content for Jinja2 template (these fields are based on Event and thus are the same for all users/notifications)
context = {}
context['event_label'] = self.event_type_to_label(event.type_) # convert to UX label if known
context['origin_type'] = event.origin_type
context['origin'] = event.origin
context['url'] = 'http://ooinet.oceanobservatories.org' # TODO get from CFG
context['timestamp'] = datetime.utcfromtimestamp(float(event.ts_created)/1000.0).strftime('%Y-%m-%d %H:%M:%S (UTC)')
# use one SMTP connection for all emails
smtp = self._initialize_smtp()
try:
# loop through list of users getting notified of this Event
for user in users:
# list of NotificationRequests for this user triggered by this event
for notification in users[user]:
                        # name of the NotificationRequest; fall back to the object's type name when no name was set (TODO: confirm what the default name is)
context['notification_name'] = notification.name or notification.type_
# send message for each DeliveryConfiguration (this has mode and frequency to determine realtime, email or SMS)
for delivery_configuration in notification.delivery_configurations:
# skip if DeliveryConfiguration.frequency is DISABLED
if delivery_configuration.frequency == NotificationFrequencyEnum.DISABLED:
continue
# only process REAL_TIME
if delivery_configuration.frequency != NotificationFrequencyEnum.REAL_TIME:
continue
# default to UserInfo.contact.email if no email specified in DeliveryConfiguration
smtp_to = delivery_configuration.email if delivery_configuration.email else user.contact.email
context['smtp_to'] = smtp_to
# message from Jinja2 template (email or SMS)
try:
# email - MIMEText
if delivery_configuration.mode == DeliveryModeEnum.EMAIL:
body = self.jinja_env.get_template('notification_realtime_email.txt').render(context)
mime_text = MIMEText(body)
mime_text['Subject'] = 'OOINet ION Event Notification - %s' % context['event_label']
mime_text['From'] = self.smtp_from
mime_text['To'] = context['smtp_to']
smtp_msg = mime_text.as_string()
# SMS - just the template string
elif delivery_configuration.mode == DeliveryModeEnum.SMS:
body = self.jinja_env.get_template('notification_realtime_sms.txt').render(context)
smtp_msg = body
# unknown DeliveryMode
else:
                                    raise Exception('unknown DeliveryModeEnum: %s' % delivery_configuration.mode)  # TODO: use a specific exception type
smtp.sendmail(self.smtp_from, smtp_to, smtp_msg)
except Exception:
log.error('Failed to create message for notification %s', notification._id)
continue # skips this notification
# publish NotificationSentEvent - one per NotificationRequest (EventListener plugin NotificationSentScanner listens)
notification_max = int(CFG.get_safe("service.user_notification.max_daily_notifications", 1000))
self.event_publisher.publish_event(user_id=user._id, notification_id=notification._id, notification_max=notification_max)
finally:
smtp.quit()
def _initialize_smtp(self):
""" class method so user/pass/etc can be added """
return smtplib.SMTP(self.smtp_host, self.smtp_port)
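    # Illustrative sketch (not part of the original worker): how _initialize_smtp could be
    # extended with STARTTLS and authentication, as the comment above suggests. The CFG keys
    # 'server.smtp.user' and 'server.smtp.password' are assumptions, not existing entries.
    def _initialize_authenticated_smtp(self):
        smtp = smtplib.SMTP(self.smtp_host, self.smtp_port)
        smtp_user = CFG.get_safe('server.smtp.user', None)
        smtp_password = CFG.get_safe('server.smtp.password', None)
        if smtp_user and smtp_password:
            smtp.starttls()  # upgrade the connection before sending credentials
            smtp.login(smtp_user, smtp_password)
        return smtp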
def _key_combinations(self, key):
"""
        yields every possible combination of the tuple elements, from 1 member up to len(key) members
        only the elements of each combination are set, with '' as filler elsewhere, so every yielded tuple has the same length as key
        e.g. ('a', 'b', 'c') -> the combination (a,) becomes ('a', '', '') and (b,) becomes ('', 'b', '')
        extension of https://docs.python.org/2/library/itertools.html#itertools.combinations ("Equivalent to" section)
        differences:
        - loops over all r from 1 to n
        - returns tuples of length n with '' as filler
"""
n = len(key)
# want all combinations of 1 to n
for r in range(1,n+1):
indices = range(r)
# first combination is the first r values
combination = ['']*n # creates a list of n ''s
for i in indices:
combination[i] = key[i]
yield tuple(combination)
# remaining combinations
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
break
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
combination = ['']*n
for i in indices:
combination[i] = key[i]
yield tuple(combination)
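    # Worked example (illustrative, not part of the original code): for key = ('a', 'b', 'c'),
    # set(self._key_combinations(key)) contains exactly:
    #   ('a', '', ''), ('', 'b', ''), ('', '', 'c'),
    #   ('a', 'b', ''), ('a', '', 'c'), ('', 'b', 'c'),
    #   ('a', 'b', 'c')
    # which is why process_event() above can match notifications registered on any subset
    # of (origin, origin_type, event_type, event_subtype).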
# TODO: REMOVE AND REPLACE WITHIN NotificationRequest
# this is a temporary hack so we're using UX (ion-ux) defined labels in the email
# see https://github.com/ooici/ion-ux/blob/master/static/js/ux-views-notifications.js#L1-L70
def event_type_to_label(self, key):
event_to_label = {
'ResourceAgentConnectionLostErrorEvent': 'Communication Lost/Restored',
'ResourceAgentErrorEvent': 'Device Error',
'ResourceIssueReportedEvent': 'Issue reported',
'ResourceLifecycleEvent': 'Lifecycle state change',
'ResourceAgentStateEvent': 'Agent operational state change',
'ResourceAgentResourceStateEvent': 'Device operational state change',
'DeviceOperatorEvent': 'Operator event on device',
'ResourceOperatorEvent': 'Operator event on resource',
'ParameterQCEvent': 'QC alert',
'OrgNegotiationInitiatedEvent': 'Request received',
'ResourceModifiedEvent': 'Resource modified',
'DeviceStatusAlertEvent': 'Status alert/change',
'DeviceAggregateStatusEvent': 'Aggregate Status alert/change',
}
# if not known, just return the event_type
return event_to_label.get(key, key)
|
|
'''
Clean up test case remnants
@author: Youyk
'''
import threading
import time
import sys
import os
import errno
import traceback
import apibinding.api as api
import apibinding.inventory as inventory
import apibinding.api_actions as api_actions
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.log as log
import zstacklib.utils.linux as linux
import zstacklib.utils.ssh as ssh
import zstacktestagent.plugins.vm as vm_plugin
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.header.host as host_header
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.zone_operations as zone_ops
#logger = log.get_logger(__name__)
def _get_host_ip(vm_inv, session_uuid):
if not vm_inv.hostUuid:
host_uuid = vm_inv.lastHostUuid
else:
host_uuid = vm_inv.hostUuid
if not host_uuid:
test_util.test_logger("Host UUID is None. Can't get Host IP address")
return None
ret = res_ops.get_resource(res_ops.HOST, session_uuid, uuid=host_uuid)[0]
return ret.managementIp
def _delete_file(host_ip, path):
cmd = host_plugin.HostShellCmd()
cmd.command = "rm -rf %s" % path
test_util.test_logger("Delete file: %s in Host: %s" % (path, host_ip))
http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
def _delete_files(host_ip, path):
cmd = host_plugin.HostShellCmd()
cmd.command = "rm -rf %s*" % path
test_util.test_logger("Delete files: %s in Host: %s" % (path, host_ip))
http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
def _rm_folder_contents_violently(host_ip, path):
cmd = host_plugin.HostShellCmd()
cmd.command = "rm -rf %s/*" % path
try:
http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
except Exception as e:
err = linux.get_exception_stacktrace()
test_util.test_logger("Fail to delete contents in folder: %s in Host: %s" % (path, host_ip))
test_util.test_logger(err)
test_util.test_logger("Successfully delete contents in folder: %s in Host: %s" % (path, host_ip))
def _umount_folder_violently(host_ip, path):
cmd = host_plugin.HostShellCmd()
cmd.command = "umount %s" % path
try:
http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
except Exception as e:
err = linux.get_exception_stacktrace()
test_util.test_logger("Fail to umount folder: %s in Host: %s" % (path, host_ip))
test_util.test_logger(err)
test_util.test_logger("Umount folder: %s in Host: %s" % (path, host_ip))
def _destroy_vm_violently(host_ip, uuid):
cmd = vm_plugin.DeleteVmCmd()
cmd.vm_uuids = [uuid]
http.json_dump_post(testagent.build_http_path(host_ip, vm_plugin.DELETE_VM_PATH), cmd)
def _async_api_call(action):
api_client = api.Api()
session_uuid = api_client.login_as_admin()
api_client.set_session_to_api_message(action, session_uuid)
return api_client.async_call_wait_for_complete(action)
def _sync_api_call(action):
api_client = api.Api()
session_uuid = api_client.login_as_admin()
api_client.set_session_to_api_message(action, session_uuid)
return api_client.sync_call(action)
def _clean_image_violently(backup_storage_uuid, account_uuid, zone_uuid, host_ip, session_uuid):
result = res_ops.get_resource(res_ops.IMAGE, session_uuid)
image_path = None
for image in result:
for backup_storage in image.backupStorageRefs:
if backup_storage.uuid == backup_storage_uuid:
image_path = os.path.dirname(backup_storage.installPath)
#Delete Backup Storage Files
if image_path:
_delete_file(host_ip, image_path)
def _clean_volume_violently(vm_all_volumes, backup_storage_uuid, account_uuid, zone_uuid, host_ip, session_uuid):
volume_path = None
for volume in vm_all_volumes:
if not volume.installPath:
continue
volume_path = os.path.dirname(volume.installPath)
#Delete Root Volume Files
if volume_path:
_delete_file(host_ip, volume_path)
result = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid)
image_cache_path = None
for pri_storage in result:
if pri_storage.zoneUuid == zone_uuid:
image_cache_path = "%s/imagecache/template/%s" % (pri_storage.mountPath, backup_storage_uuid)
#Delete Primary Storage Cache files
if image_cache_path:
_delete_file(host_ip, image_cache_path)
def umount_primary_storage_violently(host_ip, storage_mount_path):
if storage_mount_path:
_rm_folder_contents_violently(host_ip, storage_mount_path)
_umount_folder_violently(host_ip, storage_mount_path)
try:
os.rmdir(storage_mount_path)
except OSError as ex:
if ex.errno == errno.ENOTEMPTY:
test_util.test_logger("Folder %s is not safely umounted" % storage_mount_path)
def destroy_vm_and_storage_violently(vm, session_uuid):
destroy_all_vm_and_vips()
vm_inv = inventory.VmInstanceInventory()
vm_inv.evaluate(vm)
vm_uuid = vm_inv.uuid
vm_all_volumes = vm_inv.allVolumes
backup_storage_uuid = vm_inv.imageUuid
account_uuid = None
zone_uuid = vm_inv.zoneUuid
host_ip = _get_host_ip(vm_inv, session_uuid)
if not host_ip:
test_util.test_logger("Can't find Host for VM: %s" % vm_uuid)
return
_destroy_vm_violently(host_ip, vm_uuid)
_clean_volume_violently(vm_all_volumes, backup_storage_uuid, account_uuid, zone_uuid, host_ip, session_uuid)
_clean_image_violently(backup_storage_uuid, account_uuid, zone_uuid, host_ip, session_uuid)
def cleanup_all_vms_violently():
destroy_all_vm_and_vips()
session_uuid = acc_ops.login_as_admin()
result = res_ops.get_resource(res_ops.VM_INSTANCE, session_uuid)
for vm in result:
thread = threading.Thread(target = destroy_vm_and_storage_violently\
, args = (vm, session_uuid, ))
thread.start()
while threading.active_count() > 1:
time.sleep(0.1)
acc_ops.logout(session_uuid)
#Find a host whose zone uses the given primary storage.
def _get_host_from_primary_storage(primary_storage_uuid, session_uuid):
result = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid, uuid=primary_storage_uuid)[0]
hosts = res_ops.get_resource(res_ops.HOST, session_uuid)
for host in hosts:
if host.zoneUuid == result.zoneUuid:
return host
def cleanup_none_vm_volumes_violently():
session_uuid = acc_ops.login_as_admin()
try:
priSto_host_list = {}
result = res_ops.get_resource(res_ops.VOLUME, session_uuid)
for volume in result:
if not volume.installPath:
continue
volume_path = os.path.dirname(volume.installPath)
#VM volume has been cleanup in destroy_vm_and_storage_violently()
if not volume.hasattr('vmInstanceUuid'):
pri_sto_uuid = volume.primaryStorageUuid
if priSto_host_list.has_key(pri_sto_uuid):
host_ip = priSto_host_list[pri_sto_uuid]
else:
#TODO: need to add multi hosts, if primary storage is local storage.
host = _get_host_from_primary_storage(pri_sto_uuid, session_uuid)
host_ip = host.managementIp
priSto_host_list[pri_sto_uuid] = host_ip
thread = threading.Thread(target = _delete_file, \
args = (host_ip, volume_path))
thread.start()
while threading.active_count() > 1:
time.sleep(0.1)
except Exception as e:
test_util.test_logger("cleanup volumes violently meet exception")
traceback.print_exc(file=sys.stdout)
raise e
finally:
acc_ops.logout(session_uuid)
def umount_all_primary_storages_violently():
session_uuid = acc_ops.login_as_admin()
zones = res_ops.query_resource(res_ops.ZONE)
for zone in zones:
conditions = res_ops.gen_query_conditions('zoneUuid', '=', zone.uuid)
conditions = res_ops.gen_query_conditions('state', '=', 'Enabled', conditions)
pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, conditions, session_uuid)
conditions = res_ops.gen_query_conditions('zoneUuid', '=', zone.uuid)
conditions = res_ops.gen_query_conditions('state', '=', host_header.ENABLED, conditions)
conditions = res_ops.gen_query_conditions('status', '=', host_header.CONNECTED, conditions)
conditions = res_ops.gen_query_conditions('hypervisorType', '=', inventory.KVM_HYPERVISOR_TYPE, conditions)
all_hosts = res_ops.query_resource(res_ops.HOST, conditions, session_uuid)
for host in all_hosts:
for ps in pss:
ps_url = ps.mountPath
thread = threading.Thread(\
target = umount_primary_storage_violently, \
args = (host.managementIp, ps_url))
thread.start()
while threading.active_count() > 1:
time.sleep(0.1)
acc_ops.logout(session_uuid)
delete_ps_ceph_pools()
def cleanup_backup_storage():
cleanup_sftp_backup_storage()
delete_bs_ceph_pools()
def cleanup_sftp_backup_storage():
backup_obj = test_lib.deploy_config.backupStorages
sftp_backupstorages = backup_obj.get_child_node_as_list('sftpBackupStorage')
for storage in sftp_backupstorages:
cmd = 'rm -rf %s/rootVolumeTemplates/*' % storage.url_
ssh.execute(cmd, storage.hostname_, storage.username_, storage.password_)
cmd = 'rm -rf %s/dataVolumeTemplates/*' % storage.url_
ssh.execute(cmd, storage.hostname_, storage.username_, storage.password_)
def do_destroy_vips(vips, thread_threshold = 1000):
for vip in vips:
thread = threading.Thread(target=net_ops.delete_vip, args=(vip.uuid,))
while threading.active_count() > thread_threshold:
time.sleep(0.5)
exc = sys.exc_info()
if exc[0]:
                raise exc[0], exc[1], exc[2]
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
if exc[0]:
            raise exc[0], exc[1], exc[2]
time.sleep(0.1)
def do_destroy_vms(vms, thread_threshold = 1000):
for vm in vms:
thread = threading.Thread(target=vm_ops.destroy_vm, args=(vm.uuid,))
while threading.active_count() > thread_threshold:
time.sleep(0.5)
exc = sys.exc_info()
if exc[0]:
                raise exc[0], exc[1], exc[2]
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
if exc[0]:
            raise exc[0], exc[1], exc[2]
time.sleep(0.1)
def do_delete_volumes(volumes, thread_threshold = 1000):
for volume in volumes:
thread = threading.Thread(target=vol_ops.delete_volume, args=(volume.uuid,))
while threading.active_count() > thread_threshold:
time.sleep(0.5)
exc = sys.exc_info()
if exc[0]:
                raise exc[0], exc[1], exc[2]
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
if exc[0]:
            raise exc[0], exc[1], exc[2]
time.sleep(0.1)
def delete_accounts(thread_threshold = 1000):
def do_delete_accounts(accounts, session_uuid):
for account in accounts:
thread = threading.Thread(target=acc_ops.delete_account, \
args=(account.uuid, session_uuid))
while threading.active_count() > thread_threshold:
time.sleep(0.5)
exc = sys.exc_info()
if exc[0]:
                    raise exc[0], exc[1], exc[2]
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
if exc[0]:
                raise exc[0], exc[1], exc[2]
time.sleep(0.1)
session_uuid = acc_ops.login_as_admin()
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000')
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000')
cond = []
cond = res_ops.gen_query_conditions('name', '!=', 'admin', cond)
num = res_ops.query_resource_count(res_ops.ACCOUNT, cond)
if num <= thread_threshold:
accounts = res_ops.query_resource(res_ops.ACCOUNT, cond)
do_delete_accounts(accounts, session_uuid)
else:
start = 0
limit = thread_threshold - 1
curr_num = start
accs = []
while curr_num < num:
acc_tmp= res_ops.query_resource_fields(res_ops.ACCOUNT, \
[], session_uuid, ['uuid'], start, limit)
            accs.extend(acc_tmp)
curr_num += limit
start += limit
        do_delete_accounts(accs, session_uuid)
def destroy_all_vm_and_vips(thread_threshold = 1000):
session_uuid = acc_ops.login_as_admin()
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000')
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000')
delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
expunge_time = test_lib.lib_set_expunge_time('vm', 1)
cond = res_ops.gen_query_conditions('state', '!=', 'Destroyed')
num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond)
if num <= thread_threshold:
vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
do_destroy_vms(vms, thread_threshold)
else:
start = 0
limit = thread_threshold - 1
curr_num = start
vms = []
while curr_num < num:
vms_temp = res_ops.query_resource_fields(res_ops.VM_INSTANCE, \
cond, None, ['uuid'], start, limit)
vms.extend(vms_temp)
curr_num += limit
start += limit
do_destroy_vms(vms, thread_threshold)
vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
if vip_num <= thread_threshold:
vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
do_destroy_vips(vips, thread_threshold)
else:
start = 0
limit = thread_threshold - 1
curr_num = start
        vips = []
while curr_num < vip_num:
vips_temp = res_ops.query_resource_fields(res_ops.VIP, \
[], session_uuid, ['uuid'], start, limit)
vips.extend(vips_temp)
curr_num += limit
start += limit
do_destroy_vips(vips, thread_threshold)
test_lib.lib_set_delete_policy('vm', delete_policy)
test_lib.lib_set_expunge_time('vm', expunge_time)
    test_util.test_logger('VM destroy succeeded. Destroyed %d VMs.' % num)
def delete_zones():
zones = res_ops.query_resource(res_ops.ZONE, [])
for zone in zones:
zone_ops.delete_zone(zone.uuid)
def delete_ps_ceph_pools():
pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])
for ps in pss:
if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
ceph_host, username, password = \
test_lib.lib_get_ps_ceph_info_by_ps_inventory(ps)
test_lib.lib_delete_ceph_pool(ceph_host, username, password, \
ps.dataVolumePoolName)
test_lib.lib_delete_ceph_pool(ceph_host, username, password, \
ps.imageCachePoolName)
test_lib.lib_delete_ceph_pool(ceph_host, username, password, \
ps.rootVolumePoolName)
def delete_bs_ceph_pools():
bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, [])
for bs in bss:
if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
ceph_host, username, password = \
test_lib.lib_get_bs_ceph_info_by_bs_inventory(bs)
test_lib.lib_delete_ceph_pool(ceph_host, username, password, \
bs.poolName)
def delete_all_volumes(thread_threshold = 1000):
session_uuid = acc_ops.login_as_admin()
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000')
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000')
delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
expunge_time = test_lib.lib_set_expunge_time('volume', 1)
cond = res_ops.gen_query_conditions('status', '!=', 'Deleted')
num = res_ops.query_resource_count(res_ops.VOLUME, cond)
if num <= thread_threshold:
volumes = res_ops.query_resource(res_ops.VOLUME, cond)
do_delete_volumes(volumes, thread_threshold)
else:
start = 0
limit = thread_threshold - 1
curr_num = start
volumes = []
while curr_num < num:
volumes_temp = res_ops.query_resource_fields(res_ops.VOLUME, \
cond, None, ['uuid'], start, limit)
volumes.extend(volumes_temp)
curr_num += limit
start += limit
do_delete_volumes(volumes, thread_threshold)
test_lib.lib_set_delete_policy('volume', delete_policy)
test_lib.lib_set_expunge_time('volume', expunge_time)
    test_util.test_logger('Volume deletion succeeded. Deleted %d volumes.' % num)
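# Illustrative sketch (not part of the original library): one way these cleanup
# helpers might be chained in an environment teardown script. The ordering and
# the thread_threshold value below are assumptions, not a documented workflow.
if __name__ == '__main__':
    destroy_all_vm_and_vips(thread_threshold=500)
    delete_all_volumes(thread_threshold=500)
    delete_accounts(thread_threshold=500)
    delete_zones()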
|
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by Jan-Hendrik Dolling.
:license: Apache 2.0, see LICENSE for more details.
"""
import os
import sys
try:
import mock
except ImportError:
from unittest import mock
import re
import types
try:
import unittest2 as unittest
except ImportError:
import unittest
from configvalidator import ValidatorException
from configvalidator import ParserException, InitException
import testutils
PYPY3 = hasattr(sys, 'pypy_version_info') and sys.version_info.major >= 3
class Test(unittest.TestCase):
def setUp(self):
with open(os.path.join(testutils.get_test_utils_base(), "certs", "not_before", "key"), "r") as f:
self.not_before_key = f.read()
with open(os.path.join(testutils.get_test_utils_base(), "certs", "not_before", "cert"), "r") as f:
self.not_before_cert = f.read()
@unittest.skipIf(PYPY3, "...")
def test_cert(self):
not_before_key = self.not_before_key
not_before_cert = self.not_before_cert
from configvalidator.validators import CertValidator
# normal
v1 = CertValidator(
privateKey=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"key_1.pem"))
v1.validate(
os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_1.crt"))
with self.assertRaises(ValidatorException) as e1:
v1.validate(
os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_2.crt"))
self.assertEqual(str(e1.exception), 'cert <-> key mismatch')
with self.assertRaises(ValidatorException) as e2:
v1.validate(
os.path.join(
testutils.get_test_utils_base(),
"data",
"not_exist",
"certs",
"cert_3.crt"))
self.assertEqual(str(e2.exception), "path \"{testutils}\" doesn't exist".format(testutils=os.path.join(testutils.get_test_utils_base(), "data", "not_exist", "certs", "cert_3.crt")))
# crypt
v2 = CertValidator(
privateKey=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"key_2_crypt.pem"),
pw="123456")
v2.validate(
os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_2.crt"))
with self.assertRaises(ValidatorException) as e3:
v2.validate(
os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_1.crt"))
self.assertEqual(str(e3.exception), 'cert <-> key mismatch')
# no key
with self.assertRaises(InitException) as e4:
CertValidator(
privateKey=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"key_2_crypt.pem"))
self.assertEqual(
str(e4.exception),
"Key is encrypted but no password is given")
# not valid (before)
v3 = CertValidator(privateKey=not_before_key)
with self.assertRaises(ValidatorException) as e5:
v3.validate(value=not_before_cert)
self.assertEqual(
str(e5.exception),
"the certificate is valid not before 2115-05-21 11:02:17+00:00.")
# not valid (before) <-> not validation
v4 = CertValidator(valid=False)
v4.validate(value=not_before_cert)
# not valid (after)
v5 = CertValidator(
privateKey=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"key_3.pem"))
with self.assertRaises(ValidatorException) as e6:
v5.validate(
value=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_3.crt"))
self.assertEqual(
str(e6.exception),
"the certificate is valid not after 2015-06-14 12:14:42+00:00.")
# not valid (after) <-> not validation
v6 = CertValidator(
privateKey=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"key_3.pem"),
valid=False)
v6.validate(
value=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_3.crt"))
v7 = CertValidator(valid=False)
v7.validate(
value=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_3.crt"))
v8 = CertValidator()
with self.assertRaises(ValidatorException) as e7:
v8.validate(
value=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_3.crt"))
self.assertEqual(
str(e7.exception),
"the certificate is valid not after 2015-06-14 12:14:42+00:00.")
# disallowed values
v9 = CertValidator(disallowed_X509Name=dict(commonName="cn"))
with self.assertRaises(ValidatorException) as e8:
v9.validate(
value=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_1.crt"))
self.assertEqual(
str(e8.exception),
"The value cn is disallowed for CN: ['cn']")
v10 = CertValidator(disallowed_X509Name=dict(CN="cn"))
with self.assertRaises(ValidatorException) as e9:
v10.validate(
value=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_1.crt"))
self.assertEqual(
str(e9.exception),
"The value cn is disallowed for CN: ['cn']")
#
v11 = CertValidator(allowed_X509Name=dict(CN="foo"))
with self.assertRaises(ValidatorException) as e10:
v11.validate(
value=os.path.join(
testutils.get_test_utils_base(),
"data",
"exist",
"certs",
"cert_1.crt"))
self.assertEqual(
str(e10.exception),
"The value cn is not in the allowed values for CN: ['foo']")
def test_cert_1(self):
from configvalidator.validators import CertValidator
with self.assertRaises(InitException) as e1:
CertValidator(privateKey=os.path.join(testutils.get_test_utils_base(), "data", "not_exist", "key.pem"))
self.assertEqual("can not load key: path \"{testutils}\" doesn't exist".format(testutils=os.path.join(testutils.get_test_utils_base(), "data", "not_exist", "key.pem")), str(e1.exception))
not_before_key_2 = """
-----BEGIN RSA PRIVATE KEY-----
12345
67890
1
-----END RSA PRIVATE KEY-----
"""
with self.assertRaises(InitException) as e2:
CertValidator(privateKey=not_before_key_2)
self.assertEqual("can not load key: base64 encoding error", str(e2.exception))
with self.assertRaises(InitException) as e3:
CertValidator(privateKey=os.path.join(testutils.get_test_utils_base(), "data", "exist", "empty.ini"))
self.assertEqual("path \"{path}\" contains no valid data.".format(path=os.path.join(testutils.get_test_utils_base(), "data", "exist", "empty.ini")), str(e3.exception))
def test_cert_CertValidator(self):
from configvalidator.validators import CertValidator
class Exc(Exception):
message = []
res = CertValidator.get_exception_msg(Exc())
self.assertEqual("Unknown Error", res)
def test_cert_norm_X509Name(self):
from configvalidator.validators import CertValidator
in1 = dict(
C="C_1",
ST="ST_1",
L="L_1",
O="O_1",
OU="OU_1",
CN="CN_1",
emailAddress="emailAddress_1",
)
res1 = CertValidator.norm_X509Name(val_in=in1)
in2 = dict(
emailAddress="emailAddress_1",
countryName="C_1",
stateOrProvinceName="ST_1",
localityName="L_1",
organizationName="O_1",
organizationalUnitName="OU_1",
commonName="CN_1",
)
res2 = CertValidator.norm_X509Name(val_in=in2)
self.assertDictEqual(res1, res2)
with self.assertRaises(ParserException) as e:
CertValidator.norm_X509Name(val_in={"fooo": "bar"})
self.assertEqual("invalid X509Name key: fooo", str(e.exception))
@unittest.skipIf(PYPY3, "...")
def test_cert_XXXXXXXXXXX(self):
        # FIXME: use a different private key!
key = """
-----BEGIN PRIVATE KEY-----
Proc-Type: ENCRYPTED
MIICxzBBBgkqhkiG9w0BBQ0wNDAbBgkqhkiG9w0BBQwwDgQIEOK1z4orG5QCAggA
MBUGCSsGAQQBl1UBAgQIo8g5+FaUsaQEggKAS9/E3kXpI/gW88MCVsTYgwQEA0KC
bifNcytbPZmGSEe1xOhz3KX8N3BTlLZfATeRXxVwPF1v6Rsss0J+4FeVLSH+1NNc
L7mDB+gTSq09skYHeHCzAHDCZaCur0luMgoRqoFVZ6Eh8kIlkJZqHoXBX7Ndlglv
sHHho9GKWUjjqhOjBIJqHPG4u2o8sQbVNTvwX2swDgQ5HxrKrRspBBC82QdLL0J5
apHGu/ev+ArGvrttttDiDJtzIjresiUurG0J0V4ozf633cW2Fb3rKT6yng4f1NM5
k18nsCD4AH+aZm3yTsQNDdrDugqBt6hPaoP0Jyl799+8K/LX7hUgFy9f/gTTqTXM
8lw9fA5jVRV9runh6XvLgLy/xJXNknrnapSpRaw0EpDmzmB2y6WsRtGoJCK4mB1P
5bBhe3d7yMq0qL+l2GvUP0T5Dox763Pb0h78U9PAnijMtzeeEoeoPYLWj7bjTDQU
tJ/AHSsEfZ5jN0FM8nXCkDK+eeAGbXU9UsrJdKJaXz3yVg4qEo2dMii5dkZfEk5o
8BLjD6zHFkcd6tGUaKOp00X4NxRfD2ZlMYdT7K1AmePJkZ3BoH96QEf7gafpbbmm
iNZtaGvzcb2ZvUt5IXs/unbZklOUcBK1xXINRo4zL8IUPY6z/a5x+qU5V9qKJpHp
DudeioqqfAb/1Lr8Zjk0qm8VzJNEIEq7WuxVZvvGvmsp+ReeBEu1jDXOGl8G7qWf
qDC87meLLKGvm4TM26dzjIrOIojgyTJxISGO/trmRef9YUFUDcodfyPNAxJTLk3N
sSQ+pr1dDQrM/dvU5Wy/og8wrw7es5uxqmxbmYW/G/znxoC22HbWdgS2FQ==
-----END PRIVATE KEY-----
"""
from configvalidator.validators import CertValidator
with self.assertRaises(InitException) as e1:
CertValidator(privateKey=key)
self.assertTrue(str(e1.exception).startswith("Key is encrypted but no password is given"))
@unittest.skipIf(PYPY3, "...")
def test_cert_load(self):
from configvalidator.validators import CertValidator
with self.assertRaises(InitException) as e:
CertValidator(privateKey="-----BEGIN PRIVATE KEY-----\n....")
self.assertTrue(str(e.exception).startswith("can not load key: "))
c = CertValidator()
with self.assertRaises(ValidatorException) as e2:
c.validate("-----BEGIN CERTIFICATE-----\n.....")
self.assertTrue(str(e2.exception).startswith("can not load certificate: "))
with self.assertRaises(InitException) as e3:
CertValidator(
privateKey=os.path.join(testutils.get_test_utils_base(), "data", "exist", "certs", "key_2_crypt.pem"),
pw=object())
self.assertEqual("Key pw must be an string", str(e3.exception))
@unittest.skipIf(PYPY3, "mock error with pypy3 version 2.4")
def test_CertValidator_error(self):
key = self.not_before_key
from configvalidator.validators import CertValidator
from configvalidator.validators import Base64Validator
# mock
with mock.patch.object(Base64Validator, 'validate', side_effect=Exception('FAIL')) as mock_method:
with self.assertRaises(InitException) as e:
CertValidator(privateKey=key)
self.assertEqual(1, len(e.exception.errors))
error_msg, excp = e.exception.errors[0]
            self.assertEqual('can not load key: see error log', error_msg)
            self.assertEqual("FAIL", str(excp))
# reset mock
v = Base64Validator()
self.assertEqual("Hallo Welt!", v.validate("SGFsbG8gV2VsdCE="))
if __name__ == '__main__':
unittest.main()
|
|
"""The signal processor for Review Board search."""
from __future__ import unicode_literals
import logging
import threading
from functools import partial
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_delete, post_save, m2m_changed
from django.utils import six
from djblets.siteconfig.models import SiteConfiguration
from haystack.signals import BaseSignalProcessor
from reviewboard.accounts.models import Profile
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.reviews.signals import review_request_published
from reviewboard.search import search_backend_registry
logger = logging.getLogger(__name__)
class SignalProcessor(BaseSignalProcessor):
""""Listens for signals and updates the search index.
This will listen for any signals that would affect the search index, and
invokes a suitable Haystack callback to immediately update the data stored
in the index.
This only updates the search index if:
1) Search is enabled.
2) The current search engine backend supports on-the-fly indexing.
"""
save_signals = [
(ReviewRequest, review_request_published, 'review_request'),
(User, post_save, 'instance'),
(Profile, post_save, 'instance'),
]
delete_signals = [
(ReviewRequest, post_delete),
(User, post_delete),
]
def __init__(self, *args, **kwargs):
"""Initialize the signal processor.
Args:
*args (tuple):
Positional arguments to pass to the parent constructor.
**kwargs (dict):
Keyword arguments to pass to the parent constructor.
"""
self.is_setup = False
self._can_process_signals = False
self._handlers = {}
self._pending_user_changes = threading.local()
super(SignalProcessor, self).__init__(*args, **kwargs)
@property
def can_process_signals(self):
"""Whether the signal processor can currently process signals."""
if not self._can_process_signals:
try:
SiteConfiguration.objects.get_current()
self._can_process_signals = True
except ObjectDoesNotExist:
pass
return self._can_process_signals
def setup(self):
"""Register the signal handlers for this processor."""
# We define this here instead of at the class level because we cannot
# reference class members during the class' definition.
m2m_changed_signals = [
(Group.users.through, self._handle_group_m2m_changed),
]
if not self.is_setup:
for cls, signal, instance_kwarg in self.save_signals:
handler = partial(self.check_handle_save,
instance_kwarg=instance_kwarg)
self._handlers[(cls, signal)] = handler
for cls, signal in self.delete_signals:
self._handlers[(cls, signal)] = self.check_handle_delete
for cls, handler in m2m_changed_signals:
self._handlers[(cls, m2m_changed)] = handler
for (cls, signal), handler in six.iteritems(self._handlers):
signal.connect(handler, sender=cls)
self.is_setup = True
def teardown(self):
"""Unregister all signal handlers for this processor."""
if self.is_setup:
for (cls, signal), handler in six.iteritems(self._handlers):
signal.disconnect(handler, sender=cls)
self.is_setup = False
def check_handle_save(self, instance_kwarg, **kwargs):
"""Conditionally update the search index when an object is updated.
Args:
instance_kwarg (unicode):
The name of the instance parameter.
**kwargs (dict):
Signal arguments. These will be passed to
:py:meth:`handle_save`.
"""
if not self.can_process_signals:
return
instance = kwargs.pop(instance_kwarg)
backend = search_backend_registry.current_backend
if backend and search_backend_registry.on_the_fly_indexing_enabled:
if isinstance(instance, Profile):
# When we save a Profile, we want to update the User index.
kwargs['sender'] = User
instance = instance.user
self.handle_save(instance=instance, **kwargs)
def check_handle_delete(self, **kwargs):
"""Conditionally update the search index when an object is deleted.
Args:
**kwargs (dict):
Signal arguments. These will be passed to
:py:meth:`handle_delete`.
"""
if not self.can_process_signals:
return
backend = search_backend_registry.current_backend
if backend and search_backend_registry.on_the_fly_indexing_enabled:
self.handle_delete(**kwargs)
def handle_save(self, **kwargs):
"""Update the search index when an object is updated.
If there's any error writing to the search backend, the error will
be caught and logged.
Args:
**kwargs (dict):
Signal arguments. These will be passed to
:py:meth:`handle_save`.
"""
try:
super(SignalProcessor, self).handle_save(**kwargs)
except Exception as e:
logger.error('Error updating the search index. Check to '
'make sure the search backend is running and '
'configured correctly, and then rebuild the search '
'index. Error: %s',
e)
def handle_delete(self, **kwargs):
"""Update the search index when an object is deleted.
If there's any error writing to the search backend, the error will
be caught and logged.
Args:
**kwargs (dict):
Signal arguments. These will be passed to
                :py:meth:`handle_delete`.
"""
try:
super(SignalProcessor, self).handle_delete(**kwargs)
except Exception as e:
logger.error('Error updating the search index. Check to '
'make sure the search backend is running and '
'configured correctly, and then rebuild the search '
'index. Error: %s',
e)
def _handle_group_m2m_changed(self, instance, action, pk_set, reverse,
**kwargs):
"""Handle a Group.users relation changing.
When the :py:attr:`Group.users
<reviewboard.reviews.models.group.Group.users>` field changes, we don't
get a corresponding :py:data:`~django.db.signals.post_save` signal
(because the related model wasn't saved). Instead, we will get multiple
:py:data:`~django.db.signals.m2m_changed` signals that indicate how the
relation is changing. This method will handle those signals and
call the correct save method so that they can be re-indexed.
Args:
            instance (django.contrib.auth.models.User or reviewboard.reviews.models.group.Group):
The model that updated.
action (unicode):
The update action. This will be one of:
* ``'pre_add'``
* ``'post_add'``
* ``'pre_remove'``
* ``'post_remove'``
* ``'pre_clear'``
* ``'post_clear'``
pk_set (set of int):
The primary keys of the related objects that changed.
When the action is ``'pre_clear'`` or ``'post_clear'``,
this argument will be an empty set.
reverse (bool):
Whether or not the reverse relation was modified. If
true, this indicated that ``instance`` is a
:py:class:`~django.contrib.auth.models.User` object and
``pk_set`` is the set of primary keys of the added or removed
groups.
When this argument is false, ``instance`` is a
:py:class:`~reviewboard.reviews.models.group.Group`
object and ``pk_set`` is the set of primary keys of the added
or removed users.
**kwargs (dict):
Additional keyword arguments.
"""
backend = search_backend_registry.current_backend
if not (backend and
search_backend_registry.on_the_fly_indexing_enabled):
return
if not hasattr(self._pending_user_changes, 'data'):
self._pending_user_changes.data = {}
if action in ('post_add', 'post_remove'):
if reverse:
# When using the reverse relation, the instance is the User and
# the pk_set is the PKs of the groups being added or removed.
users = [instance]
else:
# Otherwise the instance is the Group and the pk_set is the set
# of User primary keys.
users = User.objects.filter(pk__in=pk_set)
for user in users:
self.handle_save(instance=user, instance_kwarg='instance',
sender=User)
elif action == 'pre_clear':
# When ``reverse`` is ``True``, a User is having their groups
# cleared so we don't need to worry about storing any state in the
# pre_clear phase.
#
# Otherwise, a ReviewGroup is having their users cleared. In both
# the pre_clear and post_clear phases, the ``pk_set`` argument will
# be empty, so we cache the PKs of the current members of the
# groups so we know to reindex them.
if not reverse:
self._pending_user_changes.data[instance.pk] = list(
instance.users.values_list('pk', flat=True))
elif action == 'post_clear':
if reverse:
# When ``reverse`` is ``True``, we just have to reindex a
# single user.
self.handle_save(instance=instance, instance_kwarg='instance',
sender=User)
else:
# Here, we are reindexing every user that got removed from the
# group via clearing.
pks = self._pending_user_changes.data.pop(instance.pk)
for user in User.objects.filter(pk__in=pks):
self.handle_save(instance=user, instance_kwarg='instance',
sender=User)
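# Illustrative note (an assumption, not code from this module): Haystack signal
# processors are normally instantiated by Haystack itself from the
# HAYSTACK_SIGNAL_PROCESSOR setting, which points at a dotted path to a class
# such as the one above, e.g.:
#
#     HAYSTACK_SIGNAL_PROCESSOR = 'reviewboard.search.signal_processor.SignalProcessor'
#
# The exact dotted path is a guess; Review Board may wire this up during its
# own search configuration instead.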
|
|
# -*- coding: utf-8 -*-
"""Tests for `twodolib.cli` module."""
import unittest
import subprocess
from twodolib import TwoDoTask
from twodolib import cli
class TestCliParseArguments(unittest.TestCase):
"""Test the command line interface and argument parsing."""
def test_default_task_is_task_type(self):
"""Default task type should be 'task'."""
parsed = cli.parse_arguments(['TestTask'])
self.assertEqual(parsed.task_type, TwoDoTask.TASK_TYPE)
def test_set_task_type_project(self):
"""A task can be a 'project' type: -t 1."""
args = "TestTask -t 1".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.task_type, TwoDoTask.PROJECT_TYPE)
def test_set_task_type_project_long_option(self):
"""A task can be a 'project' type: --type 1."""
args = "TestTask --type 1".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.task_type, TwoDoTask.PROJECT_TYPE)
def test_set_task_type_checklist(self):
"""A task can be a 'checklist' type: -t 2."""
args = "TestTask -t 2".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.task_type, TwoDoTask.CHECKLIST_TYPE)
def test_set_task_type_checklist_long_option(self):
"""A task can be a 'checklist' type: --type 2."""
args = "TestTask --type 2".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.task_type, TwoDoTask.CHECKLIST_TYPE)
def test_set_list_name(self):
"""A task can belong to a list: -l listname."""
args = "TestTask -l business".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.for_list, 'business')
def test_set_project_name(self):
"""A task can be a subtask in a project."""
args = "TestTask -l business --project webpage".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.for_list, 'business')
self.assertEqual(parsed.in_project, 'webpage')
def test_set_list_name_long_option(self):
"""A task can belong to a list: --list listname."""
args = "TestTask --list business".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.for_list, 'business')
def test_set_note_of_task(self):
"""A task can have a note: -n "some notes to the task ..."."""
args = [
'TestTask',
'-n',
'This is a note for the task.'
]
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.note, 'This is a note for the task.')
def test_set_note_of_task_long_option(self):
"""A task can have a note: --note "some notes to the task ..."."""
args = [
'TestTask',
'--note',
'This is a note for the task.'
]
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.note, 'This is a note for the task.')
def test_set_priority(self):
"""Priority of a task can be set: -p 2."""
args = "TestTask -p 2".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.priority, '2')
def test_set_priority_long_option(self):
"""Priority of a task can be set: --priority 2."""
args = "TestTask --priority 2".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.priority, '2')
def test_not_starring_task_is_default(self):
"""The default task is not starred."""
args = ["TestTask"]
parsed = cli.parse_arguments(args)
self.assertFalse(parsed.starred)
def test_starr_task(self):
"""Starr a task with: -s."""
args = "TestTask -s".split()
parsed = cli.parse_arguments(args)
self.assertTrue(parsed.starred)
def test_starr_task_long_option(self):
"""Starr a task with: --starred."""
args = "TestTask --starred".split()
parsed = cli.parse_arguments(args)
self.assertTrue(parsed.starred)
def test_set_tags_in_task(self):
"""A Task can have tags."""
args = "TestTask --tags business,customer,important".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.tags, "business,customer,important")
def test_get_taskid(self):
"""Print the task id"""
args = "TestTask --taskid -l mylist".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.taskid, True)
def test_set_due_date_isoformat(self):
"""Set the tasks due date: -d YYYY-MM-DD."""
args = "TestTask -d 2015-09-10".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.due, "2015-09-10")
def test_use_parenttask(self):
"""Set the parent task of a subtask"""
args = "SubTask -f Parenttask -l mylist".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.for_parent_name, "Parenttask")
def test_set_due_date_isoformat_long_option(self):
"""Set the tasks due date: --due YYYY-MM-DD."""
args = "TestTask --due 2015-09-10".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.due, "2015-09-10")
def test_set_due_date_n_format(self):
"""Set the tasks due date: -d 2."""
args = "TestTask -d 2".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.due, "2")
def test_set_due_date_n_format_long_option(self):
"""Set the tasks due date: --due 14."""
args = "TestTask --due 14".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.due, "14")
def test_set_duetime(self):
"""Set the tasks due time: --dueTime '2015-09-10 12:00'."""
args = "TestTask --dueTime".split()
args.append('2015-09-10 12:00')
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.dueTime, "2015-09-10 12:00")
def test_set_start_isoformat(self):
"""Set the tasks start time: --start '2015-09-10 12:00'."""
args = "TestTask --start".split()
args.append('2015-09-10 12:00')
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.start, "2015-09-10 12:00")
def test_set_start_n_format(self):
"""Set the tasks start time: --start 7."""
args = "TestTask --start 7".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.start, "7")
def test_repeat_task(self):
"""Create a repeating task: --repeat 2."""
args = "TestTask --repeat 2".split()
parsed = cli.parse_arguments(args)
self.assertEqual(parsed.repeat, "2")
def test_ignoredefaults_default_to_false(self):
"""Do not ignore defaults."""
parsed = cli.parse_arguments(['TestTask'])
self.assertFalse(parsed.ignoreDefaults)
def test_set_ignoredefaults(self):
"""Ignore defaults: -i."""
args = "TestTask -i".split()
parsed = cli.parse_arguments(args)
self.assertTrue(parsed.ignoreDefaults)
def test_set_ignoredefaults_long_option(self):
"""Ignore defaults: --ignoerDefaults."""
args = "TestTask --ignoreDefaults".split()
parsed = cli.parse_arguments(args)
self.assertTrue(parsed.ignoreDefaults)
def test_missing_args_raise_system_exit(self):
"""Raise SystemExit, if args are missing."""
with self.assertRaises(SystemExit):
cli.main([])
class TestCliGeneratesCorrectTwoDoTaskObject(unittest.TestCase):
"""Command line args should create correct task object."""
def test_create_simple_task(self):
"""Create a task with a title."""
parsed = cli.parse_arguments(['TestTask'])
task = TwoDoTask(**vars(parsed))
self.assertEqual(task.task, 'TestTask')
def test_create_simple_task_has_correct_defaults(self):
"""Create a task with correct defaults."""
parsed = cli.parse_arguments(['TestTask'])
task = TwoDoTask(**vars(parsed))
self.assertEqual(task.task, 'TestTask')
self.assertEqual(task.type, TwoDoTask.TASK_TYPE)
self.assertIsNone(task.for_list)
self.assertIsNone(task.forParentName)
self.assertIsNone(task.note)
self.assertEqual(task.priority, '0')
self.assertEqual(task.starred, '0')
self.assertIsNone(task.tags)
self.assertIsNone(task.due)
self.assertIsNone(task.dueTime)
self.assertIsNone(task.start)
self.assertIsNone(task.repeat)
self.assertIsNone(task.action)
self.assertEqual(task.ignoreDefaults, '0')
def test_task_has_repeat_and_priority(self):
"""Create a task with weekly repetition and high priority."""
args = "TestTask --repeat 1 --priority 2".split()
parsed = cli.parse_arguments(args)
task = TwoDoTask(**vars(parsed))
self.assertEqual(task.task, 'TestTask')
self.assertEqual(task.type, TwoDoTask.TASK_TYPE)
self.assertIsNone(task.for_list)
self.assertIsNone(task.forParentName)
self.assertIsNone(task.note)
self.assertEqual(task.priority, '2')
self.assertEqual(task.starred, '0')
self.assertIsNone(task.tags)
self.assertIsNone(task.due)
self.assertIsNone(task.dueTime)
self.assertIsNone(task.start)
self.assertEqual(task.repeat, '1')
self.assertIsNone(task.action)
self.assertEqual(task.ignoreDefaults, '0')
def test_task_is_starred(self):
"""Create a starred task."""
args = "TestTask -s".split()
parsed = cli.parse_arguments(args)
task = TwoDoTask(**vars(parsed))
self.assertEqual(task.task, 'TestTask')
self.assertEqual(task.starred, '1')
def test_set_task_to_ignore_defaults(self):
"""Create a task, which ignores date and time defaults."""
args = "TestTask -i".split()
parsed = cli.parse_arguments(args)
task = TwoDoTask(**vars(parsed))
self.assertEqual(task.task, 'TestTask')
self.assertEqual(task.ignoreDefaults, '1')
def test_call_of_cli_generates_output(self):
"""Call command generates no output."""
msg = subprocess.check_output(['python', '-m', 'twodolib.cli', 'test'])
self.assertGreater(len(msg), 0)
def test_task_gets_action_long(self):
"""Create a task with an action."""
args = "TestTask --action url:https://www.2doapp.com".split()
parsed = cli.parse_arguments(args)
task = TwoDoTask(**vars(parsed))
self.assertEqual(task.task, 'TestTask')
self.assertEqual(task.type, TwoDoTask.TASK_TYPE)
self.assertIsNone(task.for_list)
self.assertIsNone(task.forParentName)
self.assertIsNone(task.note)
self.assertEqual(task.priority, '0')
self.assertEqual(task.starred, '0')
self.assertIsNone(task.tags)
self.assertIsNone(task.due)
self.assertIsNone(task.dueTime)
self.assertIsNone(task.start)
self.assertIsNone(task.repeat)
self.assertIsNotNone(task.action)
self.assertEqual(task.ignoreDefaults, '0')
def test_task_gets_action_short(self):
"""Create a task with an action."""
args = "TestTask -a url:https://www.2doapp.com".split()
parsed = cli.parse_arguments(args)
task = TwoDoTask(**vars(parsed))
self.assertEqual(task.task, 'TestTask')
self.assertEqual(task.type, TwoDoTask.TASK_TYPE)
self.assertIsNone(task.for_list)
self.assertIsNone(task.forParentName)
self.assertIsNone(task.note)
self.assertEqual(task.priority, '0')
self.assertEqual(task.starred, '0')
self.assertIsNone(task.tags)
self.assertIsNone(task.due)
self.assertIsNone(task.dueTime)
self.assertIsNone(task.start)
self.assertIsNone(task.repeat)
self.assertIsNotNone(task.action)
self.assertEqual(task.ignoreDefaults, '0')
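# Illustrative sketch (mirrors the behaviour exercised by the tests above):
# parsing arguments and building the task object outside the test suite. The
# task title and options are placeholders.
#
#     parsed = cli.parse_arguments(['Write report', '-l', 'work', '-p', '2'])
#     task = TwoDoTask(**vars(parsed))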
|
|
#!/usr/bin/env python
"""
Ascii memcached test client.
"""
import socket
import select
import exceptions
import memcacheConstants
class MemcachedError(exceptions.Exception):
"""Error raised when a command fails."""
def __init__(self, status, msg):
        supermsg = 'Memcached error #' + repr(status)
if msg: supermsg += ": " + msg
exceptions.Exception.__init__(self, supermsg)
self.status=status
self.msg=msg
def __repr__(self):
return "<MemcachedError #%d ``%s''>" % (self.status, self.msg)
class MemcachedAsciiClient(object):
"""Simple ascii memcached client."""
def __init__(self, host='127.0.0.1', port=11211, timeout=30):
self.host = host
self.port = port
self.s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.timeout = timeout
self.s.connect_ex((host, port))
def close(self):
self.s.close()
def __del__(self):
self.close()
def _sendMsg(self, cmd):
_, w, _ = select.select([], [self.s], [], self.timeout)
if w:
self.s.send(cmd)
else:
raise exceptions.EOFError("Timeout waiting for socket send. from {0}".format(self.host))
def _recvMsg(self):
r, _, _ = select.select([self.s], [], [], self.timeout)
if r:
response = ""
while not response.endswith("\r\n"):
data = self.s.recv(1)
if data == '':
raise exceptions.EOFError("Got empty data (remote died?). from {0}".format(self.host))
response += data
return response[:-2]
else:
raise exceptions.EOFError("Timeout waiting for socket recv. from {0}".format(self.host))
def _recvData(self, length):
r, _, _ = select.select([self.s], [], [], self.timeout)
if r:
response = ""
while len(response) < length + 2:
data = self.s.recv((length + 2) - len(response))
if data == '':
raise exceptions.EOFError("Got empty data (remote died?). from {0}".format(self.host))
response += data
return response[:-2]
else:
raise exceptions.EOFError("Timeout waiting for socket recv. from {0}".format(self.host))
def _doStore(self, cmd):
"""Send a command and await its response."""
self._sendMsg(cmd)
return self._recvMsg()
def _doRetrieve(self, cmd):
"""Send a command and await its response."""
self._sendMsg(cmd)
msg = self._recvMsg()
result = {}
error = ""
while msg.split(" ")[0] == "VALUE":
key = msg.split(" ")[1]
flags = int(msg.split(" ")[2]) % 2**32
length = int(msg.split(" ")[3])
cas = int(msg.split(" ")[4])
data = self._recvData(length)
result[key] = (flags,cas,data)
msg = self._recvMsg()
if msg != "END":
error = msg
return result, error
def _doStatsVersion(self, cmd):
"""Send a command and await its response."""
self._sendMsg(cmd)
msg = self._recvMsg()
result = {}
error = ""
while msg.split(" ")[0] == "STAT" or \
msg.split(" ")[0] == "VERSION":
print "msg:",msg
kind = msg.split(" ")[0]
key = msg.split(" ")[1]
if kind == "VERSION":
return key, ""
value = msg.split(" ")[2]
result[key] = value
msg = self._recvMsg()
if msg != "END":
error = msg
return result, error
def _doIncrDecr(self, cmd):
"""Send a command and await its response."""
self._sendMsg(cmd)
msg = self._recvMsg()
try:
            # ascii incr/decr doesn't give us the cas
return (int(msg), 0), ""
except ValueError:
return None, msg
def append(self, key, value, cas=0):
response = self._doStore("append {0} 0 0 {1} {2}\r\n{3}\r\n".format(key, len(value), cas, value))
if response != "STORED":
raise MemcachedError(-1, response)
def prepend(self, key, value, cas=0):
response = self._doStore("prepend {0} 0 0 {1} {2}\r\n{3}\r\n".format(key, len(value), cas, value))
if response != "STORED":
raise MemcachedError(-1, response)
def incr(self, key, amt=1, init=0, exp=0):
"""Increment or create the named counter."""
response, error = self._doIncrDecr("incr {0} {1}\r\n".format(key, amt))
if error:
raise MemcachedError(-1, error)
return response
def decr(self, key, amt=1, init=0, exp=0):
"""Decrement or create the named counter."""
response, error = self._doIncrDecr("decr {0} {1}\r\n".format(key, amt))
if error:
raise MemcachedError(-1, error)
return response
def set(self, key, exp, flags, val):
"""Set a value in the memcached server."""
response = self._doStore("set {0} {1} {2} {3}\r\n{4}\r\n".format(key, flags, exp, len(val), val))
if response != "STORED":
raise MemcachedError(-1, response)
def add(self, key, exp, flags, val):
"""Add a value in the memcached server iff it doesn't already exist."""
response = self._doStore("add {0} {1} {2} {3}\r\n{4}\r\n".format(key, flags, exp, len(val), val))
if response != "STORED":
raise MemcachedError(-1, response)
def replace(self, key, exp, flags, val):
"""Replace a value in the memcached server iff it already exists."""
response = self._doStore("replace {0} {1} {2} {3}\r\n{4}\r\n".format(key, flags, exp, len(val), val))
if response != "STORED":
raise MemcachedError(-1, response)
def get(self, key):
"""Get the value for a given key within the memcached server."""
response, error = self._doRetrieve("gets {0}\r\n".format(key))
if error:
raise MemcachedError(-1, error)
return response.items()[0][1]
def getl(self, key, exp=15):
"""Get the value for a given key within the memcached server."""
response, error = self._doRetrieve("getl {0} {1}\r\n".format(key, exp))
if error:
raise MemcachedError(-1, error)
return response.items()[0][1]
def cas(self, key, exp, flags, oldVal, val):
"""CAS in a new value for the given key and comparison value."""
response = self._doStore("cas {0} {1} {2} {3} {4}\r\n{5}\r\n".format(key, flags, exp, len(val), oldVal, val))
if response != "STORED":
raise MemcachedError(-1, response)
def touch(self, key, exp):
"""Touch a key in the memcached server."""
response = self._doStore("touch {0} {1}\r\n".format(key, exp))
if response != "STORED":
raise MemcachedError(-1, response)
def gat(self, key, exp):
"""Get the value for a given key and touch it within the memcached server."""
response, error = self._doRetrieve("gat {0} {1}\r\n".format(key, exp))
if error:
raise MemcachedError(-1, error)
return response.items()[0][1]
def version(self):
"""Get the value for a given key within the memcached server."""
response, error = self._doStatsVersion("version\r\n")
if error:
raise MemcachedError(-1, error)
return response
def getMulti(self, keys):
"""Get values for any available keys in the given iterable.
Returns a dict of matched keys to their values."""
cmd = "gets"
for key in keys:
cmd += " " + key
cmd += "\r\n"
response, error = self._doRetrieve(cmd)
if error:
raise MemcachedError(-1, error)
return response
def stats(self, sub=''):
"""Get stats."""
if sub:
sub = " " + sub
response, error = self._doStatsVersion("stats{0}\r\n".format(sub))
if error:
raise MemcachedError(-1, error)
return response
def delete(self, key, cas=0):
"""Delete the value for a given key within the memcached server."""
response = self._doStore("delete {0} {1}\r\n".format(key, cas))
if response != "DELETED":
raise MemcachedError(-1, response)
    def flush(self, timebomb=0):
        """Flush all storage in a memcached instance."""
        # Use the ascii "flush_all [delay]" command; the previous body called
        # a binary-protocol helper (_doCmd) that this client does not have.
        response = self._doStore("flush_all {0}\r\n".format(timebomb))
        if response != "OK":
            raise MemcachedError(-1, response)
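# Illustrative usage sketch (not part of the original client): assumes a
# memcached server listening on the default local port; the key and value
# below are placeholders.
if __name__ == '__main__':
    client = MemcachedAsciiClient('127.0.0.1', 11211)
    client.set('greeting', 0, 0, 'hello')
    flags, cas, value = client.get('greeting')
    print "greeting =", value
    client.delete('greeting')
    client.close()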
|
|
# -*- coding: utf-8 -*-
"""
PLIF Toolset
Created on Thu Jan 21 13:56:54 2016
@author: Darren Banks
PLIF Temperature Calculator uses grayscale-average values from video recording
of rhodamine fluorescence to estimate the temperature field from the images.
It assumes an exponential relationship between temperature and fluorescent
intensity, as spelled out by Lemoine, et al, 'Simultaneous temperature and 2D
velocity measurements in a turbulent heated jet using combined laser-induced
fluorescence and LDA', Experiments in Fluids, 26, p.315-323, 1999.
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import PIL as pil
logger = logging.getLogger('plif.tools')
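# Illustrative sketch (not part of the original toolset): one way to invert the
# exponential intensity-temperature model mentioned in the module docstring.
# The functional form, symbol names, and reference-state formulation are
# assumptions; the pipeline below calibrates with per-degree grid slopes
# instead (see getGridSlopes).
def temperature_from_intensity(intensity, i_ref, t_ref, beta):
    """Recover temperature from fluorescent intensity, assuming
    I = i_ref * exp(beta * (T - t_ref)), i.e. T = t_ref + ln(I / i_ref) / beta.
    """
    return t_ref + np.log(intensity / i_ref) / beta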
def listImages(image_directory, extension='.tif'):
""" Returns a list of images within image_directory that have the
specified extension.
Current camera software saves video to '.tif' files, hence the
default argument. As long as files are readable by PIL, any
extension should work.
"""
file_path = image_directory + '\\'
image_names = [file_path + file for file in os.listdir(image_directory)
if extension in file]
num_images = len(image_names)
logger = logging.getLogger('plif.tools.listImages')
if len(image_directory) > 16:
short_dir = '...' + image_directory[-16:]
else:
short_dir = image_directory
logger.debug('{} images loaded from {}'.format(num_images, short_dir))
return image_names
def exptDirectory(root_directory, prime_directory='images',
sub_directory='calibration'):
""" Returns image_dir, a list with a primary image directory in
its first entry, and a sub-list containing calibration
directories second. It assumes the images are contained in
a subfolder appropriately named 'images', and that any
subfolders with the word 'calibration' in their names
contain calibration images.
"""
if root_directory != prime_directory and prime_directory != '':
""" If root_directory and prime_directory are not the same,
the prime should be a subfolder of root and the
subDirectories contained within the prime.
"""
root_path = root_directory + '\\'
image_dir = [root_path + entry for entry in os.listdir(root_directory)
if os.path.isdir(root_path + entry)
if prime_directory in entry]
""" Creates image_dir, the directory within the root_directory
that contains the name listed in prime_directory.
"""
elif root_directory == prime_directory or prime_directory == '':
""" If root_directory and prime_directory are the same, or
prime_directory is specified as an empty string, the
images are located within the root_directory itself.
"""
image_dir = [root_directory]
image_dir.append([image_dir[0] + '\\' + entry
for entry in os.listdir(image_dir[0])
if sub_directory in entry])
""" Appends a sublist of folders to the second entry in image_dir,
        which are the subfolders containing the sub_directory name.
"""
return image_dir
def gridAverage(images, grid_num=32):
""" Using PIL/pillow, divides the input images into a gridNum x
gridNum square grid. Then calculates the average RGB values
for each of the cells of that grid. Returns a pandas
DataFrame containing a row for each image in images and a
column for each grid cell.
"""
image_average_list = []
for image in images:
current_image = pil.Image.open(image)
width, height = current_image.size[:]
x_step = width / grid_num
x_coords = np.arange(0, width, x_step)
y_step = height / grid_num
y_coords = np.arange(0, height, y_step)
""" Based on the image's size, determine the width and
coordinates of each grid square in pixels.
"""
grid_set = [(x_coord, y_coord, x_coord + x_step - 1,
y_coord + y_step - 1)
for y_coord in y_coords for x_coord in x_coords]
""" gridBoxSet is the collection of left, upper, right,
and lower bounds of each grid cell based on the image
size and the desired number of cells.
"""
grid_averages = []
""" Pre-defining gridAvgs as a list, also, clearing it out
so fresh values are appended for each image.
"""
for grid_box in grid_set:
grid_box = [int(num) for num in grid_box]
""" gridBox is the iterating collection of each
coordinate of a cell on the grid. The values are
forced to integers because the image cropping
function doesn't accept float values.
"""
current_box = current_image.crop((grid_box[0:4]))
grid_averages.append(np.mean(current_box))
""" The orignal image is cropped to a single grid cell,
and the average RGB value is added to the list of
all cell values in the working image.
"""
image_average_list.append(grid_averages)
""" imageAverageList collects averages of each image. """
image_averages = pd.DataFrame(image_average_list)
return(image_averages)
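# Example (illustrative; the directory path is a placeholder): computing
# per-cell grayscale averages for every frame in a folder of .tif images.
#
#     frames = listImages(r'C:\experiments\run01\images')
#     cell_averages = gridAverage(frames, grid_num=32)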
def getCalibrationAverages(calib_images, cal_temperatures, grid_num=32):
""" Using the gridAverage function, returns a DataFrame containing
the RGB averages for each set of images denoted as
calibrations. The DataFrame is indexed by the calibration
temperatures.
"""
cal_avg_list = [gridAverage(cal_set, grid_num) for cal_set in calib_images]
cal_averages = pd.concat(cal_avg_list, keys=cal_temperatures)
return cal_averages
def getGridSlopes(cal_averages, cal_temperatures):
calib_range = [[cal_temperatures[i], cal_temperatures[i+1],
int(cal_temperatures[i]) - int(cal_temperatures[i+1])]
for i in range(len(cal_temperatures)-1)]
grid_slopes_set = [np.mean(cal_averages.ix[temp[0]] -
cal_averages.ix[temp[1]]) / temp[2]
for temp in calib_range]
grid_slopes = np.mean(pd.DataFrame(grid_slopes_set))
return(grid_slopes)
def getAspectRatio(image_path, decimal_point=1):
""" Returns the input image's aspect ratio.
(width/height) rounded to a default of 1 decimal point.
"""
reference_image = pil.Image.open(image_path)
image_width, image_height = reference_image.size
aspect_ratio = round(image_width/image_height, decimal_point)
logger = logging.getLogger('plif.tools.aspectRatio')
logger.debug('Aspect ratio {}'.format(aspect_ratio))
return aspect_ratio
def getTemperatureStats(temperatures, image_path, statistic='statistics.xlsx'):
""" Returns stats_list, a list of the maximum, average, and standard
deviation of temperatures within each row of the temperatures
DataFrame.
"""
logger = logging.getLogger('plif.tools.getTempStats')
    # iterrows() returns a one-shot iterator, so it is re-created for each
    # statistic; reusing a single iterator left the last two Series empty.
    max_temperatures = pd.Series([max(T) for i, T in temperatures.iterrows()])
    mean_temperatures = pd.Series([np.mean(T) for i, T in temperatures.iterrows()])
    stddev_temperatures = pd.Series([np.std(T) for i, T in temperatures.iterrows()])
logger.info('Maximum: {}'.format(round(max(temperatures.max()))))
logger.info('Minimum: {}'.format(round(min(temperatures.min()))))
logger.info('Median: {}'.format(round(np.median(temperatures.median()))))
logger.debug('{} frames to be analyzed'.format(len(temperatures)))
stats_list = [max_temperatures, mean_temperatures, stddev_temperatures]
thermal_statistics = pd.concat(stats_list, keys=['max', 'mean', 's.dev'])
thermal_statistics.to_frame().to_excel(image_path + '\\' + statistic)
return(stats_list)
def plotTemperatureStats(stats_list, image_path, plot_type='.png'):
""" Using the stats_list produced by getTemperatureStats, plots the
maximum, mean, and standard deviation of temperatures in each row
of the temperatures in the DataFrame fed to getTemperatureStats.
"""
plt.plot(stats_list[0])
plt.title('Maximum temperature per frame.', fontname='Times New Roman')
plt.ylabel('Deg. C')
plt.xlabel('Frame')
plt.savefig(image_path + '\\max_temperatures' + plot_type, dpi=100)
plt.clf()
plt.plot(stats_list[1])
plt.title('Average temperature per frame.')
plt.ylabel('Deg. C')
plt.xlabel('Frame')
plt.savefig(image_path + '\\mean_temperatures' + plot_type, dpi=100)
plt.clf()
plt.plot(stats_list[2])
plt.title('Standard deviation in temperature per frame.')
plt.ylabel('Deg. C')
plt.xlabel('Frame')
plt.savefig(image_path + '\\std_dev_temperatures' + plot_type, dpi=100)
plt.clf()
pass
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import core.models.plcorebase
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AccessMap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_register', models.CharField(default=b'{}', max_length=140, null=True)),
('backend_status', models.CharField(default=b'0 - Provisioning in progress', max_length=1024)),
('deleted', models.BooleanField(default=False)),
('write_protect', models.BooleanField(default=False)),
('lazy_blocked', models.BooleanField(default=False)),
('no_sync', models.BooleanField(default=False)),
('name', models.CharField(help_text=b'Name of the Access Map', max_length=64)),
('description', models.TextField(max_length=130, null=True, blank=True)),
('map', models.FileField(help_text=b'specifies which client requests are allowed', upload_to=b'maps/')),
],
options={
},
bases=(models.Model, core.models.plcorebase.PlModelMixIn),
),
migrations.CreateModel(
name='CDNPrefix',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_register', models.CharField(default=b'{}', max_length=140, null=True)),
('backend_status', models.CharField(default=b'0 - Provisioning in progress', max_length=1024)),
('deleted', models.BooleanField(default=False)),
('write_protect', models.BooleanField(default=False)),
('lazy_blocked', models.BooleanField(default=False)),
('no_sync', models.BooleanField(default=False)),
('cdn_prefix_id', models.IntegerField(null=True, blank=True)),
('prefix', models.CharField(help_text=b'Registered Prefix for Domain', max_length=200)),
('description', models.TextField(help_text=b'Description of Content Provider', max_length=254, null=True, blank=True)),
('enabled', models.BooleanField(default=True)),
],
options={
},
bases=(models.Model, core.models.plcorebase.PlModelMixIn),
),
migrations.CreateModel(
name='ContentProvider',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_register', models.CharField(default=b'{}', max_length=140, null=True)),
('backend_status', models.CharField(default=b'0 - Provisioning in progress', max_length=1024)),
('deleted', models.BooleanField(default=False)),
('write_protect', models.BooleanField(default=False)),
('lazy_blocked', models.BooleanField(default=False)),
('no_sync', models.BooleanField(default=False)),
('content_provider_id', models.IntegerField(null=True, blank=True)),
('name', models.CharField(max_length=254)),
('enabled', models.BooleanField(default=True)),
('description', models.TextField(help_text=b'Description of Content Provider', max_length=254, null=True, blank=True)),
],
options={
},
bases=(models.Model, core.models.plcorebase.PlModelMixIn),
),
migrations.CreateModel(
name='HpcHealthCheck',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_register', models.CharField(default=b'{}', max_length=140, null=True)),
('backend_status', models.CharField(default=b'0 - Provisioning in progress', max_length=1024)),
('deleted', models.BooleanField(default=False)),
('write_protect', models.BooleanField(default=False)),
('lazy_blocked', models.BooleanField(default=False)),
('no_sync', models.BooleanField(default=False)),
('kind', models.CharField(default=b'dns', max_length=30, choices=[(b'dns', b'DNS'), (b'http', b'HTTP'), (b'nameserver', b'Name Server')])),
('resource_name', core.models.plcorebase.StrippedCharField(max_length=1024)),
('result_contains', core.models.plcorebase.StrippedCharField(max_length=1024, null=True, blank=True)),
('result_min_size', models.IntegerField(null=True, blank=True)),
('result_max_size', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model, core.models.plcorebase.PlModelMixIn),
),
migrations.CreateModel(
name='HpcService',
fields=[
('service_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='core.Service')),
('cmi_hostname', core.models.plcorebase.StrippedCharField(max_length=254, null=True, blank=True)),
('hpc_port80', models.BooleanField(default=True, help_text=b'Enable port 80 for HPC')),
('watcher_hpc_network', core.models.plcorebase.StrippedCharField(help_text=b'Network for hpc_watcher to contact hpc instance', max_length=254, null=True, blank=True)),
('watcher_dnsdemux_network', core.models.plcorebase.StrippedCharField(help_text=b'Network for hpc_watcher to contact dnsdemux instance', max_length=254, null=True, blank=True)),
('watcher_dnsredir_network', core.models.plcorebase.StrippedCharField(help_text=b'Network for hpc_watcher to contact dnsredir instance', max_length=254, null=True, blank=True)),
],
options={
'verbose_name': 'HPC Service',
},
bases=('core.service',),
),
migrations.CreateModel(
name='OriginServer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_register', models.CharField(default=b'{}', max_length=140, null=True)),
('backend_status', models.CharField(default=b'0 - Provisioning in progress', max_length=1024)),
('deleted', models.BooleanField(default=False)),
('write_protect', models.BooleanField(default=False)),
('lazy_blocked', models.BooleanField(default=False)),
('no_sync', models.BooleanField(default=False)),
('origin_server_id', models.IntegerField(null=True, blank=True)),
('url', models.CharField(max_length=1024)),
('authenticated', models.BooleanField(default=False, help_text=b'Status for this Site')),
('enabled', models.BooleanField(default=True, help_text=b'Status for this Site')),
('protocol', models.CharField(default=b'HTTP', max_length=12, choices=[(b'http', b'HTTP'), (b'rtmp', b'RTMP'), (b'rtp', b'RTP'), (b'shout', b'SHOUTcast')])),
('redirects', models.BooleanField(default=True, help_text=b'Indicates whether Origin Server redirects should be used for this Origin Server')),
('description', models.TextField(max_length=255, null=True, blank=True)),
('contentProvider', models.ForeignKey(to='hpc.ContentProvider')),
],
options={
},
bases=(models.Model, core.models.plcorebase.PlModelMixIn),
),
migrations.CreateModel(
name='ServiceProvider',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_register', models.CharField(default=b'{}', max_length=140, null=True)),
('backend_status', models.CharField(default=b'0 - Provisioning in progress', max_length=1024)),
('deleted', models.BooleanField(default=False)),
('write_protect', models.BooleanField(default=False)),
('lazy_blocked', models.BooleanField(default=False)),
('no_sync', models.BooleanField(default=False)),
('service_provider_id', models.IntegerField(null=True, blank=True)),
('name', models.CharField(help_text=b'Service Provider Name', max_length=254)),
('description', models.TextField(help_text=b'Description of Service Provider', max_length=254, null=True, blank=True)),
('enabled', models.BooleanField(default=True)),
('hpcService', models.ForeignKey(to='hpc.HpcService')),
],
options={
},
bases=(models.Model, core.models.plcorebase.PlModelMixIn),
),
migrations.CreateModel(
name='SiteMap',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('enacted', models.DateTimeField(default=None, null=True, blank=True)),
('policed', models.DateTimeField(default=None, null=True, blank=True)),
('backend_register', models.CharField(default=b'{}', max_length=140, null=True)),
('backend_status', models.CharField(default=b'0 - Provisioning in progress', max_length=1024)),
('deleted', models.BooleanField(default=False)),
('write_protect', models.BooleanField(default=False)),
('lazy_blocked', models.BooleanField(default=False)),
('no_sync', models.BooleanField(default=False)),
('name', models.CharField(help_text=b'Name of the Site Map', max_length=64)),
('description', models.TextField(max_length=130, null=True, blank=True)),
('map', models.FileField(help_text=b'specifies how to map requests to hpc instances', upload_to=b'maps/')),
('map_id', models.IntegerField(null=True, blank=True)),
('cdnPrefix', models.ForeignKey(blank=True, to='hpc.CDNPrefix', null=True)),
('contentProvider', models.ForeignKey(blank=True, to='hpc.ContentProvider', null=True)),
('hpcService', models.ForeignKey(blank=True, to='hpc.HpcService', null=True)),
('serviceProvider', models.ForeignKey(blank=True, to='hpc.ServiceProvider', null=True)),
],
options={
},
bases=(models.Model, core.models.plcorebase.PlModelMixIn),
),
migrations.AddField(
model_name='hpchealthcheck',
name='hpcService',
field=models.ForeignKey(blank=True, to='hpc.HpcService', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='contentprovider',
name='serviceProvider',
field=models.ForeignKey(to='hpc.ServiceProvider'),
preserve_default=True,
),
migrations.AddField(
model_name='contentprovider',
name='users',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='cdnprefix',
name='contentProvider',
field=models.ForeignKey(to='hpc.ContentProvider'),
preserve_default=True,
),
migrations.AddField(
model_name='cdnprefix',
name='defaultOriginServer',
field=models.ForeignKey(blank=True, to='hpc.OriginServer', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='accessmap',
name='contentProvider',
field=models.ForeignKey(to='hpc.ContentProvider'),
preserve_default=True,
),
]
|
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Ternary(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout"
_path_str = "layout.ternary"
_valid_props = {"aaxis", "baxis", "bgcolor", "caxis", "domain", "sum", "uirevision"}
# aaxis
# -----
@property
def aaxis(self):
"""
The 'aaxis' property is an instance of Aaxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Aaxis`
- A dict of string/value properties that will be passed
to the Aaxis constructor
Supported dict properties:
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background.
Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values:
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
layer
Sets the layer on which this axis is displayed.
If *above traces*, this axis is displayed above
all the subplot's traces. If *below traces*,
this axis is displayed below all the subplot's
traces, but above the grid lines. Useful when
used together with scatter-like traces with
`cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
min
The minimum value visible on this axis. The
maximum is determined by the sum minus the
minimum values of the other two axes. The full
view corresponds to all the minima set to zero.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.
ternary.aaxis.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.lay
out.ternary.aaxis.tickformatstopdefaults), sets
the default property values to use for elements
of layout.ternary.aaxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as
compared to the spacing between ticks. A value
of 1 (default) means each tick gets a label. A
value of 2 shows every 2nd label. A
larger value n means only every nth tick is
labeled. `tick0` determines which labels are
shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is
"array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for `ticktext`.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.ternary.aax
is.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
layout.ternary.aaxis.title.font instead. Sets
this axis' title font. Note that the title's
font used to be customized by the now
deprecated `titlefont` attribute.
uirevision
Controls persistence of user-driven changes in
axis `min`, and `title` if in `editable: true`
configuration. Defaults to
`ternary<N>.uirevision`.
Returns
-------
plotly.graph_objs.layout.ternary.Aaxis
"""
return self["aaxis"]
@aaxis.setter
def aaxis(self, val):
self["aaxis"] = val
# baxis
# -----
@property
def baxis(self):
"""
The 'baxis' property is an instance of Baxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Baxis`
- A dict of string/value properties that will be passed
to the Baxis constructor
Supported dict properties:
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background.
Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values:
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
layer
Sets the layer on which this axis is displayed.
If *above traces*, this axis is displayed above
all the subplot's traces. If *below traces*,
this axis is displayed below all the subplot's
traces, but above the grid lines. Useful when
used together with scatter-like traces with
`cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
min
The minimum value visible on this axis. The
maximum is determined by the sum minus the
minimum values of the other two axes. The full
view corresponds to all the minima set to zero.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.
ternary.baxis.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.lay
out.ternary.baxis.tickformatstopdefaults), sets
the default property values to use for elements
of layout.ternary.baxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as
compared to the spacing between ticks. A value
of 1 (default) means each tick gets a label. A
value of 2 shows every 2nd label. A
larger value n means only every nth tick is
labeled. `tick0` determines which labels are
shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is
"array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for `ticktext`.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.ternary.bax
is.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
layout.ternary.baxis.title.font instead. Sets
this axis' title font. Note that the title's
font used to be customized by the now
deprecated `titlefont` attribute.
uirevision
Controls persistence of user-driven changes in
axis `min`, and `title` if in `editable: true`
configuration. Defaults to
`ternary<N>.uirevision`.
Returns
-------
plotly.graph_objs.layout.ternary.Baxis
"""
return self["baxis"]
@baxis.setter
def baxis(self, val):
self["baxis"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Set the background color of the subplot
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# caxis
# -----
@property
def caxis(self):
"""
The 'caxis' property is an instance of Caxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Caxis`
- A dict of string/value properties that will be passed
to the Caxis constructor
Supported dict properties:
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background.
Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values:
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
layer
Sets the layer on which this axis is displayed.
If *above traces*, this axis is displayed above
all the subplot's traces. If *below traces*,
this axis is displayed below all the subplot's
traces, but above the grid lines. Useful when
used together with scatter-like traces with
`cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
min
The minimum value visible on this axis. The
maximum is determined by the sum minus the
minimum values of the other two axes. The full
view corresponds to all the minima set to zero.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f
ormat. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.
ternary.caxis.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.lay
out.ternary.caxis.tickformatstopdefaults), sets
the default property values to use for elements
of layout.ternary.caxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as
compared to the spacing between ticks. A value
of 1 (default) means each tick gets a label. A
value of 2 shows every 2nd label. A
larger value n means only every nth tick is
labeled. `tick0` determines which labels are
shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is
"array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for `ticktext`.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.ternary.cax
is.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
layout.ternary.caxis.title.font instead. Sets
this axis' title font. Note that the title's
font used to be customized by the now
deprecated `titlefont` attribute.
uirevision
Controls persistence of user-driven changes in
axis `min`, and `title` if in `editable: true`
configuration. Defaults to
`ternary<N>.uirevision`.
Returns
-------
plotly.graph_objs.layout.ternary.Caxis
"""
return self["caxis"]
@caxis.setter
def caxis(self, val):
self["caxis"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this ternary
subplot .
row
If there is a layout grid, use the domain for
this row in the grid for this ternary subplot .
x
Sets the horizontal domain of this ternary
subplot (in plot fraction).
y
Sets the vertical domain of this ternary
subplot (in plot fraction).
Returns
-------
plotly.graph_objs.layout.ternary.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# sum
# ---
@property
def sum(self):
"""
The number each triplet should sum to, and the maximum range of
each axis
The 'sum' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sum"]
@sum.setter
def sum(self, val):
self["sum"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of user-driven changes in axis `min` and
`title`, if not overridden in the individual axes. Defaults to
`layout.uirevision`.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
aaxis
:class:`plotly.graph_objects.layout.ternary.Aaxis`
instance or dict with compatible properties
baxis
:class:`plotly.graph_objects.layout.ternary.Baxis`
instance or dict with compatible properties
bgcolor
Set the background color of the subplot
caxis
:class:`plotly.graph_objects.layout.ternary.Caxis`
instance or dict with compatible properties
domain
:class:`plotly.graph_objects.layout.ternary.Domain`
instance or dict with compatible properties
sum
The number each triplet should sum to, and the maximum
range of each axis
uirevision
Controls persistence of user-driven changes in axis
`min` and `title`, if not overridden in the individual
axes. Defaults to `layout.uirevision`.
"""
def __init__(
self,
arg=None,
aaxis=None,
baxis=None,
bgcolor=None,
caxis=None,
domain=None,
sum=None,
uirevision=None,
**kwargs
):
"""
Construct a new Ternary object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Ternary`
aaxis
:class:`plotly.graph_objects.layout.ternary.Aaxis`
instance or dict with compatible properties
baxis
:class:`plotly.graph_objects.layout.ternary.Baxis`
instance or dict with compatible properties
bgcolor
Set the background color of the subplot
caxis
:class:`plotly.graph_objects.layout.ternary.Caxis`
instance or dict with compatible properties
domain
:class:`plotly.graph_objects.layout.ternary.Domain`
instance or dict with compatible properties
sum
The number each triplet should sum to, and the maximum
range of each axis
uirevision
Controls persistence of user-driven changes in axis
`min` and `title`, if not overridden in the individual
axes. Defaults to `layout.uirevision`.
Returns
-------
Ternary
"""
super(Ternary, self).__init__("ternary")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Ternary
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Ternary`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("aaxis", None)
_v = aaxis if aaxis is not None else _v
if _v is not None:
self["aaxis"] = _v
_v = arg.pop("baxis", None)
_v = baxis if baxis is not None else _v
if _v is not None:
self["baxis"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("caxis", None)
_v = caxis if caxis is not None else _v
if _v is not None:
self["caxis"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("sum", None)
_v = sum if sum is not None else _v
if _v is not None:
self["sum"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
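# --- Added illustration (not part of the generated Plotly module above) ---
# Minimal usage sketch showing how the Ternary layout object documented above
# is typically driven through the public plotly.graph_objects API. The trace
# values, colors and dtick settings are made up for the demo, and the import
# is deferred into a __main__ guard so importing this module stays free of
# side effects.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Scatterternary(a=[0.2, 0.5, 0.3], b=[0.3, 0.2, 0.5], c=[0.5, 0.3, 0.2])
    )
    fig.update_layout(
        ternary=dict(
            sum=1,  # each (a, b, c) triplet is interpreted relative to this total
            bgcolor="whitesmoke",
            aaxis=dict(title=dict(text="A"), min=0.0, dtick=0.25),
            baxis=dict(title=dict(text="B"), min=0.0, dtick=0.25),
            caxis=dict(title=dict(text="C"), min=0.0, dtick=0.25),
        )
    )
    # fig.show()  # uncomment to render the figure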
|
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - UINT8_Max
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
UINT8_Max :
- unsigned
- size = 8
- range : [0, 255]
Test cases :
------------
- UINT8_Max parameter min value = 0
- UINT8_Max parameter min value out of bounds = -1
- UINT8_Max parameter max value = 255
- UINT8_Max parameter max value out of bounds = 256
- UINT8_Max parameter in nominal case = 50
"""
import os
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type UINT8_Max - range [0, 255]
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/UINT8_Max"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing UINT8_Max in nominal case = 50
--------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8_Max parameter in nominal case = 50
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT8_Max parameter set to 50
- Blackboard and filesystem values checked
"""
log.D(self.test_Nominal_Case.__doc__)
log.I("UINT8_Max parameter in nominal case = 50")
value = "50"
hex_value = "0x32"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when getting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/UINT8_Max").read()[:-1] == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin(self):
"""
Testing UINT8_Max minimal value = 0
-----------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8_Max parameter min value = 0
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT8_Max parameter set to 0
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin.__doc__)
log.I("UINT8_Max parameter min value = 0")
value = "0"
hex_value = "0x0"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when getting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/UINT8_Max").read()[:-1] == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin_Overflow(self):
"""
Testing UINT8_Max parameter value out of negative range
-------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8_Max to -1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- UINT8_Max parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin_Overflow.__doc__)
log.I("UINT8_Max parameter min value out of bounds = -1")
value = "-1"
param_check = open(os.environ["PFW_RESULT"] + "/UINT8_Max").read()[:-1]
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/UINT8_Max").read()[:-1] == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
def test_TypeMax(self):
"""
Testing UINT8_Max parameter maximum value
-----------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8_Max to 255
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT8_Max parameter set to 255
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax.__doc__)
log.I("UINT8_Max parameter max value = 255")
value = "255"
hex_value = "0xff"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when getting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/UINT8_Max").read()[:-1] == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMax_Overflow(self):
"""
Testing UINT8_Max parameter value out of positive range
-------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8_Max to 256
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- UINT8_Max parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax_Overflow.__doc__)
log.I("UINT8_Max parameter max value out of bounds = 256")
value = "256"
param_check = open(os.environ["PFW_RESULT"] + "/UINT8_Max").read()[:-1]
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/UINT8_Max").read()[:-1] == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import tensorflow as tf
class FloatDTypeTest(tf.test.TestCase):
def test_assert_same_float_dtype(self):
self.assertIs(
tf.float32, tf.contrib.framework.assert_same_float_dtype(None, None))
self.assertIs(
tf.float32, tf.contrib.framework.assert_same_float_dtype([], None))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([], tf.float32))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype(None, tf.float32))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([None, None], None))
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([None, None], tf.float32))
const_float = tf.constant(3.0, dtype=tf.float32)
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype([const_float], tf.float32))
self.assertRaises(
ValueError,
tf.contrib.framework.assert_same_float_dtype, [const_float], tf.int32)
sparse_float = tf.SparseTensor(
tf.constant([[111], [232]], tf.int64),
tf.constant([23.4, -43.2], tf.float32),
tf.constant([500], tf.int64))
self.assertIs(tf.float32, tf.contrib.framework.assert_same_float_dtype(
[sparse_float], tf.float32))
self.assertRaises(
ValueError,
tf.contrib.framework.assert_same_float_dtype, [sparse_float], tf.int32)
self.assertRaises(
ValueError, tf.contrib.framework.assert_same_float_dtype,
[const_float, None, sparse_float], tf.float64)
self.assertIs(
tf.float32,
tf.contrib.framework.assert_same_float_dtype(
[const_float, sparse_float]))
self.assertIs(tf.float32, tf.contrib.framework.assert_same_float_dtype(
[const_float, sparse_float], tf.float32))
const_int = tf.constant(3, dtype=tf.int32)
self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
[sparse_float, const_int])
self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
[sparse_float, const_int], tf.int32)
self.assertRaises(ValueError, tf.contrib.framework.assert_same_float_dtype,
[sparse_float, const_int], tf.float32)
self.assertRaises(
ValueError, tf.contrib.framework.assert_same_float_dtype, [const_int])
class AssertScalarTest(tf.test.TestCase):
def test_assert_scalar(self):
tf.contrib.framework.assert_scalar(tf.constant(3))
tf.contrib.framework.assert_scalar(tf.constant("foo"))
tf.contrib.framework.assert_scalar(3)
tf.contrib.framework.assert_scalar("foo")
with self.assertRaisesRegexp(ValueError, "Unexpected shape"):
tf.contrib.framework.assert_scalar(tf.constant([3, 4]))
def test_assert_scalar_int(self):
tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.int32))
tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.int64))
tf.contrib.framework.assert_scalar_int(3)
with self.assertRaisesRegexp(ValueError, "Unexpected type"):
tf.contrib.framework.assert_scalar_int(tf.constant(3, dtype=tf.float32))
with self.assertRaisesRegexp(ValueError, "Unexpected shape"):
tf.contrib.framework.assert_scalar_int(
tf.constant([3, 4], dtype=tf.int32))
class LocalVariableTest(tf.test.TestCase):
def test_local_variable(self):
with self.test_session() as sess:
self.assertEquals([], tf.local_variables())
value0 = 42
tf.contrib.framework.local_variable(value0)
value1 = 43
tf.contrib.framework.local_variable(value1)
variables = tf.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(tf.OpError, sess.run, variables)
tf.initialize_variables(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(tf.test.TestCase):
def test_reduce_sum_n(self):
with self.test_session():
a = tf.constant(1)
b = tf.constant([2])
c = tf.constant([[3, 4], [5, 6]])
self.assertEqual(21, tf.contrib.framework.reduce_sum_n([a, b, c]).eval())
class WithShapeTest(tf.test.TestCase):
def _assert_with_shape(
self, tensor, expected_value, expected_shape, unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(
ValueError, tf.contrib.framework.with_shape, unexpected_shape, tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name,
" ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(
tf.OpError,
re.compile(pattern),
tf.contrib.framework.with_shape(
tf.constant(unexpected_shape), tensor).eval)
expected_placeholder = tf.placeholder(tf.float32)
self.assertRaisesRegexp(
tf.OpError,
re.compile(pattern),
tf.contrib.framework.with_same_shape(
expected_placeholder, tensor).eval, {
expected_placeholder: np.ones(unexpected_shape)
})
self.assertIs(tensor, tf.contrib.framework.with_shape(
expected_shape, tensor))
self.assertIs(tensor, tf.contrib.framework.with_same_shape(
tf.constant(1, shape=expected_shape), tensor))
tensor_with_shape = tf.contrib.framework.with_shape(
tf.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tf.contrib.framework.with_same_shape(
expected_placeholder, tensor)
np.testing.assert_array_equal(expected_value, tensor_with_same_shape.eval({
expected_placeholder: np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.test_session():
self.assertRaisesRegexp(
ValueError, "Invalid rank", tf.contrib.framework.with_shape,
[[1], [2]], tf.constant(1.0))
def test_with_shape_invalid_type(self):
with self.test_session():
self.assertRaisesRegexp(
ValueError, "Invalid dtype", tf.contrib.framework.with_shape,
[1.1], tf.constant([1.0]))
self.assertRaisesRegexp(
ValueError, "Invalid dtype", tf.contrib.framework.with_shape,
np.array([1.1]), tf.constant(1.0))
self.assertRaisesRegexp(
ValueError, "Invalid dtype", tf.contrib.framework.with_shape,
tf.constant(np.array([1.1])), tf.constant(1.0))
def test_with_shape_0(self):
with self.test_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_1(self):
with self.test_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_2(self):
with self.test_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_2x2(self):
with self.test_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
tf.constant(value, shape=shape), value, shape, unexpected_shapes)
def test_with_shape_none(self):
with self.test_session():
tensor_no_shape = tf.placeholder(tf.float32)
compatible_shape = [2, 2]
with_present_2x2 = tf.contrib.framework.with_shape(
compatible_shape, tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tf.contrib.framework.with_shape(
tf.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(
array_2x2, tensor_2x2.eval({tensor_no_shape: array_2x2}))
self.assertRaisesRegexp(
tf.OpError, "Wrong shape", tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(
tf.OpError, "Wrong shape", tensor_2x2.eval,
{tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.test_session():
tensor_partial_shape = tf.placeholder(tf.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, r"Shapes \(\?, 2\) and \([01],\) are not compatible",
tf.contrib.framework.with_shape,
incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(
ValueError, "Dimensions must be equal",
tf.contrib.framework.with_shape,
incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError, r"Shapes \(\?, 2\) and \(2, 1\) are not compatible",
tf.contrib.framework.with_shape,
incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tf.contrib.framework.with_shape(
compatible_shape, tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tf.contrib.framework.with_shape(
tf.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(
array_2x2, tensor_2x2.eval({tensor_partial_shape: array_2x2}))
self.assertRaises(
ValueError, tensor_2x2.eval, {tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(
ValueError, tensor_2x2.eval, {tensor_partial_shape: [42.0]})
class RemoveSqueezableDimensionsTest(tf.test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=True,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False, predictions_have_extra_dim=True,
labels_have_static_shape=True, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=False, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=False,
labels_have_static_shape=True, labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=True,
labels_have_static_shape=False, labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True, predictions_have_extra_dim=True,
labels_have_static_shape=True, labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(
self,
predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = (
[[p] for p in predictions_value] if predictions_have_extra_dim else
predictions_value)
input_labels_value = (
[[l] for l in labels_value] if labels_have_extra_dim else labels_value)
with tf.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = tf.constant(input_predictions_value, dtype=tf.int32)
else:
predictions = tf.placeholder(dtype=tf.int32, name="predictions")
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = tf.constant(input_labels_value, dtype=tf.int32)
else:
labels = tf.placeholder(dtype=tf.int32, name="labels")
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
tf.contrib.framework.remove_squeezable_dimensions(
predictions, labels))
with self.test_session(g):
tf.local_variables_initializer().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
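# Added illustration (not TensorFlow's implementation): the behaviour exercised
# by the tests above is that whichever of predictions/labels carries one extra
# trailing dimension of size 1 gets it squeezed away, so both tensors end up
# with matching ranks. A plain-NumPy sketch of that rule (np is imported at the
# top of this file):
def _numpy_remove_squeezable_dimensions(predictions, labels):
  predictions = np.asarray(predictions)
  labels = np.asarray(labels)
  if predictions.ndim == labels.ndim + 1 and predictions.shape[-1] == 1:
    predictions = np.squeeze(predictions, axis=-1)
  elif labels.ndim == predictions.ndim + 1 and labels.shape[-1] == 1:
    labels = np.squeeze(labels, axis=-1)
  return predictions, labels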
if __name__ == "__main__":
tf.test.main()
|
|
# Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples, n_samples)
    Square matrix of pairwise distances between samples. (Only the
    returned joint probabilities use the condensed one-dimensional
    layout, i.e. with the diagonal and duplicate entries omitted.)
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
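# Added illustration (not used by the estimator): the "condensed" storage
# mentioned above is SciPy's one-dimensional packing of a symmetric distance
# matrix, keeping each off-diagonal pair exactly once. pdist produces it and
# squareform converts between the two representations; both are already
# imported at the top of this module.
def _condensed_vs_square_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(4, 3)
    d_condensed = pdist(X, "sqeuclidean")   # shape (4 * 3 / 2,) == (6,)
    d_square = squareform(d_condensed)      # shape (4, 4), zero diagonal
    assert d_condensed.shape == (6,)
    assert np.allclose(squareform(d_square), d_condensed)
    return d_condensed, d_square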
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
works on the dense N x N distance matrix and therefore costs O(N**2);
restricting the joint probabilities to the k nearest neighbors reduces
this to O(kN).
Parameters
----------
distances : array, shape (n_samples, k)
Distances from each sample to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
Indices of the k nearest neighbors for each sample.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
n_samples, k = neighbors.shape
distances = distances.astype(np.float32, copy=False)
neighbors = neighbors.astype(np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
assert np.all(np.isfinite(conditional_P)), \
"All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
range(0, n_samples * k + 1, k)),
shape=(n_samples, n_samples))
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s"
.format(duration))
return P
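# Editor's note: an illustrative sketch added for this edit, not part of
# scikit-learn. It spells out the CSR construction used above: row i of the
# sparse matrix holds conditional_P[i] in the columns listed in neighbors[i],
# and the indptr range(0, n_samples * k + 1, k) marks where each row's k
# entries start in the flattened data array.
def _example_neighbor_csr():
    import numpy as np
    from scipy.sparse import csr_matrix

    conditional_P = np.array([[0.6, 0.4],
                              [0.7, 0.3],
                              [0.5, 0.5]])          # 3 samples, k = 2 neighbors each
    neighbors = np.array([[1, 2], [0, 2], [0, 1]])  # indices of the k nearest neighbors
    n_samples, k = neighbors.shape
    P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
                    range(0, n_samples * k + 1, k)),
                   shape=(n_samples, n_samples))
    P = P + P.T                                     # symmetrize
    P = P / np.maximum(P.sum(), np.finfo(np.double).eps)
    return P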
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist /= degrees_of_freedom
dist += 1.
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
# Gradient: dC/dY
# pdist always returns double precision distances. Thus we need to take
# care to allocate the gradient with the same dtype as params.
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order='K'),
X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
angle=0.5, skip_num_points=0, verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : csr sparse matrix, shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. An angle below 0.2 quickly increases the
computation time and an angle above 0.8 quickly increases the error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. The returned cost is used for the progress and convergence
checks performed every n_iter_check iterations.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
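# Editor's note: an illustrative sketch added for this edit, not part of
# scikit-learn. It exercises _gradient_descent on a simple quadratic objective
# that returns (cost, gradient), the same contract _kl_divergence and
# _kl_divergence_bh follow; the hypothetical parameter values below are only
# meant to make the toy problem converge quickly.
def _example_gradient_descent():
    import numpy as np

    def quadratic(p):
        # cost = ||p||^2, gradient = 2 * p
        return np.sum(p ** 2), 2.0 * p

    p0 = np.array([3.0, -2.0, 1.0])
    p, error, it = _gradient_descent(quadratic, p0, it=0, n_iter=100,
                                     momentum=0.5, learning_rate=0.1,
                                     min_grad_norm=1e-6, verbose=0)
    return p, error, it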
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = NearestNeighbors(n_neighbors).fit(X_embedded).kneighbors(
return_distance=False)
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
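# Editor's note: an illustrative sketch added for this edit, not part of
# scikit-learn. It compares the trustworthiness of a structured 2-D PCA
# projection of random 10-D data with that of an unrelated random embedding;
# the projection that actually preserves neighborhoods typically scores
# higher.
def _example_trustworthiness():
    import numpy as np
    from sklearn.decomposition import PCA

    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    X_pca = PCA(n_components=2).fit_transform(X)
    X_random = rng.randn(50, 2)
    return (trustworthiness(X, X_pca, n_neighbors=5),
            trustworthiness(X, X_random, n_neighbors=5))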
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. An angle below 0.2 quickly increases the
computation time and an angle above 0.8 quickly increases the error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise to use a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that when
method='barnes_hut', X cannot be a sparse array and, if need be,
it will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
if np.any(X < 0):
raise ValueError("All distances should be positive, the "
"precomputed distances given as X is not "
"correct")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
if self.method == 'barnes_hut':
X = check_array(X, ensure_min_samples=2,
dtype=[np.float32, np.float64])
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float32, np.float64])
if self.method == 'barnes_hut' and self.n_components > 3:
raise ValueError("'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree.")
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is {}"
.format(self.early_exaggeration))
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if np.any(distances < 0):
raise ValueError("All distances should be positive, the "
"metric given is not correct")
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
assert np.all(P <= 1), ("All probabilities should be less "
"than or equal to one")
else:
# Compute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
k = min(n_samples - 1, int(3. * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(k))
# Find the nearest neighbors for every point
knn = NearestNeighbors(algorithm='auto', n_neighbors=k,
metric=self.metric)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration))
t0 = time()
distances_nn, neighbors_nn = knn.kneighbors(
None, n_neighbors=k)
duration = time() - t0
if self.verbose:
print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
.format(n_samples, duration))
# Free the memory used by the ball_tree
del knn
if self.metric == "euclidean":
# knn returns the euclidean distance but we need it squared
# to be consistent with the 'exact' method. Note that the
# method was derived using the euclidean metric in the
# input space; we are not sure of the implications of using a
# different metric.
distances_nn **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, neighbors_nn,
self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
elif self.init == 'random':
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.randn(
n_samples, self.n_components).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or "
"a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
return self._tsne(P, degrees_of_freedom, n_samples,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
# Learning schedule (part 1): do 250 iterations with lower momentum but
# a higher effective learning rate controlled via the early exaggeration
# parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
y : Ignored
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
y : Ignored
"""
self.fit_transform(X)
return self
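# Editor's note: an illustrative sketch added for this edit, not part of
# scikit-learn. It shows the metric="precomputed" path described in the class
# docstring: a square distance matrix is passed instead of raw features, so
# init="pca" is not allowed and a random initialization is used; method="exact"
# is chosen since the toy problem is tiny.
def _example_tsne_precomputed():
    import numpy as np
    from sklearn.metrics import pairwise_distances

    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    D = pairwise_distances(X)                  # square, symmetric, non-negative
    tsne = TSNE(n_components=2, metric="precomputed", method="exact",
                init="random", perplexity=5, random_state=0)
    return tsne.fit_transform(D)               # shape (20, 2)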
|
|
"""
''' File: grey_harvest.py
''' Author: Gabriel "s0lst1c3" Ryan
''' Created: Tue May 26 2015
''' Source: https://github.com/s0lst1c3/grey_harvest
''' License: MIT (see attached)
''' Description: Scrapes the web for reliable http or https proxies and prints
''' them to stdout. Can also be used as a python library to
''' easily generate reliable proxies for use within Python
''' applications (see README.md).
"""
__version__ = '0.1.6'
__author__ = 'Gabriel "s0lst1c3" Ryan'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2015'
import requests
import socket
import sys
import argparse
from time import sleep
from bs4 import BeautifulSoup
from lxml import etree
''' configs '''
DOC_ROOT = 'http://freeproxylists.com'
ELITE_PAGE = 'elite.html'
HTTPS_ONLY = True
ALLOWED_COUNTRIES = None
DENIED_COUNTRIES = ['China']
MAX_TIMEOUT = 1
TEST_SLEEPTIME = 1
TEST_DOMAIN = 'example.com'
class Proxy(dict):
def __init__(self, ip, port, country=None,
latency=None, https=False, last_checked=None):
dict.__init__(self)
self.ip = ip
self.port = int(port)
self.country = country
self.latency = int(latency)
self.https = https
self['ip'] = ip
self['port'] = port
self['country'] = country
self['latency'] = latency
self['https'] = https
def test(self,
test_domain=TEST_DOMAIN,
test_sleeptime=TEST_SLEEPTIME,
max_timeout=MAX_TIMEOUT):
''' get ready for test '''
protocol = 'https' if self['https'] else 'http'
test_url = '%s://%s' % (protocol, test_domain)
''' requests expects plain scheme keys ('http'/'https') mapped to the proxy '''
proxies = {
'https' : str(self),
'http' : str(self),
}
''' make a brief HEAD request to test_domain and see if it times out '''
try:
response = requests.head(test_url, timeout=max_timeout, proxies=proxies)
if test_sleeptime > 0:
sleep(test_sleeptime)
return True
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
if test_sleeptime > 0:
sleep(test_sleeptime)
return False
def __str__(self):
return '%s:%s' % (self.ip, self.port)
class GreyHarvester(object):
def __init__(self,
test_domain=TEST_DOMAIN,
test_sleeptime=TEST_SLEEPTIME,
https_only=HTTPS_ONLY,
allowed_countries=ALLOWED_COUNTRIES,
denied_countries=DENIED_COUNTRIES,
ports=None,
max_timeout=MAX_TIMEOUT):
self.allowed_countries = allowed_countries
self.denied_countries = denied_countries
self.max_timeout = max_timeout
self.test_sleeptime = test_sleeptime
self.test_domain = test_domain
self.https_only = https_only
self.all_ports = ports is None
self.ports = ports
def run(self):
for endpoint in self._extract_ajax_endpoints():
for proxy in self._extract_proxies(endpoint):
if self._passes_filter(proxy) and proxy.test(
test_domain=self.test_domain,
test_sleeptime=self.test_sleeptime,
max_timeout = self.max_timeout,
) == True: yield proxy
def _extract_proxies(self, ajax_endpoint):
''' request the xml object '''
proxy_xml = requests.get(ajax_endpoint)
print(proxy_xml.content)
root = etree.XML(proxy_xml.content)
quote = root.xpath('quote')[0]
''' extract the raw text from the body of the quote tag '''
raw_text = quote.text
''' eliminate the stuff we don't need '''
proxy_data = raw_text.split('You will definitely love it! Give it a try!</td></tr>')[1]
''' get rid of the </table> at the end of proxy_data '''
proxy_data = proxy_data[:-len('</table>')]
''' split proxy_data into rows '''
table_rows = proxy_data.split('<tr>')
''' convert each row into a Proxy object '''
for row in table_rows:
''' get rid of the </tr> at the end of each row '''
row = row[:-len('</tr>')]
''' split each row into a list of items '''
items = row.split('<td>')
''' sometimes we get weird lists containing only an empty string '''
if len(items) != 7:
continue
''' we'll use this to remove the </td> from the end of each item '''
tdlen = len('</td>')
''' create proxy dict '''
proxy = Proxy(
ip=items[1][:-tdlen],
port=int(items[2][:-tdlen]),
https=bool(items[3][:-tdlen]),
latency=int(items[4][:-tdlen]),
last_checked=items[5][:-tdlen],
country=items[6][:-tdlen],
)
yield proxy
def _passes_filter(self, proxy):
''' validate the proxy against the configured filters '''
if self.allowed_countries is not None and proxy['country'] not in self.allowed_countries:
return False
if self.denied_countries is not None and proxy['country'] in self.denied_countries:
return False
if self.https_only and proxy['https'] == False:
return False
if not self.all_ports and str(proxy.port) not in self.ports:
return False
return True
def _extract_ajax_endpoints(self):
''' make a GET request to freeproxylists.com/elite.html '''
url = '/'.join([DOC_ROOT, ELITE_PAGE])
response = requests.get(url)
''' extract the raw HTML doc from the response '''
raw_html = response.text
''' convert raw html into BeautifulSoup object '''
soup = BeautifulSoup(raw_html, 'lxml')
for url in soup.select('table tr td table tr td a'):
if 'elite #' in url.text:
yield '%s/load_elite_d%s' % (DOC_ROOT, url['href'].lstrip('elite/'))
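# Editor's note: an illustrative sketch added for this edit, not part of
# grey_harvest. It shows the library usage promised in the module header:
# instantiate GreyHarvester directly and pull a few proxies from the generator
# returned by run(). It performs live network requests against
# freeproxylists.com, and the limits chosen below are arbitrary.
def _example_harvest_three_proxies():
    harvester = GreyHarvester(https_only=True, max_timeout=2)
    found = []
    for proxy in harvester.run():
        found.append(str(proxy))               # 'ip:port'
        if len(found) >= 3:
            break
    return found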
def setup(parser):
parser.add_argument('-a', '--allowed-countries',
dest='allowed_countries',
nargs='*',
metavar='<country>',
required=False,
default=ALLOWED_COUNTRIES,
help='''Only use proxies physically located in the specified countries.'''
)
parser.add_argument('-p', '--ports',
dest='ports',
nargs='*',
metavar='<port>',
required=False,
help='''Only use proxies running on the specified ports.'''
)
parser.add_argument('-d', '--denied-countries',
dest='denied_countries',
nargs='*',
metavar='<country_1>',
default=DENIED_COUNTRIES,
required=False,
help='Do not use proxies physically located in these countries. This flag takes precedence over --allowed-countries.'
)
parser.add_argument('-t', '--max-timeout',
dest='max_timeout',
type=int,
metavar='<N>',
default=MAX_TIMEOUT,
required=False,
help='Discard proxies that do not respond within <N> seconds of HEAD request.'
)
parser.add_argument('-H', '--https-only',
action='store_true',
dest='https_only',
default=HTTPS_ONLY,
help='Only keep proxies with https support.',
)
parser.add_argument('-D', '--test-domain',
dest='test_domain',
type=str,
metavar='<test_domain>',
default=TEST_DOMAIN,
required=False,
help='Test proxies by making HEAD request to <test domain>',
)
parser.add_argument('-n', '--num-proxies',
dest='num_proxies',
nargs=1,
type=int,
metavar='<N>',
required=True,
help='Harvest <N> working and free proxies from teh interwebz',
)
args = parser.parse_args()
if args.ports:
ALL_PORTS = False
return {
'num_proxies' : args.num_proxies[0],
'test_domain' : args.test_domain,
'https_only' : args.https_only,
'max_timeout' : args.max_timeout,
'allowed_countries' : args.allowed_countries,
'denied_countries' : args.denied_countries,
'ports' : args.ports,
}
def main():
''' set things up '''
configs = setup(argparse.ArgumentParser())
harvester = GreyHarvester(
test_domain=configs['test_domain'],
test_sleeptime=TEST_SLEEPTIME,
https_only=configs['https_only'],
allowed_countries=configs['allowed_countries'],
denied_countries=configs['denied_countries'],
ports=configs['ports'],
max_timeout=configs['max_timeout']
)
''' harvest free and working proxies from teh interwebz '''
count = 0
for proxy in harvester.run():
if count >= configs['num_proxies']:
break
print(proxy)
count += 1
if __name__ == '__main__':
main()
|
|
################################################################################
# Copyright (C) 2011-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
from bayespy.utils import misc
from .node import Node
from .deterministic import Deterministic
from .gaussian import Gaussian, GaussianMoments
from .gaussian import GaussianGammaMoments
class SumMultiply(Deterministic):
r"""
Node for computing general products and sums of Gaussian nodes.
The node is similar to `numpy.einsum`, which is a very general
function for computing dot products, sums, products and other sums
of products of arrays.
For instance, consider the following arrays:
>>> import numpy as np
>>> X = np.random.randn(2, 3, 4)
>>> Y = np.random.randn(3, 5)
>>> Z = np.random.randn(4, 2)
Then, the Einstein summation can be used as:
>>> np.einsum('abc,bd,ca->da', X, Y, Z)
array([[...]])
SumMultiply node can be used similarly for Gaussian nodes. For instance,
consider the following Gaussian nodes:
>>> from bayespy.nodes import GaussianARD
>>> X = GaussianARD(0, 1, shape=(2, 3, 4))
>>> Y = GaussianARD(0, 1, shape=(3, 5))
>>> Z = GaussianARD(0, 1, shape=(4, 2))
Then, similarly to `numpy.einsum`, SumMultiply could be used as:
>>> from bayespy.nodes import SumMultiply
>>> SumMultiply('abc,bd,ca->da', X, Y, Z)
<bayespy.inference.vmp.nodes.dot.SumMultiply object at 0x...>
or
>>> SumMultiply(X, [0,1,2], Y, [1,3], Z, [2,0], [3,0])
<bayespy.inference.vmp.nodes.dot.SumMultiply object at 0x...>
which is similar to the alternative syntax of numpy.einsum.
This node operates similarly to numpy.einsum. However, you must use all the
elements of each node, that is, an operation like np.einsum('ii->i',X) is
not allowed. Thus, each axis of each node must be given a unique id. The id
identifies which axes correspond to which axes between the different
nodes. Also, Ellipsis ('...') is not yet supported, for simplicity. It would
also be problematic with constant inputs (because it would be unclear how to
determine ndim), so it is left out for now.
Each output axis must appear in the input mappings.
The keys must refer to variable dimension axes only, not plate axes.
The input nodes may be Gaussian-gamma (isotropic) nodes.
The output message is Gaussian-gamma (isotropic) if any of the input nodes
is Gaussian-gamma.
Examples
--------
Sum over the rows:
'ij->j'
Inner product of three vectors:
'i,i,i'
Matrix-vector product:
'ij,j->i'
Matrix-matrix product:
'ik,kj->ij'
Outer product:
'i,j->ij'
Vector-matrix-vector product:
'i,ij,j'
Notes
-----
This operation can be extremely slow if not used wisely. For large and
complex operations, it is sometimes more efficient to split the operation
into multiple nodes. For instance, the example above could probably be
computed faster by
>>> XZ = SumMultiply(X, [0,1,2], Z, [2,0], [0,1])
>>> F = SumMultiply(XZ, [0,1], Y, [1,2], [2,0])
because the third axis ('c') could be summed out already in the first
operation. This same effect applies also to numpy.einsum in general.
"""
def __init__(self, *args, iterator_axis=None, **kwargs):
"""
SumMultiply(Node1, map1, Node2, map2, ..., NodeN, mapN [, map_out])
"""
args = list(args)
if len(args) < 2:
raise ValueError("Not enough inputs")
if iterator_axis is not None:
raise NotImplementedError("Iterator axis not implemented yet")
if iterator_axis is not None and not isinstance(iterator_axis, int):
raise ValueError("Iterator axis must be integer")
# Two different parsing methods, depends on how the arguments are given
if misc.is_string(args[0]):
# This is the format:
# SumMultiply('ik,k,kj->ij', X, Y, Z)
strings = args[0]
nodes = args[1:]
# Remove whitespace
strings = misc.remove_whitespace(strings)
# Split on '->' (should contain only one '->' or none)
strings = strings.split('->')
if len(strings) > 2:
raise ValueError('The string contains too many ->')
strings_in = strings[0]
if len(strings) == 2:
string_out = strings[1]
else:
string_out = ''
# Split former part on ',' (the number of parts should be equal to
# nodes)
strings_in = strings_in.split(',')
if len(strings_in) != len(nodes):
raise ValueError('Number of given input nodes is different '
'from the input keys in the string')
# Split strings into key lists using single character keys
keysets = [list(string_in) for string_in in strings_in]
keys_out = list(string_out)
else:
# This is the format:
# SumMultiply(X, [0,2], Y, [2], Z, [2,1], [0,1])
# If given, the output mapping is the last argument
if len(args) % 2 == 0:
keys_out = []
else:
keys_out = args.pop(-1)
# Node and axis mapping are given in turns
nodes = args[::2]
keysets = args[1::2]
# Find all the keys (store only once each)
full_keyset = []
for keyset in keysets:
full_keyset += keyset
#full_keyset += list(keyset.keys())
full_keyset = list(set(full_keyset))
# Input and output messages are Gaussian unless there is at least one
# Gaussian-gamma message from the parents
self.gaussian_gamma = False
for i in range(len(nodes)):
try:
nodes[i] = self._ensure_moments(
nodes[i],
GaussianMoments,
ndim=len(keysets[i])
)
except GaussianMoments.NoConverterError:
self.gaussian_gamma = True
if self.gaussian_gamma:
nodes = [
self._ensure_moments(
node,
GaussianGammaMoments,
ndim=len(keyset)
)
for (node, keyset) in zip(nodes, keysets)
]
self._parent_moments = tuple(node._moments for node in nodes)
#
# Check the validity of each node
#
for n in range(len(nodes)):
# Check that the maps and the size of the variable are consistent
if len(nodes[n].dims[0]) != len(keysets[n]):
raise ValueError("Wrong number of keys (%d) for the node "
"number %d with %d dimensions"
% (len(keysets[n]),
n,
len(nodes[n].dims[0])))
# Check that the keys are unique
if len(set(keysets[n])) != len(keysets[n]):
raise ValueError("Axis keys for node number %d are not unique"
% n)
# Check the validity of output keys: each output key must be included in
# the input keys
if len(keys_out) != len(set(keys_out)):
raise ValueError("Output keys are not unique")
for key in keys_out:
if key not in full_keyset:
raise ValueError("Output key %s does not appear in any input"
% key)
# Check the validity of the nodes with respect to the key mapping.
# Check that the node dimensions map and broadcast properly, that is,
# all the nodes using the same key for axes must have equal size for
# those axes (or size 1).
broadcasted_size = {}
for key in full_keyset:
broadcasted_size[key] = 1
for (node, keyset) in zip(nodes, keysets):
try:
# Find the axis for the key
index = keyset.index(key)
except ValueError:
# OK, this node doesn't use this key for any axis
pass
else:
# Length of the axis for that key
node_size = node.dims[0][index]
if node_size != broadcasted_size[key]:
if broadcasted_size[key] == 1:
# Apply broadcasting
broadcasted_size[key] = node_size
elif node_size != 1:
# Different sizes and neither has size 1
raise ValueError("Axes using key %s do not "
"broadcast properly"
% key)
# Compute the shape of the output
shape = tuple([broadcasted_size[key] for key in keys_out])
if self.gaussian_gamma:
self._moments = GaussianGammaMoments(shape)
else:
self._moments = GaussianMoments(shape)
# Rename the keys to [0,1,...,N-1] where N is the total number of keys
self.N_keys = len(full_keyset)
self.out_keys = [full_keyset.index(key) for key in keys_out]
self.in_keys = [ [full_keyset.index(key) for key in keyset]
for keyset in keysets ]
super().__init__(*nodes,
dims=self._moments.dims,
**kwargs)
def _compute_function(self, *x_parents):
# TODO: Add unit tests for this function
(xs, alphas) = (
(x_parents, 1) if not self.gaussian_gamma else
zip(*x_parents)
)
# Add Ellipsis for the plates
in_keys = [[Ellipsis] + k for k in self.in_keys]
out_keys = [Ellipsis] + self.out_keys
samples_and_keys = misc.zipper_merge(xs, in_keys)
y = np.einsum(*(samples_and_keys + [out_keys]))
return (
y if not self.gaussian_gamma else
(y, misc.multiply(*alphas))
)
def _compute_moments(self, *u_parents):
# Compute the number of plate axes for each node
plate_counts0 = [(np.ndim(u_parent[0]) - len(keys))
for (keys,u_parent) in zip(self.in_keys, u_parents)]
plate_counts1 = [(np.ndim(u_parent[1]) - 2*len(keys))
for (keys,u_parent) in zip(self.in_keys, u_parents)]
# The number of plate axes for the output
N0 = max(plate_counts0)
N1 = max(plate_counts1)
# The total number of unique keys used (keys are 0,1,...,N_keys-1)
D = self.N_keys
#
# Compute the mean
#
out_all_keys = list(range(D+N0-1, D-1, -1)) + self.out_keys
#nodes_dim_keys = self.nodes_dim_keys
in_all_keys = [list(range(D+plate_count-1, D-1, -1)) + keys
for (plate_count, keys) in zip(plate_counts0,
self.in_keys)]
u0 = [u[0] for u in u_parents]
args = misc.zipper_merge(u0, in_all_keys) + [out_all_keys]
x0 = np.einsum(*args)
#
# Compute the covariance
#
out_all_keys = (list(range(2*D+N1-1, 2*D-1, -1))
+ [D+key for key in self.out_keys]
+ self.out_keys)
in_all_keys = [list(range(2*D+plate_count-1, 2*D-1, -1))
+ [D+key for key in node_keys]
+ node_keys
for (plate_count, node_keys) in zip(plate_counts1,
self.in_keys)]
u1 = [u[1] for u in u_parents]
args = misc.zipper_merge(u1, in_all_keys) + [out_all_keys]
x1 = np.einsum(*args)
if not self.gaussian_gamma:
return [x0, x1]
# Compute Gaussian-gamma specific moments
x2 = 1
x3 = 0
for i in range(len(u_parents)):
x2 = x2 * u_parents[i][2]
x3 = x3 + u_parents[i][3]
return [x0, x1, x2, x3]
def get_parameters(self):
# Compute mean and variance
u = self.get_moments()
u[1] -= u[0]**2
return u
def _message_to_parent(self, index, u_parent=None):
"""
Compute the message and mask to a parent node.
"""
# Check index
if index >= len(self.parents):
raise ValueError("Parent index larger than the number of parents")
# Get messages from other parents and children
u_parents = self._message_from_parents(exclude=index)
m = self._message_from_children()
mask = self.mask
# Normally we don't need to care about masks when computing the
# message. However, in this node we want to avoid computing huge message
# arrays so we sum some axes already here. Thus, we need to apply the
# mask.
#
# Actually, we don't need to care about masks because the message from
# children has already been masked.
parent = self.parents[index]
#
# Compute the first message
#
msg = [None, None]
# Compute the two messages
for ind in range(2):
# The total number of keys for the non-plate dimensions
N = (ind+1) * self.N_keys
parent_num_dims = len(parent.dims[ind])
parent_num_plates = len(parent.plates)
parent_plate_keys = list(range(N + parent_num_plates,
N,
-1))
parent_dim_keys = self.in_keys[index]
if ind == 1:
parent_dim_keys = ([key + self.N_keys
for key in self.in_keys[index]]
+ parent_dim_keys)
args = []
# This variable counts the maximum number of plates of the
# arguments, thus it will tell the number of plates in the result
# (if the artificially added plates above were ignored).
result_num_plates = 0
result_plates = ()
# Mask and its keys
mask_num_plates = np.ndim(mask)
mask_plates = np.shape(mask)
mask_plate_keys = list(range(N + mask_num_plates,
N,
-1))
result_num_plates = max(result_num_plates,
mask_num_plates)
result_plates = misc.broadcasted_shape(result_plates,
mask_plates)
# Moments and keys of other parents
for (k, u) in enumerate(u_parents):
if k != index:
num_dims = (ind+1) * len(self.in_keys[k])
num_plates = np.ndim(u[ind]) - num_dims
plates = np.shape(u[ind])[:num_plates]
plate_keys = list(range(N + num_plates,
N,
-1))
dim_keys = self.in_keys[k]
if ind == 1:
dim_keys = ([key + self.N_keys
for key in self.in_keys[k]]
+ dim_keys)
args.append(u[ind])
args.append(plate_keys + dim_keys)
result_num_plates = max(result_num_plates, num_plates)
result_plates = misc.broadcasted_shape(result_plates,
plates)
# Message and keys from children
child_num_dims = (ind+1) * len(self.out_keys)
child_num_plates = np.ndim(m[ind]) - child_num_dims
child_plates = np.shape(m[ind])[:child_num_plates]
child_plate_keys = list(range(N + child_num_plates,
N,
-1))
child_dim_keys = self.out_keys
if ind == 1:
child_dim_keys = ([key + self.N_keys
for key in self.out_keys]
+ child_dim_keys)
args.append(m[ind])
args.append(child_plate_keys + child_dim_keys)
result_num_plates = max(result_num_plates, child_num_plates)
result_plates = misc.broadcasted_shape(result_plates,
child_plates)
# Output keys, that is, the keys of the parent[index]
parent_keys = parent_plate_keys + parent_dim_keys
# Performance trick: Check which axes can be summed because they
# have length 1 or are non-existing in parent[index]. Thus, remove
# keys corresponding to unit length axes in parent[index] so that
# einsum sums over those axes. After computations, these axes must
# be added back in order to get the correct shape for the message.
# Also, remove axes/keys that are in output (parent[index]) but not in
# any inputs (children and other parents).
parent_shape = parent.get_shape(ind)
removed_axes = []
for j in range(len(parent_keys)):
if parent_shape[j] == 1:
# Remove the key (take into account the number of keys that
# have already been removed)
del parent_keys[j-len(removed_axes)]
removed_axes.append(j)
else:
# Remove the key if it doesn't appear in any of the
# messages from children or other parents.
if not np.any([parent_keys[j-len(removed_axes)] in keys
for keys in args[1::2]]):
del parent_keys[j-len(removed_axes)]
removed_axes.append(j)
args.append(parent_keys)
# THE BEEF: Compute the message
msg[ind] = np.einsum(*args)
# Find the correct shape for the message array
message_shape = list(np.shape(msg[ind]))
# First, add back the axes with length 1
for ax in removed_axes:
message_shape.insert(ax, 1)
# Second, remove leading axes for plates that were not present in
# the child nor other parents' messages. This is not really
# necessary, but it is just elegant to remove the leading unit
# length axes that we added artificially at the beginning just
# because we wanted the key mapping to be simple.
if parent_num_plates > result_num_plates:
del message_shape[:(parent_num_plates-result_num_plates)]
# Then, the actual reshaping
msg[ind] = np.reshape(msg[ind], message_shape)
# Broadcasting is not supported for variable dimensions, thus force
# explicit correct shape for variable dimensions
var_dims = parent.dims[ind]
msg[ind] = msg[ind] * np.ones(var_dims)
# Apply plate multiplier: If this node has non-unit plates that are
# unit plates in the parent, those plates are summed. However, if
# the message has unit axis for that plate, it should be first
# broadcasted to the plates of this node and then summed to the
# plates of the parent. In order to avoid this broadcasting and
# summing, it is more efficient to just multiply by the correct
# factor.
r = self.broadcasting_multiplier(self.plates,
result_plates,
parent.plates)
if r != 1:
msg[ind] *= r
if self.gaussian_gamma:
alphas = [u_parents[i][2]
for i in range(len(u_parents)) if i != index]
m2 = self._compute_message(m[2], mask, *alphas,
ndim=0,
plates_from=self.plates,
plates_to=parent.plates)
m3 = self._compute_message(m[3], mask,
ndim=0,
plates_from=self.plates,
plates_to=parent.plates)
msg = msg + [m2, m3]
return msg
def Dot(*args, **kwargs):
"""
Node for computing inner product of several Gaussian vectors.
This is a simple wrapper of the much more general SumMultiply. For now, it
is here for backward compatibility.
"""
einsum = 'i' + ',i'*(len(args)-1)
return SumMultiply(einsum, *args, **kwargs)
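# Editor's note: an illustrative sketch added for this edit, not part of
# BayesPy. It shows that the Dot wrapper above is just the inner-product
# special case of SumMultiply, i.e. Dot(X, Y) builds SumMultiply('i,i', X, Y).
def _example_dot():
    from bayespy.nodes import GaussianARD
    X = GaussianARD(0, 1, shape=(3,))
    Y = GaussianARD(0, 1, shape=(3,))
    return Dot(X, Y)          # equivalent to SumMultiply('i,i', X, Y)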
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Python module to download videos.
This module contains the actual downloaders responsible
for downloading the video files.
"""
from __future__ import unicode_literals
import re
import os
import sys
import locale
import signal
import subprocess
from time import sleep
from Queue import Queue
from threading import Thread
from .utils import convert_item
class PipeReader(Thread):
"""Helper class to avoid deadlocks when reading from subprocess pipes.
This class uses python threads and queues in order to read from subprocess
pipes in an asynchronous way.
Attributes:
WAIT_TIME (float): Time in seconds to sleep.
Args:
queue (Queue.Queue): Python queue to store the output of the subprocess.
Warnings:
All the operations are based on 'str' types. The caller has to convert
the queued items back to 'unicode' if needed.
"""
WAIT_TIME = 0.1
def __init__(self, queue):
super(PipeReader, self).__init__()
self._filedescriptor = None
self._running = True
self._queue = queue
self.start()
def run(self):
# Flag to ignore specific lines
ignore_line = False
while self._running:
if self._filedescriptor is not None:
for line in iter(self._filedescriptor.readline, str('')):
# Ignore ffmpeg stderr
if str('ffmpeg version') in line:
ignore_line = True
if not ignore_line:
self._queue.put_nowait(line)
self._filedescriptor = None
ignore_line = False
sleep(self.WAIT_TIME)
def attach_filedescriptor(self, filedesc):
"""Attach a filedescriptor to the PipeReader. """
self._filedescriptor = filedesc
def join(self, timeout=None):
self._running = False
super(PipeReader, self).join(timeout)
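# Editor's note: an illustrative sketch added for this edit, not part of
# youtube-dl-gui. It walks through the intended PipeReader life cycle: create
# a queue, attach a pipe from a finished subprocess, give the background
# thread a moment to drain it, then join() to stop the thread. The sleep is a
# rough, illustrative delay rather than a robust synchronisation mechanism.
def _example_pipe_reader():
    queue = Queue()
    reader = PipeReader(queue)
    proc = subprocess.Popen([sys.executable, '-c', "print('hello')"],
                            stdout=subprocess.PIPE)
    reader.attach_filedescriptor(proc.stdout)
    proc.wait()
    sleep(5 * PipeReader.WAIT_TIME)
    lines = []
    while not queue.empty():
        lines.append(queue.get_nowait())
    reader.join()
    return lines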
class YoutubeDLDownloader(object):
"""Python class for downloading videos using youtube-dl & subprocess.
Attributes:
OK, ERROR, STOPPED, ALREADY, FILESIZE_ABORT, WARNING (int): Integers
that describe the return code from the download() method. The
larger the number, the higher the code ranks in the hierarchy.
Codes with a lower rank cannot overwrite codes with a higher
rank.
Args:
youtubedl_path (string): Absolute path to youtube-dl binary.
data_hook (function): Optional callback function to retrieve download
process data.
log_data (function): Optional callback function to write data to
the log file.
Warnings:
The caller is responsible for calling the close() method when finished
with the object so that it can shut itself down properly.
Example:
How to use YoutubeDLDownloader from a python script.
from downloaders import YoutubeDLDownloader
def data_hook(data):
print data
downloader = YoutubeDLDownloader('/usr/bin/youtube-dl', data_hook)
downloader.download(<URL STRING>, ['-f', 'flv'])
"""
OK = 0
WARNING = 1
ERROR = 2
FILESIZE_ABORT = 3
ALREADY = 4
STOPPED = 5
def __init__(self, youtubedl_path, data_hook=None, log_data=None):
self.youtubedl_path = youtubedl_path
self.data_hook = data_hook
self.log_data = log_data
self._return_code = self.OK
self._proc = None
self._stderr_queue = Queue()
self._stderr_reader = PipeReader(self._stderr_queue)
def download(self, url, options):
"""Download url using given options.
Args:
url (string): URL string to download.
options (list): Python list that contains youtube-dl options.
Returns:
An integer that shows the status of the download process.
There are 6 different return codes.
OK (0): The download process completed successfully.
WARNING (1): A warning occurred during the download process.
ERROR (2): An error occurred during the download process.
FILESIZE_ABORT (3): The corresponding url video file was larger or
smaller than the given filesize limit.
ALREADY (4): The given url is already downloaded.
STOPPED (5): The download process was stopped by the user.
"""
self._return_code = self.OK
cmd = self._get_cmd(url, options)
self._create_process(cmd)
if self._proc is not None:
self._stderr_reader.attach_filedescriptor(self._proc.stderr)
while self._proc_is_alive():
stdout = self._proc.stdout.readline().rstrip()
stdout = convert_item(stdout, to_unicode=True)
if stdout:
data_dict = extract_data(stdout)
self._extract_info(data_dict)
self._hook_data(data_dict)
# Read stderr after download process has been completed
# We don't need to read stderr in real time
while not self._stderr_queue.empty():
stderr = self._stderr_queue.get_nowait().rstrip()
stderr = convert_item(stderr, to_unicode=True)
self._log(stderr)
if self._is_warning(stderr):
self._set_returncode(self.WARNING)
else:
self._set_returncode(self.ERROR)
# Set return code to ERROR if we could not start the download process
# or the child's return code is greater than zero
# NOTE: On Linux, if the called script is just empty, Python exits
# normally (ret=0), so we can't detect this or similar cases
# using the code below
# NOTE: In Unix a negative return code (-N) indicates that the child
# was terminated by signal N (e.g. -9 = SIGKILL)
if self._proc is None or self._proc.returncode > 0:
self._return_code = self.ERROR
if self._proc is not None and self._proc.returncode > 0:
self._log('Child process exited with non-zero code: {}'.format(self._proc.returncode))
self._last_data_hook()
return self._return_code
def stop(self):
"""Stop the download process and set return code to STOPPED. """
if self._proc_is_alive():
if os.name == 'nt':
# os.killpg is not available on Windows
# See: https://bugs.python.org/issue5115
self._proc.kill()
# When we kill the child process on Windows the return code
# gets set to 1, so we want to reset the return code back to 0
# in order to avoid creating logging output in the download(...)
# method
self._proc.returncode = 0
else:
os.killpg(self._proc.pid, signal.SIGKILL)
self._set_returncode(self.STOPPED)
def close(self):
"""Destructor like function for the object. """
self._stderr_reader.join()
def _set_returncode(self, code):
"""Set self._return_code only if the hierarchy of the given code is
higher than the current self._return_code. """
if code >= self._return_code:
self._return_code = code
def _is_warning(self, stderr):
return stderr.split(':')[0] == 'WARNING'
def _last_data_hook(self):
"""Set the last data information based on the return code. """
data_dictionary = {}
if self._return_code == self.OK:
data_dictionary['status'] = 'Finished'
elif self._return_code == self.ERROR:
data_dictionary['status'] = 'Error'
data_dictionary['speed'] = ''
data_dictionary['eta'] = ''
elif self._return_code == self.WARNING:
data_dictionary['status'] = 'Warning'
data_dictionary['speed'] = ''
data_dictionary['eta'] = ''
elif self._return_code == self.STOPPED:
data_dictionary['status'] = 'Stopped'
data_dictionary['speed'] = ''
data_dictionary['eta'] = ''
elif self._return_code == self.ALREADY:
data_dictionary['status'] = 'Already Downloaded'
else:
data_dictionary['status'] = 'Filesize Abort'
self._hook_data(data_dictionary)
def _extract_info(self, data):
"""Extract informations about the download process from the given data.
Args:
data (dict): Python dictionary that contains different
keys. The keys are not standard; the dictionary can also be
empty when there is no data to extract. See extract_data().
"""
if 'status' in data:
if data['status'] == 'Already Downloaded':
# Set self._return_code to already downloaded
# and trash that key
self._set_returncode(self.ALREADY)
data['status'] = None
if data['status'] == 'Filesize Abort':
# Set self._return_code to filesize abort
# and trash that key
self._set_returncode(self.FILESIZE_ABORT)
data['status'] = None
def _log(self, data):
"""Log data using the callback function. """
if self.log_data is not None:
self.log_data(data)
def _hook_data(self, data):
"""Pass data back to the caller. """
if self.data_hook is not None:
self.data_hook(data)
def _proc_is_alive(self):
"""Returns True if self._proc is alive else False. """
if self._proc is None:
return False
return self._proc.poll() is None
def _get_cmd(self, url, options):
"""Build the subprocess command.
Args:
url (string): URL string to download.
options (list): Python list that contains youtube-dl options.
Returns:
Python list that contains the command to execute.
"""
if os.name == 'nt':
cmd = [self.youtubedl_path] + options + [url]
else:
cmd = ['python', self.youtubedl_path] + options + [url]
return cmd
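# A quick illustration of the command built above (a sketch; the
# youtube-dl paths shown here are hypothetical):
#   self._get_cmd('URL', ['-f', 'mp4'])
#   -> ['python', '/usr/bin/youtube-dl', '-f', 'mp4', 'URL']   on POSIX
#   -> ['C:\\youtube-dl.exe', '-f', 'mp4', 'URL']              on Windows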
def _create_process(self, cmd):
"""Create new subprocess.
Args:
cmd (list): Python list that contains the command to execute.
"""
info = preexec = None
# Keep a unicode copy of cmd for the log
ucmd = cmd
if os.name == 'nt':
# Hide subprocess window
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
# Make subprocess the process group leader
# in order to kill the whole process group with os.killpg
preexec = os.setsid
# Encode command for subprocess
# Refer to http://stackoverflow.com/a/9951851/35070
if sys.version_info < (3, 0):
cmd = convert_item(cmd, to_unicode=False)
try:
self._proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec,
startupinfo=info)
except (ValueError, OSError) as error:
self._log('Failed to start process: {}'.format(ucmd))
self._log(convert_item(str(error), to_unicode=True))
def extract_data(stdout):
"""Extract data from youtube-dl stdout.
Args:
stdout (string): String that contains the youtube-dl stdout.
Returns:
Python dictionary. The returned dictionary is empty if there is no
data to extract; otherwise it may contain one or more of the
following keys:
'status' : Contains the status of the download process.
'path' : Destination path.
'extension' : The file extension.
'filename' : The filename without the extension.
'percent' : The percentage of the video being downloaded.
'eta' : Estimated time for the completion of the download process.
'speed' : Download speed.
'filesize' : The size of the video file being downloaded.
'playlist_index' : The playlist index of the current video file being downloaded.
'playlist_size' : The number of videos in the playlist.
"""
# REFACTOR
def extract_filename(input_data):
path, fullname = os.path.split(input_data.strip("\""))
filename, extension = os.path.splitext(fullname)
return path, filename, extension
data_dictionary = {}
if not stdout:
return data_dictionary
# We want to keep the spaces in order to extract filenames with
# multiple whitespaces correctly. We also keep a copy of the old
# 'stdout' for backward compatibility with the old code
stdout_with_spaces = stdout.split(' ')
stdout = stdout.split()
stdout[0] = stdout[0].lstrip('\r')
if stdout[0] == '[download]':
data_dictionary['status'] = 'Downloading'
# Get path, filename & extension
if stdout[1] == 'Destination:':
path, filename, extension = extract_filename(' '.join(stdout_with_spaces[2:]))
data_dictionary['path'] = path
data_dictionary['filename'] = filename
data_dictionary['extension'] = extension
# Get progress info
if '%' in stdout[1]:
if stdout[1] == '100%':
data_dictionary['speed'] = ''
data_dictionary['eta'] = ''
data_dictionary['percent'] = '100%'
data_dictionary['filesize'] = stdout[3]
else:
data_dictionary['percent'] = stdout[1]
data_dictionary['filesize'] = stdout[3]
data_dictionary['speed'] = stdout[5]
data_dictionary['eta'] = stdout[7]
# Get playlist info
if stdout[1] == 'Downloading' and stdout[2] == 'video':
data_dictionary['playlist_index'] = stdout[3]
data_dictionary['playlist_size'] = stdout[5]
# Remove the 'and merged' part from stdout when using ffmpeg to merge the formats
if stdout[-3] == 'downloaded' and stdout[-1] == 'merged':
stdout = stdout[:-2]
stdout_with_spaces = stdout_with_spaces[:-2]
data_dictionary['percent'] = '100%'
# Get file already downloaded status
if stdout[-1] == 'downloaded':
data_dictionary['status'] = 'Already Downloaded'
path, filename, extension = extract_filename(' '.join(stdout_with_spaces[1:-4]))
data_dictionary['path'] = path
data_dictionary['filename'] = filename
data_dictionary['extension'] = extension
# Get filesize abort status
if stdout[-1] == 'Aborting.':
data_dictionary['status'] = 'Filesize Abort'
elif stdout[0] == '[hlsnative]':
# native hls extractor
# see: https://github.com/rg3/youtube-dl/blob/master/youtube_dl/downloader/hls.py#L54
data_dictionary['status'] = 'Downloading'
if len(stdout) == 7:
segment_no = float(stdout[6])
current_segment = float(stdout[4])
# Get the percentage
percent = '{0:.1f}%'.format(current_segment / segment_no * 100)
data_dictionary['percent'] = percent
elif stdout[0] == '[ffmpeg]':
data_dictionary['status'] = 'Post Processing'
# Get final extension after merging process
if stdout[1] == 'Merging':
path, filename, extension = extract_filename(' '.join(stdout_with_spaces[4:]))
data_dictionary['path'] = path
data_dictionary['filename'] = filename
data_dictionary['extension'] = extension
# Get final extension ffmpeg post process simple (not file merge)
if stdout[1] == 'Destination:':
path, filename, extension = extract_filename(' '.join(stdout_with_spaces[2:]))
data_dictionary['path'] = path
data_dictionary['filename'] = filename
data_dictionary['extension'] = extension
# Get final extension after recoding process
if stdout[1] == 'Converting':
path, filename, extension = extract_filename(' '.join(stdout_with_spaces[8:]))
data_dictionary['path'] = path
data_dictionary['filename'] = filename
data_dictionary['extension'] = extension
elif stdout[0][0] != '[' or stdout[0] == '[debug]':
pass # Just ignore this output
else:
data_dictionary['status'] = 'Pre Processing'
return data_dictionary
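# Example of what extract_data() yields for a typical youtube-dl progress
# line (a sketch; the exact youtube-dl output format can vary by version):
#   extract_data('[download]  45.5% of 10.00MiB at 500.00KiB/s ETA 00:10')
#   -> {'status': 'Downloading', 'percent': '45.5%', 'filesize': '10.00MiB',
#       'speed': '500.00KiB/s', 'eta': '00:10'}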
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.compat.v1.test.compute_gradient and tf.compute_gradient_error."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@ops.RegisterGradient("BadGrad")
def _bad_grad(unused_op, grad):
"""A gradient that returns the wrong shape."""
return array_ops.transpose(grad)
@ops.RegisterGradient("NaNGrad")
def _nan_grad(unused_op, grad):
"""A gradient that returns NaN."""
return np.nan * grad
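# The override map used by the tests below swaps the registered gradient for
# an op type inside a graph scope, e.g. (the pattern from testEmptyFails /
# testNaNGradFails):
#   with g.gradient_override_map({"Identity": "NaNGrad"}):
#       y = array_ops.identity(x)
# so gradient_checker.compute_gradient_error(x, (), y, ()) exercises
# _nan_grad instead of the stock Identity gradient.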
class GradientCheckerTest(test.TestCase):
@test_util.run_deprecated_v1
def testAddSimple(self):
np.random.seed(1) # Fix seed to avoid flakiness
with self.session(use_gpu=False):
# a test case for Add operation
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, name="x1")
x2 = constant_op.constant(3.0, shape=size, name="x2")
y = math_ops.add(x1, x2, name="y")
# checking gradients for x1
error = gradient_checker.compute_gradient_error(x1, size, y, size)
tf_logging.info("x1 error = %f", error)
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testAddSimpleGPU(self):
np.random.seed(2) # Fix seed to avoid flakiness
with self.session():
# a test case for Add operation
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, name="x1")
x2 = constant_op.constant(3.0, shape=size, name="x2")
y = math_ops.add(x1, x2, name="y")
# checking gradients for x1
error = gradient_checker.compute_gradient_error(x1, size, y, size)
tf_logging.info("x1 error = %f", error)
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testAddCustomized(self):
np.random.seed(3) # Fix seed to avoid flakiness
with self.cached_session():
# a test case for Add operation
size = (2, 3)
x1 = constant_op.constant(
2.0, shape=size, dtype=dtypes.float64, name="x1")
x2 = constant_op.constant(
3.0, shape=size, dtype=dtypes.float64, name="x2")
y = math_ops.add(x1, x2, name="y")
# checking gradients for x2 using a special init_value and delta
x_init_value = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
error = gradient_checker.compute_gradient_error(
x2, size, y, size, x_init_value=x_init_value, delta=1e-2)
tf_logging.info("x2 error = %f", error)
self.assertLess(error, 1e-10)
@test_util.run_deprecated_v1
def testGather(self):
np.random.seed(4) # Fix seed to avoid flakiness
with self.cached_session():
p_shape = (4, 2)
p_size = 8
index_values = [1, 3]
y_shape = [2, 2]
params = constant_op.constant(
np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
indices = constant_op.constant(index_values, name="i")
y = array_ops.gather(params, indices, name="y")
error = gradient_checker.compute_gradient_error(params, p_shape, y,
y_shape)
tf_logging.info("gather error = %f", error)
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testNestedGather(self):
np.random.seed(5) # Fix seed to avoid flakiness
with self.cached_session():
p_shape = (8, 2)
p_size = 16
index_values = [1, 3, 5, 6]
index_values2 = [0, 2]
y2_shape = [2, 2]
params = constant_op.constant(
np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
indices = constant_op.constant(index_values, name="i")
y = array_ops.gather(params, indices, name="y")
indices2 = constant_op.constant(index_values2, name="i2")
y2 = array_ops.gather(y, indices2, name="y2")
error = gradient_checker.compute_gradient_error(params, p_shape, y2,
y2_shape)
tf_logging.info("nested gather error = %f", error)
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testComplexMul(self):
with self.cached_session():
size = ()
c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
y = c * x
analytical, numerical = gradient_checker.compute_gradient(x, size, y,
size)
correct = np.array([[5, 7], [-7, 5]])
self.assertAllEqual(correct, analytical)
self.assertAllClose(correct, numerical, rtol=1e-4)
self.assertLess(
gradient_checker.compute_gradient_error(x, size, y, size), 3e-4)
@test_util.run_deprecated_v1
def testComplexConj(self):
with self.cached_session():
size = ()
x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
y = math_ops.conj(x)
analytical, numerical = gradient_checker.compute_gradient(x, size, y,
size)
correct = np.array([[1, 0], [0, -1]])
self.assertAllEqual(correct, analytical)
self.assertAllClose(correct, numerical, rtol=2e-5)
self.assertLess(
gradient_checker.compute_gradient_error(x, size, y, size), 2e-5)
@test_util.run_deprecated_v1
def testEmptySucceeds(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
for grad in gradient_checker.compute_gradient(x, (0, 3), y, (0, 3)):
self.assertEqual(grad.shape, (0, 0))
error = gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
self.assertEqual(error, 0)
def testEmptyFails(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = array_ops.placeholder(dtypes.float32)
with g.gradient_override_map({"Identity": "BadGrad"}):
y = array_ops.identity(x)
bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
with self.assertRaisesRegex(ValueError, bad):
gradient_checker.compute_gradient(x, (0, 3), y, (0, 3))
with self.assertRaisesRegex(ValueError, bad):
gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
def testNaNGradFails(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = array_ops.placeholder(dtypes.float32)
with g.gradient_override_map({"Identity": "NaNGrad"}):
y = array_ops.identity(x)
error = gradient_checker.compute_gradient_error(x, (), y, ())
# A typical test would assert error < max_err, so check that this
# assertion raises AssertionError, since NaN is not < 1.0.
with self.assertRaisesRegex(AssertionError, "False is not true"):
self.assertTrue(error < 1.0)
class MiniMNISTTest(test.TestCase):
# Gradient checker for MNIST.
def _BuildAndTestMiniMNIST(self, param_index, tag):
# Fix seed to avoid occasional flakiness
np.random.seed(6)
# Hyperparameters
batch = 3
inputs = 16
features = 32
classes = 10
# Define the parameters
inp_data = np.random.random_sample(inputs * batch)
hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
hidden_bias_data = np.random.random_sample(features)
sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
sm_bias_data = np.random.random_sample(classes)
# special care for labels since they need to be normalized per batch
label_data = np.random.random(batch * classes).reshape((batch, classes))
s = label_data.sum(axis=1)
label_data /= s[:, None]
with self.session():
# We treat the inputs as "parameters" here
inp = constant_op.constant(
inp_data.tolist(),
shape=[batch, inputs],
dtype=dtypes.float64,
name="inp")
hidden_weight = constant_op.constant(
hidden_weight_data.tolist(),
shape=[inputs, features],
dtype=dtypes.float64,
name="hidden_weight")
hidden_bias = constant_op.constant(
hidden_bias_data.tolist(),
shape=[features],
dtype=dtypes.float64,
name="hidden_bias")
softmax_weight = constant_op.constant(
sm_weight_data.tolist(),
shape=[features, classes],
dtype=dtypes.float64,
name="softmax_weight")
softmax_bias = constant_op.constant(
sm_bias_data.tolist(),
shape=[classes],
dtype=dtypes.float64,
name="softmax_bias")
# List all the parameters so that we can test them one at a time
all_params = [
inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias
]
param_sizes = [
[batch, inputs], # inp
[inputs, features], # hidden_weight,
[features], # hidden_bias
[features, classes], # softmax_weight,
[classes]
] # softmax_bias
# Now, build the MNIST network
features = nn_ops.relu(
nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features")
logits = nn_ops.xw_plus_b(
features, softmax_weight, softmax_bias, name="logits")
labels = constant_op.constant(
label_data.tolist(),
shape=[batch, classes],
dtype=dtypes.float64,
name="labels")
cost = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name="cost")
# Test the gradients.
err = gradient_checker.compute_gradient_error(
all_params[param_index],
param_sizes[param_index],
cost, [batch],
delta=1e-5)
tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err)
return err
@test_util.run_deprecated_v1
def testInputGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8)
@test_util.run_deprecated_v1
def testHiddenWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)
@test_util.run_deprecated_v1
def testHiddenBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)
@test_util.run_deprecated_v1
def testSoftmaxWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)
@test_util.run_deprecated_v1
def testSoftmaxBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
if __name__ == "__main__":
test.main()
|
|
# License: none (public domain)
import errno
import gzip
import hashlib
import logging
import os
import pickle
import smtplib
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from itertools import count, groupby
from bs4 import BeautifulSoup
import tinycss
from lib import AlamoDrafthouseAPI as api
from lib.InlineCSS import styled
logging.basicConfig()
log = logging.getLogger(__name__)
RESOURCES_DIRECTORY = 'resources'
PICKLE_FILE = os.path.join(RESOURCES_DIRECTORY, 'cache', 'pancake.pickle')
RECIPIENTS_FILE = os.path.join(RESOURCES_DIRECTORY, 'config', 'pancake.list')
OVERRIDES_FILE = os.path.join(RESOURCES_DIRECTORY, 'config', 'overrides.list')
USER_FILE = os.path.join(RESOURCES_DIRECTORY, 'config', 'user')
PASS_FILE = os.path.join(RESOURCES_DIRECTORY, 'config', 'pass')
STYLE_FILE = os.path.join(RESOURCES_DIRECTORY, 'css', 'pancake.css')
TEMPLATE_FILE = os.path.join(RESOURCES_DIRECTORY, 'template', 'pancake.html')
DATE_FORMAT = '%A, %B %d, %Y'
TIME_FORMAT = '%I:%M%p'
def date_string(dt):
"""Returns a date string representation of the given datetime object."""
return datetime.strftime(dt, DATE_FORMAT)
def time_string(dt):
"""Returns a time string representation of the given datetime object."""
ts = datetime.strftime(dt, TIME_FORMAT).lstrip('0')
return ts[:-2] + 'p' if ts.endswith('PM') else ts[:-2]
def datetime_string(dt):
"""Returns a date and time string representation of the given datetime object."""
return date_string(dt) + ' - ' + time_string(dt)
def pancake_sort_key(pancake):
"""Key to sort pancakes by film title, cinema location, and datetime of the show."""
return pancake.film_name, pancake.cinema.cinema_name, pancake.film_datetime
def html_showtimes(pancakes):
"""Returns a list of pancake showtimes, as pancake HTML."""
showtimes = []
for pancake in pancakes:
soup = BeautifulSoup('<span></span>')
soup.span['class'] = pancake.film_status
if pancake.film_status == 'onsale':
anchor = soup.new_tag('a', href=pancake.film_url)
anchor.append(time_string(pancake.film_datetime))
soup.span.append(anchor)
else: # pancake.film_status == 'soldout' or pancake.film_status == 'notonsale'
soup.span.append(time_string(pancake.film_datetime))
showtimes.append(str(soup))
return showtimes
def html_digest(pancakes):
"""Returns pancake styled HTML digest of the given pancakes."""
pancakes = sorted(pancakes, key=pancake_sort_key)
# things to group by
by_film_cinema = lambda p: (p.film_id, p.film_name, p.cinema.cinema_url, p.cinema.cinema_name)
by_day = lambda p: p.film_datetime.date()
soup = BeautifulSoup('')
for key, pancakes in groupby(pancakes, key=by_film_cinema):
film_id, film, cinema_url, cinema = key
film_heading = BeautifulSoup('<h1><a></a></h1>')
film_heading.h1['class'] = 'film_heading'
film_heading.a['href'] = 'https://drafthouse.com/uid/' + film_id
film_heading.a.append(film)
cinema_name = 'Alamo Drafthouse ' + cinema
if cinema_url:
cinema_heading = BeautifulSoup('<h2><a></a></h2>')
cinema_heading.h2['class'] = 'cinema_heading'
cinema_heading.a['href'] = cinema_url
cinema_heading.a.append(cinema_name)
else:
cinema_heading = BeautifulSoup('<h2></h2>')
cinema_heading.h2['class'] = 'cinema_heading'
cinema_heading.h2.append(cinema_name)
item_data = []
for day, pancakes in groupby(pancakes, key=by_day):
item_data.append((date_string(day), ', '.join(html_showtimes(pancakes))))
item_list = BeautifulSoup('<ul></ul>')
item_list.ul['class'] = 'film_items'
for data, n in zip(item_data, count(1)):
day, showtimes = data
item = item_list.new_tag('li')
item['class'] = 'film_item'
item_content = '<span>{day} - {showtimes}</span>'.format(day=day, showtimes=showtimes)
item.append(BeautifulSoup(item_content))
item_list.ul.append(item)
soup.append(film_heading)
soup.append(cinema_heading)
soup.append(item_list)
content = str(soup)
# load CSS stylesheet
try:
parser = tinycss.make_parser('page3')
stylesheet = parser.parse_stylesheet_file(STYLE_FILE)
style = {
r.selector.as_css(): {d.name: d.value.as_css()
for d in r.declarations}
for r in stylesheet.rules
}
except Exception as e:
log.warn('could not load CSS style file: {}'.format(e))
style = None
# load HTML template
try:
with open(TEMPLATE_FILE, 'r') as f:
template = f.read()
return styled(template.format(content=content), style)
except Exception as e:
log.warn('could not load HTML template file: {}'.format(e))
return styled(content, style)
def text_digest(pancakes):
"""Returns a plain text digest of the given pancakes."""
text = ''
for pancake in sorted(pancakes, key=pancake_sort_key):
if pancake.film_status == 'onsale':
status = 'On sale now!'
elif pancake.film_status == 'soldout':
status = 'Sold out.'
else: # pancake.film_status == 'notonsale'
status = 'Not on sale yet.'
params = (
pancake.film_name.encode('utf-8'),
pancake.cinema.cinema_name,
date_string(pancake.film_datetime),
time_string(pancake.film_datetime),
status, )
text += '{}\n{}\n{}\n{}\n{}'.format(*params)
if pancake.film_status == 'onsale':
text += '\n{}'.format(pancake.film_url)
text += '\n\n'
return text
def notify(pancakes, recipients):
"""Sends digest email(s) to recipients given pancakes (no email sent if pancakes is empty)."""
if not pancakes:
return
plain = text_digest(pancakes)
log.info('digest:\n{}'.format(plain))
if not recipients:
return
msg = MIMEMultipart('alternative')
msg['Subject'] = 'Pancake Master: {}'.format(datetime_string(datetime.now()))
msg['To'] = 'undisclosed-recipients'
msg['From'] = recipients[0]
msg.attach(MIMEText(plain, 'plain'))
msg.attach(MIMEText(html_digest(pancakes), 'html'))
try:
s = smtplib.SMTP('localhost')
s.set_debuglevel(1)
s.login(load_user(), load_pass())
s.sendmail(msg['From'], recipients, msg.as_string())
s.quit()
log.info('sent email(s) to {}'.format(', '.join(recipients)))
except Exception as e:
log.error('email fail: {}'.format(e))
raise
def pancake_key(pancake):
"""Creates a unique id for a given pancake."""
m = hashlib.md5()
m.update(pancake.film_name.encode('utf-8'))
m.update(pancake.cinema.cinema_name.encode('utf-8'))
m.update(datetime_string(pancake.film_datetime).encode('utf-8'))
return m.hexdigest()
def save_database(db):
"""Saves pancake database to disk."""
filename = PICKLE_FILE
log.info('saving {}'.format(filename))
try:
with gzip.GzipFile(filename, 'wb') as f:
f.write(pickle.dumps(db, 1))
except Exception as e:
log.error('save failure: {}'.format(e))
raise
def load_database():
"""Unpickles and decompresses the given file and returns the created object."""
filename = PICKLE_FILE
log.info('loading {}'.format(filename))
try:
with gzip.open(filename, 'rb') as f:
data = f.read()
return pickle.loads(data)
except Exception as e:
log.error('load failure: {}'.format(e))
log.warn('creating new pancake database...')
return {}
def update_pancakes(db, pancakes):
"""Updates database given the list of all pancakes, returns list of updated pancakes."""
updated = []
for pancake in pancakes:
key = pancake_key(pancake)
if key in db:
if db[key].film_status == 'notonsale' and pancake.film_status == 'onsale':
updated.append(pancake)
else:
updated.append(pancake)
db[key] = pancake
return updated
def prune_database(db):
"""Removes old pancakes from the database."""
# copy the items so we can delete from the dict while iterating
for key, pancake in list(db.items()):
if pancake.film_datetime.date() < datetime.now().date():
del db[key]
def load_user():
with open(USER_FILE) as f:
return f.readlines()[0].strip()
def load_pass():
with open(PASS_FILE) as f:
return f.readlines()[0].strip()
def load_recipients():
"""Returns list of email addresses to notify."""
try:
with open(RECIPIENTS_FILE) as f:
return [line for line in (line.strip() for line in f.readlines()) if line]
except:
log.warn('no email recipients found, not sending email notifications...')
return []
def load_overrides():
"""Returns list of film overrides to notify for in addition to pancakes."""
try:
with open(OVERRIDES_FILE) as f:
return [line for line in (line.strip() for line in f.readlines()) if line]
except:
pass
return []
def mkdir_p(path):
"""Make directory without error if it already exists."""
try:
os.makedirs(path, exist_ok=True) # python 3.2+
except TypeError:
# exist_ok is not supported on this Python, fall back to plain makedirs
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def clear_cache():
"""Deletes existing pancake database."""
try:
os.remove(PICKLE_FILE)
except:
log.exception('clearing cache:')
def show_cache():
"""Shows text digest of existing pancake database."""
try:
db = load_database()
log.info(text_digest(db.values()))
except:
log.exception('loading cache:')
def main(market, disable_notify=False, disable_fetch=False):
"""Fetches pancake data, send notifications, and reports updates."""
mkdir_p(os.path.join(RESOURCES_DIRECTORY, 'config'))
mkdir_p(os.path.join(RESOURCES_DIRECTORY, 'cache'))
db = load_database()
recipients = load_recipients()
overrides = load_overrides()
if not disable_fetch:
try:
pancakes = api.query_pancakes(market, overrides)
except:
pancakes = []
log.exception('api error:')
updated = update_pancakes(db, pancakes)
else:
updated = db.values()
if not disable_notify:
try:
notify(updated, recipients)
except:
log.exception('notification error:')
prune_database(db)
save_database(db)
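# A minimal command-line entry point sketch (an assumption; the real script
# may wire up its own argument parsing elsewhere). It only calls main() with
# the flags defined above:
#
#   if __name__ == '__main__':
#       import argparse
#       parser = argparse.ArgumentParser(description='Alamo Drafthouse pancake notifier')
#       parser.add_argument('market', help='market identifier passed to the Drafthouse API')
#       parser.add_argument('--no-notify', action='store_true', help='skip sending email digests')
#       parser.add_argument('--no-fetch', action='store_true', help='use cached data only')
#       args = parser.parse_args()
#       main(args.market, disable_notify=args.no_notify, disable_fetch=args.no_fetch)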
|
|
from collections import defaultdict
import re
import hashlib
from urllib import quote
#from util import numericOnly, alphaOnly
#from addresses import standardize_country
# for JSON, there would be
# six distinguished attributes:
#
# for database, we don't have to worry about this
# no binning
# def nearest5(x):
# return 5*(int(2.5 + x)/5)
# def nearest2(x):
# return 2*(int(1 + x)/2)
USPhonePattern = re.compile(r"^\([0-9]{3}\) [0-9]{3}\-[0-9]{4}$")
def clean_phone(x):
"""Return the phone as a 10 digit number,
or as close to that as we can make it.
Prefix with country code '+1' at the end.
"""
if (len(x)>0):
x = x.strip().lower()
cc = ''
if x.find("+") == 0:
end1 = x.find(" ")
end2 = x.find("-")
if end1 == -1: end1 = 10000
if end2 == -1: end2 = 10000
if end1 != 10000 or end2 != 10000:
end = min(end1, end2)
cc = x[1:end]
ph = numericOnly(x[end+1:])
else:
testCC = detectCountryCode(x)
if testCC:
cc = testCC
ccLen = len(cc)
ph = x[ccLen+1:]
ph = numericOnly(ph)
else:
ph = numericOnly(x)
else:
valid = USPhonePattern.match(x)
if valid:
ph = valid.group()
cc = "1"
ph = numericOnly(ph)
else:
ph = numericOnly(x)
# If there are 11 numbers
if (len(ph)==11 and ph[0]=="1"):
ph = ph[1:]
cc = "1"
if len(cc) > 0:
ph = "+" + cc + "-" + ph
return ph
return ''
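# Worked examples (a sketch, assuming numericOnly() strips every non-digit
# character, as its name suggests):
#   clean_phone("(213) 555-1234")  -> "+1-2135551234"
#   clean_phone("1-213-555-1234")  -> "+1-2135551234"
#   clean_phone("+44 2071234567")  -> "+44-2071234567"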
def phone_uri(x):
"""Return the uri for a phone
as countrycode-phone
Use 'x-' as country code if not present in the number
"""
x = clean_phone(x)
if len(x) > 0:
dashIdx = x.find('-')
if dashIdx != -1:
return "phonenumber/" + x[1:]
return "phonenumber/x-" + x
return ''
def phonenumber_uri(x):
return phone_uri(x)
# age 15647
def clean_age(x):
"""Return the clean age
"""
stripped = x.strip().lower()
# take only first value of any range
stripped = stripped.split('-')[0].strip()
try:
age = int(stripped)
if age<1 or age>99:
return None
except:
return None
return age
def age_uri(x):
cx = clean_age(x)
if (cx>0):
return "person_age/" + str(cx)
return ''
def person_age_uri(x):
if x:
return "person_age/%s" % x
return ''
# email 7105
def clean_email(x):
"""Return a clean email address
"""
if (len(x)>0 and x.find("@") != -1):
em = x.strip().lower()
em = nonWhitespace(em)
return em
return ''
def emailaddress_uri(x):
if x:
return "emailaddress/%s" % x
return ''
# gender
def clean_gender(x):
if (len(x)>0):
g = x.strip().lower()
if g in ["female", "f"]:
return "f"
elif g in ["male", "m"]:
return "m"
return ''
def person_gender_uri(cleaned):
if cleaned:
return "person_gender/%s" % cleaned
return ''
# rate
# rate60 12706
# rate30 10640
# rate15 1215
def base_clean_rate(x):
clean = x.strip().lower()
if not clean or clean[0] == "0":
return None
rate = int(float(clean))
if rate < 20 or rate > 1000:
return None
return rate
def clean_rate(x):
rate = base_clean_rate(x)
if rate != None:
return "%s-per-60min" % rate
return ''
def clean_rate15(x):
rate = base_clean_rate(x)
# if rate != None:
# rate = rate * 4
if rate != None:
return "%s-per-15min" % rate
return ''
def clean_rate30(x):
rate = base_clean_rate(x)
# if rate != None:
# rate = rate * 2
if rate != None:
return "%s-per-30min" % rate
return ''
def rate_uri(cleaned):
if cleaned:
return "rate/%s" % cleaned
def rate_price(cleaned):
if cleaned:
idx = cleaned.find("-")
if idx != -1:
return int(cleaned[0:idx])
return ''
def rate_duration(cleaned):
if cleaned:
idx = cleaned.find("per-")
if idx != -1:
tail = cleaned[idx+4:]
dur = tail[0:len(tail)-3]
return dur
return ''
def rate_unit(cleaned):
if cleaned:
idx = cleaned.find("min")
if idx != -1:
return "MIN"
idx = cleaned.find("sec")
if idx != -1:
return "SEC"
idx = cleaned.find("hr")
if idx != -1:
return "HUR"
return ''
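# End-to-end example for the rate helpers above:
#   clean_rate("250")              -> "250-per-60min"
#   rate_uri("250-per-60min")      -> "rate/250-per-60min"
#   rate_price("250-per-60min")    -> 250
#   rate_duration("250-per-60min") -> "60"
#   rate_unit("250-per-60min")     -> "MIN"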
# ethnicity 38587
def clean_ethnicity(x):
stripped = x.strip().lower().replace(" ","")
return stripped
def person_ethnicity_uri(cleaned):
if cleaned:
return "person_ethnicity/%s" % cleaned
def clean_height(x):
stripped = x.strip().lower()
# take only first measurement of any range
stripped = stripped.split('-')[0].strip()
try:
# First, 5'6" or 6' or 6'7
dimensions = stripped.split("'")
if len(dimensions) >= 2:
feet = int(dimensions[0])
try:
inches = int(dimensions[1].strip('"'))
except:
# empty inches
inches = 0
# return nearest5(int(2.54 * (12 * feet) + inches))
# no binning; convert total inches to centimeters
return int(2.54 * (12 * feet + inches))
else:
# no inches, so try centimeters
# Second, 137
# return nearest5(int(stripped))
# no binning
return int(stripped)
except:
return None
return None
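# Worked examples (consistent with the inches-to-centimeters conversion above):
#   clean_height("5'6\"") -> 167   # 66 inches -> int(167.64)
#   clean_height("170")   -> 170   # already centimeters
#   clean_height("tall")  -> None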
def person_height_uri(cleaned):
if cleaned:
return "person_height/%s" % cleaned
# hair 22078
def clean_hair(x):
stripped = x.strip().lower()
return alphaOnlyPreserveSpace(stripped)
def person_haircolor_uri(cleaned):
if cleaned:
return "person_haircolor/%s" % cleaned.replace(" ","_")
# build 21842
def clean_build(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_build_uri(cleaned):
if cleaned:
return "person_build/%s" % cleaned
# cup 19179
def clean_cup(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_cupsizeus_uri(cleaned):
if cleaned:
return "person_cupsizeus/%s" % cleaned
# bust 18394
# bust 34-35
# bust D
# bust 34"
# bust over
# bust Perrrfct
def clean_bust(x):
"""Bust measured in inches, restricted to [20,50]"""
def sanityCheck(bust):
if bust >= 20 and bust <= 50:
return bust
else:
return None
stripped = x.strip().lower()
stripped = stripped.replace(" ","")
first = re.split("-", stripped)[0]
try:
return sanityCheck(int(float(first)))
except:
pass
try:
return sanityCheck(int(numericOnly(first)))
except:
pass
return None
def person_bustbandsize_uri(cleaned):
"Bust measured in inches"
if cleaned:
return "person_bustbandsize/%s" % cleaned
# piercings 18294
# None Belly Button Face
# xxxxx Other (where xxxx is a legal value)
# Tongue Breasts Belly Button Other
#
# Maybe use "belly button" "below the belt" as tokens, and then
# we should generate a comma-separated list of values and then
# use split values to generate a multi-valued cell so that we
# can generate multiple features per attribute.
def clean_piercings(x):
stripped = x.strip().lower()
stripped = re.sub("belly button", "bellybutton", stripped)
stripped = re.sub("below the belt", "belowthebelt", stripped)
return stripped.split(' ')
def pipeList(l):
return "|".join(l)
def person_piercings_uri(cleaned):
if cleaned:
return "person_piercings/%s" % cleaned
return ''
# creditcards 18272
def clean_creditcards(x):
stripped = x.strip().lower()
return stripped
def creditcardaccepted_uri(cleaned):
cleaned = clean_creditcards(cleaned)
if cleaned:
return "creditcardaccepted/%s" % cleaned
# hairlength 18030
def clean_hairlength(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_hairlength_uri(cleaned):
if cleaned:
return "person_hairlength/%s" % cleaned
# hairtype 17945
def clean_hairtype(x):
stripped = x.strip().lower()
return alphaOnlyPreserveSpace(stripped)
def person_hairtype_uri(cleaned):
if cleaned:
return "person_hairtype/%s" % cleaned.replace(" ","_")
# eyes 16723
def clean_eyes(x):
stripped = x.strip().lower()
return alphaOnlyPreserveSpace(stripped)
def person_eyecolor_uri(cleaned):
if cleaned:
return "person_eyecolor/%s" % cleaned.replace(" ","_")
# weight 13316
def clean_weight(x):
"""In kg.
unmarked weight < 90 is interpreted as kg, >=90 as lb"""
x = str(x).strip().lower()
def lb_to_kg(lb):
return int(float(lb)/2.2)
def sanityCheck(kg):
if kg >= 40 and kg <= 200:
return kg
else:
return None
try:
cleaned = x
# # first try for st/stone
l = re.split("stone", cleaned)
if len(l) == 1:
l = re.split("st", cleaned)
if len(l) > 1:
stone = float(l[0])
lb = l[1]
lb = lb.strip('s')
lb = lb.strip('lb')
lb = lb.strip('pound')
try:
lb = float(lb)
except ValueError, e:
lb = 0
# return sanityCheck(nearest2(lb_to_kg(int(stone*14+lb))))
# no binning
return sanityCheck(lb_to_kg(int(stone*14+lb)))
lb = cleaned.strip('s')
# now try for just pounds
if lb.endswith("lb"):
# return sanityCheck(nearest2(lb_to_kg(int(float(lb.strip('lb'))))))
# no binning
return sanityCheck(lb_to_kg(int(float(lb.strip('lb')))))
if lb.endswith('pound'):
# return sanityCheck(nearest2(lb_to_kg(int(float(lb.strip('pound'))))))
# no binning
return sanityCheck(lb_to_kg(int(float(lb.strip('pound')))))
# now kg
kg = cleaned.strip('s')
if kg.endswith("kg"):
# return sanityCheck(nearest2(int(float(kg.strip('kg')))))
# no binning
return sanityCheck(int(float(kg.strip('kg'))))
if kg.endswith("kilo"):
# return sanityCheck(nearest2(int(float(kg.strip('kilo')))))
# no binning
return sanityCheck(int(float(kg.strip('kilo'))))
if kg.endswith('kilogram'):
# return sanityCheck(nearest2(int(float(kg.strip('kilogram')))))
# no binning
return sanityCheck(int(float(kg.strip('kilogram'))))
# now assume number sans unit
num = int(float(cleaned))
if num < 90:
# assume kg
# return sanityCheck(nearest2(num))
# no binning
return sanityCheck(num)
else:
# assume lb
# return sanityCheck(nearest2(lb_to_kg(num)))
# no binning
return sanityCheck(lb_to_kg(num))
except Exception, e:
return None
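# Worked examples for the weight heuristics above:
#   clean_weight("150lbs") -> 68   # pounds -> int(150 / 2.2)
#   clean_weight("65kg")   -> 65
#   clean_weight("120")    -> 54   # unmarked >= 90 is treated as lb
#   clean_weight("70")     -> 70   # unmarked < 90 is treated as kg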
def person_weight_uri(cleaned):
if cleaned:
return "person_weight/%s" % cleaned
# name 10042
def clean_name(x):
x = toTitleCaseCleaned(x)
if isSymbol(x[0:1]):
return ''
return x
def person_name_uri(cleaned):
if cleaned:
cleaned = cleaned.strip().replace(" ", "_").lower()
return "person_name/%s" % cleaned
return ''
def author_uri(cleaned):
if cleaned:
cleaned = cleaned.strip().replace(" ", "_").lower()
return "author/%s" % cleaned
return ''
# tattoos 8614
def clean_tattoos(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_tattoocount_uri(cleaned):
if cleaned:
return "person_tattoocount/%s" % cleaned
# grooming 5709
def clean_grooming(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_grooming_uri(cleaned):
if cleaned:
return "person_grooming/%s" % cleaned
# implants 5469
def clean_implants(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_implantspresent_uri(cleaned):
if cleaned:
return "person_implantspresent/%s" % cleaned
# username 5209
def clean_username(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_username_uri(cleaned):
if cleaned:
cleaned = cleaned.strip().replace(" ", "_").lower()
return "person_username/%s" % cleaned
return ''
def person_blackhat_username_uri(cleaned):
if cleaned:
cleaned = cleaned.strip().replace(" ", "_").lower()
return "person_blackhat_username/%s" % cleaned
return ''
# travel 4727
def clean_travel(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_travel_uri(cleaned):
if cleaned:
return "person_travel/%s" % cleaned
# waist 2468
# waist_samples = ["24 inches", "28\"", "70cm", "70 cm", "26.5", "svelte", "24-25"]
def clean_waist(x):
"copied from bust"
def inch_to_cm(inch):
return int(inch*2.54)
def sanityCheck(cm):
if cm >= 40 and cm <= 200:
return cm
else:
return None
try:
stripped = x.strip().lower()
stripped = stripped.replace(" ","")
first = re.split("-", stripped)[0]
first = first.strip()
except:
pass
try:
cleaned = first
inch = cleaned.strip('es')
inch = inch.strip('s')
# now try for just inches
if inch.endswith("inch"):
# return sanityCheck(nearest2(inch_to_cm(int(float(inch.strip('inch'))))))
# no binning
return sanityCheck(inch_to_cm(int(float(inch.strip('inch')))))
if inch.endswith('in'):
# return sanityCheck(nearest2(inch_to_cm(int(float(inch.strip('in'))))))
# no binning
return sanityCheck(inch_to_cm(int(float(inch.strip('in')))))
if inch.endswith('"'):
# return sanityCheck(nearest2(inch_to_cm(int(float(inch.strip('"'))))))
# no binning
return sanityCheck(inch_to_cm(int(float(inch.strip('"')))))
# now cm
cm = cleaned.strip('s')
if cm.endswith("cm"):
# return sanityCheck(nearest2(int(float(cm.strip('cm')))))
# no binning
return sanityCheck(int(float(cm.strip('cm'))))
if cm.endswith('centimeter'):
# return sanityCheck(nearest2(int(float(cm.strip('centimeter')))))
# no binning
return sanityCheck(int(float(cm.strip('centimeter'))))
# now assume number sans unit
num = int(float(cleaned))
if num >= 60:
# assume cm
# return sanityCheck(nearest2(num))
# no binning
return sanityCheck(num)
else:
# assume inch
# return sanityCheck(nearest2(inch_to_cm(num)))
# no binning
return sanityCheck(inch_to_cm(num))
except Exception, e:
return None
def person_waistsize_uri(cleaned):
"""in cm
unmarked waist < 60 is interpreted as in, >=60 as cm"""
if cleaned:
return "person_waistsize/" + str(cleaned)
else:
return None
# hips 2400
def clean_hips(x):
stripped = x.strip().lower()
return numericOnly(stripped)
def person_hipstype_uri(cleaned):
if cleaned:
return "person_hipstype/%s" % cleaned
# alias 2049
def clean_alias(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_alias_uri(cleaned):
if cleaned:
return "person_alias/%s" % cleaned
# availability 2049
#availability_samples = ["Incall", "Outcall", "Incall Outcall"]
def clean_availability(x):
stripped = x.strip().lower()
return alphaOnly(stripped)
def person_incalloutcall_uri(cleaned):
if cleaned:
return "person_incalloutcall/%s" % cleaned
def get_url_hash(string):
return hashlib.sha1(string.encode('utf-8')).hexdigest().upper()
def getCacheBaseUrl():
return "http://dig.isi.edu/data/"
def getHTBaseUrl():
return "http://dig.isi.edu/ht/data/"
def getMRSBaseUrl():
return "http://dig.isi.edu/mrs/data/"
# zip 2734
def clean_zip(x):
stripped = x.strip().lower()
return numericOnly(stripped)
def place_zipcode_uri(cleaned):
if cleaned:
return "place_zipcode/%s" % cleaned
# for stanford only
#location_samples = ["Iowa", "New York", "arlington"]
def clean_location(x):
stripped = x.strip().lower()
stripped = alphaNumeric(stripped).strip()
return toTitleCaseCleaned(stripped)
def place_location_uri(cleaned):
if cleaned:
cleaned = cleaned.replace(" ","_").lower()
return "place_location/%s" % cleaned
def feature_address(city, state, country):
return clean_address(city, state, country, ", ")
def clean_city(city):
return clean_location(city)
def clean_state(state, country):
state = clean_location(state)
if country:
state = standardize_state_name(country, state)
else:
state_us = standardize_state_name("US", state)
if len(state_us) > 0:
state = state_us
if len(state) == 2:
state = state.upper() #upper case state code
return state
def clean_country(country):
country = clean_location(country)
country = standardize_country_name(country)
return country
def country_code(country):
country = clean_location(country)
country = standardize_country_code(country)
return country
def clean_address(city, state, country, sep):
city = clean_location(city)
usep = ""
addr = ""
if city:
addr = city
usep = sep
country = clean_location(country)
country = standardize_country_name(country)
state = clean_location(state)
state = standardize_state_name(country, state)
if state:
addr = addr + usep + state
usep = sep
if country:
addr = addr + usep + country
return addr
def address_uri(city, state, country):
addr = clean_address(city, state, country, "-").strip()
if len(addr) > 0:
addr = addr.replace(" ", "_").lower()
return "address/" + addr
return ''
def country_uri(country):
country = clean_location(country)
country = standardize_country_name(country)
if country:
cc = country.replace(" ", "_").lower()
return "country/" + cc
return ''
def clean_website(website):
x = nonWhitespace(website)
if x:
return x.lower()
return ''
def website_uri(website):
if len(website) > 0:
uri = quote(website, safe='')
return "website/" + uri
return ''
def gender_uri(gender):
cg = clean_gender(gender)
if (len(cg)>0):
return "person_gender/" + cg
return ''
def emailaddress_uri(email):
c = clean_email(email)
if (len(c) > 0):
qc = quote(c, safe='')
return "emailaddress/" + qc
return ''
content_registeries = ["application", "audio", "example", "image",
"message", "model", "multipart", "text", "video"]
def clean_content_type(type):
idx = type.find("/")
if idx > 0:  # require a registry name before the '/'
reg = type[0:idx]
if reg in content_registeries:
return type
return ''
def content_type_uri(cleaned):
if cleaned:
return "content_type/%s" % cleaned
return ''
def clean_content_length(clen):
return numericOnly(clen)
def content_length_uri(cleaned):
if cleaned:
return "content_length/%s" % cleaned
return ''
def publication_year_uri(cleaned):
if cleaned:
return "publication_year/%s" % cleaned
return ''
def clean_organization(org):
x = toTitleCaseCleaned(org)
if isSymbol(x[0:1]):
return ''
return x
def organization_name_uri(cleaned):
if cleaned:
for_uri = cleaned.replace(" ", "_").lower()
return "organization/name/%s" % for_uri
return ''
def provider_uri(cleaned):
if cleaned:
x = cleaned.replace(" ", "_").lower()
return "provider/%s" % x
return ''
def cluster_body_uri(uri):
if uri:
return "cluster/body/lsh/%s" % uri
return ''
mapFunctions = defaultdict(lambda x: None)
mapFunctions['phone'] = clean_phone
mapFunctions['age'] = clean_age
mapFunctions['email'] = clean_email
mapFunctions['gender'] = clean_gender
mapFunctions['rate'] = clean_rate
mapFunctions['rate15'] = clean_rate15
mapFunctions['rate30'] = clean_rate30
mapFunctions['rate60'] = clean_rate
mapFunctions['ethnicity'] = clean_ethnicity
mapFunctions['height'] = clean_height
mapFunctions['hair'] = clean_hair
mapFunctions['build'] = clean_build
mapFunctions['cup'] = clean_cup
mapFunctions['bust'] = clean_bust
mapFunctions['piercings'] = lambda x: pipeList(clean_piercings(x))
mapFunctions['creditcards'] = clean_creditcards
mapFunctions['hairlength'] = clean_hairlength
mapFunctions['hairtype'] = clean_hairtype
mapFunctions['eyes'] = clean_eyes
mapFunctions['weight'] = clean_weight
mapFunctions['name'] = clean_name
mapFunctions['tattoos'] = clean_tattoos
mapFunctions['grooming'] = clean_grooming
mapFunctions['implants'] = clean_implants
mapFunctions['username'] = clean_username
mapFunctions['travel'] = clean_travel
mapFunctions['zip'] = clean_zip
mapFunctions['waist'] = clean_waist
mapFunctions['hips'] = clean_hips
mapFunctions['alias'] = clean_alias
mapFunctions['availability'] = clean_availability
mapFunctions['location'] = clean_location
mapFunctions['userlocation'] = clean_location
def feature_value(attributeName, value):
try:
ret = mapFunctions[attributeName](value)
if ret == None:
ret = ''
return ret
except Exception, e:
return ''
uriFunction = defaultdict(lambda x: None)
uriFunction["phonenumber"] = phonenumber_uri
uriFunction["person_age"] = person_age_uri
uriFunction["emailaddress"] = emailaddress_uri
uriFunction["person_gender"] = person_gender_uri
uriFunction["person_ethnicity"] = person_ethnicity_uri
uriFunction["person_height"] = person_height_uri
uriFunction["person_haircolor"] = person_haircolor_uri
uriFunction["person_build"] = person_build_uri
uriFunction["person_cupsizeus"] = person_cupsizeus_uri
uriFunction["person_bustbandsize"] = person_bustbandsize_uri
uriFunction["person_piercings"] = person_piercings_uri
uriFunction["creditcardaccepted"] = creditcardaccepted_uri
uriFunction["person_hairlength"] = person_hairlength_uri
uriFunction["person_hairtype"] = person_hairtype_uri
uriFunction["person_eyecolor"] = person_eyecolor_uri
uriFunction["person_weight"] = person_weight_uri
uriFunction["person_name"] = person_name_uri
uriFunction["person_tattoocount"] = person_tattoocount_uri
uriFunction["person_grooming"] = person_grooming_uri
uriFunction["person_implantspresent"] = person_implantspresent_uri
uriFunction["person_username"] = person_username_uri
uriFunction["person_travel"] = person_travel_uri
uriFunction["place_zipcode"] = place_zipcode_uri
uriFunction["person_waistsize"] = person_waistsize_uri
uriFunction["person_hipstype"] = person_hipstype_uri
uriFunction["person_alias"] = person_alias_uri
uriFunction["person_incalloutcall"] = person_incalloutcall_uri
uriFunction["place_location"] = place_location_uri
uriFunction["rate"] = rate_uri
def feature_uri(cleanAttributeName, cleanValue):
try:
ret = uriFunction[cleanAttributeName](cleanValue)
if ret == None:
ret = ''
return ret
except Exception, e:
return ''
# Pedro
attribute_to_feature = {}
attribute_to_feature['phone'] = "phonenumber"
attribute_to_feature['age'] = "person_age"
attribute_to_feature['email'] = "emailaddress"
attribute_to_feature['gender'] = "person_gender"
attribute_to_feature['ethnicity'] = "person_ethnicity"
attribute_to_feature['height'] = "person_height"
attribute_to_feature['hair'] = "person_haircolor"
attribute_to_feature['build'] = "person_build"
attribute_to_feature['cup'] = "person_cupsizeus"
attribute_to_feature['bust'] = "person_bustbandsize"
attribute_to_feature['piercings'] = "person_piercings"
attribute_to_feature['creditcards'] = "creditcardaccepted"
attribute_to_feature['hairlength'] = "person_hairlength"
attribute_to_feature['hairtype'] = "person_hairtype"
attribute_to_feature['eyes'] = "person_eyecolor"
attribute_to_feature['weight'] = "person_weight"
attribute_to_feature['name'] = "person_name"
attribute_to_feature['tattoos'] = "person_tattoocount"
attribute_to_feature['grooming'] = "person_grooming"
attribute_to_feature['implants'] = "person_implantspresent"
attribute_to_feature['username'] = "person_username"
attribute_to_feature['travel'] = "person_travel"
attribute_to_feature['zip'] = "place_zipcode"
attribute_to_feature['waist'] = "person_waistsize"
attribute_to_feature['hips'] = "person_hipstype"
attribute_to_feature['alias'] = "person_alias"
attribute_to_feature['availability'] = "person_incalloutcall"
attribute_to_feature['location'] = "place_location"
attribute_to_feature['rate15'] = "rate"
attribute_to_feature['rate30'] = "rate"
attribute_to_feature['rate60'] = "rate"
def feature_name(attribute_name):
"""Note: this overrides a specific feature function"""
try:
ret = attribute_to_feature[attribute_name]
if ret == None:
ret = ''
return ret
except Exception, e:
return ''
def feature_mod_time(feature_name, feature_value, mod_time):
try:
if len(feature_value) > 0:
stripped = feature_value.strip()
if (len(stripped)>0):
return mod_time
return ''
except Exception, e:
return ''
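# How the lookups above fit together (a sketch, assuming numericOnly()
# strips non-digit characters):
#   attr, raw = 'phone', '(213) 555-1234'
#   name = feature_name(attr)            # -> 'phonenumber'
#   value = feature_value(attr, raw)     # -> '+1-2135551234'
#   uri = feature_uri(name, value)       # -> 'phonenumber/1-2135551234'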
|
|
# Lint as: python3
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for flexbuffers.py."""
import array
import os.path
import struct
import unittest
from flatbuffers import flexbuffers
Type = flexbuffers.Type
LOG2 = {1: 0, 2: 1, 4: 2, 8: 3}
GOLD_FLEXBUFFER_OBJ = {
'bar': [1, 2, 3],
'bar3': [1, 2, 3],
'bool': True,
'bools': [True, False, True, False],
'foo': 100.0,
'mymap': {'foo': 'Fred'},
'vec': [-100, 'Fred', 4.0, b'M', False, 4.0]
}
GOLD_FLEXBUFFER_FILE = 'gold_flexbuffer_example.bin'
def read_test_file(name):
with open(os.path.join(os.path.dirname(__file__), name), 'rb') as f:
return f.read()
def packed_type(type_, i):
return (type_ << 2) | LOG2[i]
def uint_size(value):
"""Returns number of bytes (power of two) to represent unsigned value."""
assert value >= 0
n = 8
while not value < (1 << n):
n *= 2
return n // 8
def int_size(value):
"""Returns number of bytes (power of two) to represent signed value."""
n = 8
while not -(1 << (n - 1)) <= value < (1 << (n - 1)):
n *= 2
return n // 8
def uint_sizes(value):
return tuple(1 << i for i in range(LOG2[uint_size(value)], 4))
def int_sizes(value):
return tuple(1 << i for i in range(LOG2[int_size(value)], 4))
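# A few concrete values for the size helpers above:
#   uint_size(0) == uint_size(255) == 1;  uint_size(256) == 2
#   int_size(127) == 1;  int_size(128) == int_size(-129) == 2
#   uint_sizes(300) == (2, 4, 8)   # every byte width that can hold 300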
def int_bytes(value, byte_width):
return struct.pack({1: 'b', 2: 'h', 4: 'i', 8: 'q'}[byte_width], value)
def uint_bytes(value, byte_width):
return struct.pack({1: 'B', 2: 'H', 4: 'I', 8: 'Q'}[byte_width], value)
def float_bytes(value, byte_width):
return struct.pack({4: 'f', 8: 'd'}[byte_width], value)
def min_value(type_, byte_width):
assert byte_width > 0
if type_ in (Type.INT, Type.INDIRECT_INT):
return -(1 << (8 * byte_width - 1))
elif type_ in (Type.UINT, Type.INDIRECT_UINT):
return 0
else:
raise ValueError('Unsupported type %s' % type_)
def max_value(type_, byte_width):
assert byte_width > 0
if type_ in (Type.INT, Type.INDIRECT_INT):
return (1 << (8 * byte_width - 1)) - 1
elif type_ in (Type.UINT, Type.INDIRECT_UINT):
return (1 << 8 * byte_width) - 1
else:
raise ValueError('Unsupported type %s' % type_)
def str_bytes(value, byte_width):
value_bytes = value.encode('utf-8')
return [*uint_bytes(len(value_bytes), byte_width), *value_bytes, 0]
def key_bytes(value):
return [*value.encode('ascii'), 0]
def encode_type(type_, value, byte_width=None):
fbb = flexbuffers.Builder()
add = fbb.Adder(type_)
if byte_width:
add(value, byte_width)
else:
add(value)
return fbb.Finish()
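# Round-trip sketch using the module-level helpers exercised throughout
# these tests (Dumps/Loads/GetRoot are the flexbuffers entry points used below):
#   data = flexbuffers.Dumps({'foo': [1, 2, 3]})
#   flexbuffers.Loads(data)           # -> {'foo': [1, 2, 3]}
#   flexbuffers.GetRoot(data).IsMap   # -> True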
INT_MIN_MAX_VALUES = (min_value(Type.INT, 1), max_value(Type.INT, 1),
min_value(Type.INT, 2), max_value(Type.INT, 2),
min_value(Type.INT, 4), max_value(Type.INT, 4),
min_value(Type.INT, 8), max_value(Type.INT, 8))
UINT_MIN_MAX_VALUES = (0, max_value(Type.UINT, 1), max_value(Type.UINT, 2),
max_value(Type.UINT, 4), max_value(Type.UINT, 8))
class UtilTest(unittest.TestCase):
"""Tests to check FlexBuffer utility functions."""
def _test_type_predicate(self, pred, types):
for type_ in types:
with self.subTest(type=type_, pred=pred):
self.assertTrue(pred(type_))
for type_ in set(Type).difference(types):
with self.subTest(type=type_, pred=pred):
self.assertFalse(pred(type_))
def test_inline_types(self):
self._test_type_predicate(
Type.IsInline, (Type.NULL, Type.INT, Type.UINT, Type.FLOAT, Type.BOOL))
def test_typed_vector(self):
self._test_type_predicate(
Type.IsTypedVector,
(Type.VECTOR_INT, Type.VECTOR_UINT, Type.VECTOR_FLOAT, Type.VECTOR_KEY,
Type.VECTOR_STRING_DEPRECATED, Type.VECTOR_BOOL))
self._test_type_predicate(
Type.IsTypedVectorElementType,
(Type.INT, Type.UINT, Type.FLOAT, Type.KEY, Type.STRING, Type.BOOL))
with self.assertRaises(ValueError):
Type.ToTypedVectorElementType(Type.VECTOR)
self.assertIs(Type.ToTypedVectorElementType(Type.VECTOR_INT), Type.INT)
self.assertIs(Type.ToTypedVectorElementType(Type.VECTOR_UINT), Type.UINT)
self.assertIs(Type.ToTypedVectorElementType(Type.VECTOR_FLOAT), Type.FLOAT)
self.assertIs(Type.ToTypedVectorElementType(Type.VECTOR_KEY), Type.KEY)
self.assertIs(
Type.ToTypedVectorElementType(Type.VECTOR_STRING_DEPRECATED),
Type.STRING)
self.assertIs(Type.ToTypedVectorElementType(Type.VECTOR_BOOL), Type.BOOL)
with self.assertRaises(ValueError):
Type.ToTypedVector(Type.VECTOR)
self.assertIs(Type.ToTypedVector(Type.INT), Type.VECTOR_INT)
self.assertIs(Type.ToTypedVector(Type.UINT), Type.VECTOR_UINT)
self.assertIs(Type.ToTypedVector(Type.FLOAT), Type.VECTOR_FLOAT)
self.assertIs(Type.ToTypedVector(Type.KEY), Type.VECTOR_KEY)
self.assertIs(
Type.ToTypedVector(Type.STRING), Type.VECTOR_STRING_DEPRECATED)
self.assertIs(Type.ToTypedVector(Type.BOOL), Type.VECTOR_BOOL)
def test_fixed_typed_vector(self):
self._test_type_predicate(
Type.IsFixedTypedVector,
(Type.VECTOR_INT2, Type.VECTOR_UINT2, Type.VECTOR_FLOAT2,
Type.VECTOR_INT3, Type.VECTOR_UINT3, Type.VECTOR_FLOAT3,
Type.VECTOR_INT4, Type.VECTOR_UINT4, Type.VECTOR_FLOAT4))
self._test_type_predicate(Type.IsFixedTypedVectorElementType,
(Type.INT, Type.UINT, Type.FLOAT))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_INT2), (Type.INT, 2))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_UINT2), (Type.UINT, 2))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_FLOAT2), (Type.FLOAT, 2))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_INT3), (Type.INT, 3))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_UINT3), (Type.UINT, 3))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_FLOAT3), (Type.FLOAT, 3))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_INT4), (Type.INT, 4))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_UINT4), (Type.UINT, 4))
self.assertEqual(
Type.ToFixedTypedVectorElementType(Type.VECTOR_FLOAT4), (Type.FLOAT, 4))
# Invalid size
for type_ in Type.INT, Type.UINT, Type.FLOAT:
with self.assertRaises(ValueError):
Type.ToTypedVector(type_, 1)
with self.assertRaises(ValueError):
Type.ToTypedVector(type_, 5)
# Invalid element type
for length in 1, 2, 3, 4, 5:
with self.assertRaises(ValueError):
Type.ToTypedVector(Type.STRING, length)
self.assertIs(Type.ToTypedVector(Type.INT, 2), Type.VECTOR_INT2)
self.assertIs(Type.ToTypedVector(Type.INT, 3), Type.VECTOR_INT3)
self.assertIs(Type.ToTypedVector(Type.INT, 4), Type.VECTOR_INT4)
self.assertIs(Type.ToTypedVector(Type.UINT, 2), Type.VECTOR_UINT2)
self.assertIs(Type.ToTypedVector(Type.UINT, 3), Type.VECTOR_UINT3)
self.assertIs(Type.ToTypedVector(Type.UINT, 4), Type.VECTOR_UINT4)
self.assertIs(Type.ToTypedVector(Type.FLOAT, 2), Type.VECTOR_FLOAT2)
self.assertIs(Type.ToTypedVector(Type.FLOAT, 3), Type.VECTOR_FLOAT3)
self.assertIs(Type.ToTypedVector(Type.FLOAT, 4), Type.VECTOR_FLOAT4)
def test_width(self):
for x in range(1 << 10):
self.assertEqual(flexbuffers.BitWidth.U(x), LOG2[uint_size(x)])
for x in range(-(1 << 10), 1 << 10):
self.assertEqual(flexbuffers.BitWidth.I(x), LOG2[int_size(x)])
def test_padding(self):
self.assertEqual(flexbuffers._PaddingBytes(0, 4), 0)
self.assertEqual(flexbuffers._PaddingBytes(0, 8), 0)
self.assertEqual(flexbuffers._PaddingBytes(0, 16), 0)
self.assertEqual(flexbuffers._PaddingBytes(1, 8), 7)
self.assertEqual(flexbuffers._PaddingBytes(17, 8), 7)
self.assertEqual(flexbuffers._PaddingBytes(42, 2), 0)
class DecoderTest(unittest.TestCase):
"""Tests to check FlexBuffer decoding functions.
Common variable names used in the tests for compactness:
bw: byte_width
ebw: element_byte_width
kbw: key_byte_width
vbw: value_byte_width
tbw: type_byte_width
Having '_ignored' suffix means that variable doesn't affect the constructed
byte buffer size.
"""
def test_null(self):
for bw in 1, 2, 4, 8:
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*uint_bytes(0, bw),
packed_type(Type.NULL, ebw_ignored),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsNull)
self.assertEqual(root.AsBool, False)
self.assertEqual(root.AsInt, 0)
self.assertEqual(root.AsFloat, 0.0)
for prop in (type(root).AsKey, type(root).AsString, type(root).AsBlob,
type(root).AsVector, type(root).AsTypedVector,
type(root).AsFixedTypedVector, type(root).AsMap):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertEqual(root.Value, None)
self.assertIsNone(flexbuffers.Loads(data))
def test_bool(self):
for value in False, True:
for bw in 1, 2, 4, 8:
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*uint_bytes(int(value), bw),
packed_type(Type.BOOL, ebw_ignored),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsBool)
self.assertEqual(root.AsBool, value)
self.assertEqual(root.AsInt, int(value))
self.assertEqual(root.AsFloat, float(value))
for prop in (type(root).AsKey, type(root).AsString,
type(root).AsBlob,
type(root).AsVector, type(root).AsTypedVector,
type(root).AsFixedTypedVector, type(root).AsMap):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertEqual(root.Value, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_mutate_bool(self):
root = flexbuffers.GetRoot(flexbuffers.Dumps(True))
self.assertTrue(root.IsBool)
self.assertTrue(root.AsBool)
self.assertTrue(root.MutateBool(False))
self.assertTrue(root.IsBool)
self.assertFalse(root.AsBool)
self.assertTrue(root.MutateBool(True))
self.assertTrue(root.IsBool)
self.assertTrue(root.AsBool)
def _check_int(self, data, value):
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsInt)
self.assertEqual(root.AsInt, value)
self.assertEqual(root.AsBool, bool(value))
self.assertEqual(root.AsFloat, float(value))
for prop in (type(root).AsKey, type(root).AsString, type(root).AsBlob,
type(root).AsVector, type(root).AsTypedVector,
type(root).AsFixedTypedVector, type(root).AsMap):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertEqual(root.Value, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_int(self):
for value in (0, 1, -1, 15, -17, *INT_MIN_MAX_VALUES):
for bw in int_sizes(value):
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*int_bytes(value, bw),
packed_type(Type.INT, ebw_ignored),
bw,
])
self._check_int(data, value)
def test_indirect_int(self):
for value in (0, 1, -1, 15, -17, *INT_MIN_MAX_VALUES):
for bw in 1, 2, 4, 8:
for ebw in int_sizes(value):
with self.subTest(value=value, bw=bw, ebw=ebw):
data = bytes([
# Int
*int_bytes(value, ebw),
# Root
*uint_bytes(ebw, bw),
packed_type(Type.INDIRECT_INT, ebw),
bw,
])
self._check_int(data, value)
def test_uint(self):
for value in (1, *UINT_MIN_MAX_VALUES):
for bw in uint_sizes(value):
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*uint_bytes(value, bw),
packed_type(Type.UINT, ebw_ignored),
bw,
])
self._check_int(data, value)
  def test_indirect_uint(self):
for value in (1, *UINT_MIN_MAX_VALUES):
for bw in 1, 2, 4, 8:
for ebw in uint_sizes(value):
with self.subTest(value=value, bw=bw, ebw=ebw):
data = bytes([
# UInt
*uint_bytes(value, ebw),
# Root
*uint_bytes(ebw, bw),
packed_type(Type.INDIRECT_UINT, ebw),
bw,
])
self._check_int(data, value)
def test_mutate_ints(self):
# Signed
for type_ in Type.INT, Type.INDIRECT_INT:
with self.subTest(type=type_):
root = flexbuffers.GetRoot(encode_type(type_, 56))
self.assertEqual(root.AsInt, 56)
for new_value in 0, 1, -1, -128, 127:
self.assertTrue(root.MutateInt(new_value))
self.assertEqual(root.AsInt, new_value)
for new_value in -129, 128:
self.assertFalse(root.MutateInt(new_value))
# Unsigned
for type_ in Type.UINT, Type.INDIRECT_UINT:
with self.subTest(type=type_):
root = flexbuffers.GetRoot(encode_type(type_, 1))
self.assertEqual(root.AsInt, 1)
for new_value in 0, 1, 255:
self.assertTrue(root.MutateInt(new_value))
self.assertEqual(root.AsInt, new_value)
self.assertFalse(root.MutateInt(256))
# Inside vector
fbb = flexbuffers.Builder()
fbb.VectorFromElements([13, 0, -15])
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), [13, 0, -15])
self.assertTrue(flexbuffers.GetRoot(data).AsVector[0].MutateInt(0))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[1].MutateInt(-7))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[2].MutateInt(45))
self.assertEqual(flexbuffers.Loads(data), [0, -7, 45])
# Inside map
fbb = flexbuffers.Builder()
fbb.MapFromElements({'x': -7, 'y': 46})
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), {'x': -7, 'y': 46})
self.assertTrue(flexbuffers.GetRoot(data).AsMap['x'].MutateInt(14))
self.assertTrue(flexbuffers.GetRoot(data).AsMap['y'].MutateInt(-1))
self.assertEqual(flexbuffers.Loads(data), {'x': 14, 'y': -1})
def _check_float(self, data, value):
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsFloat)
self.assertAlmostEqual(root.AsFloat, value)
for prop in (type(root).AsKey, type(root).AsString, type(root).AsBlob,
type(root).AsVector, type(root).AsTypedVector,
type(root).AsFixedTypedVector, type(root).AsMap):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertAlmostEqual(root.Value, value)
self.assertAlmostEqual(flexbuffers.Loads(data), value)
def test_float(self):
for value in -1.0, 0.0, 1.0, 3.141592, 1.5e6:
for bw in 4, 8:
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*float_bytes(value, bw),
packed_type(Type.FLOAT, ebw_ignored),
bw,
])
self._check_float(data, value)
def test_indirect_float(self):
for value in -1.0, 0.0, 1.0, 3.141592, 1.5e6:
for bw in 1, 2, 4, 8:
for ebw in 4, 8:
with self.subTest(value=value, bw=bw, ebw=ebw):
data = bytes([
# Float
*float_bytes(value, ebw),
# Root
*uint_bytes(ebw, bw),
packed_type(Type.INDIRECT_FLOAT, ebw),
bw,
])
self._check_float(data, value)
def test_mutate_float(self):
for type_ in Type.FLOAT, Type.INDIRECT_FLOAT:
for bw in 4, 8:
value = 3.141592
root = flexbuffers.GetRoot(encode_type(type_, value, bw))
self.assertAlmostEqual(root.AsFloat, value)
value = 2.71828
self.assertTrue(root.MutateFloat(value))
self.assertAlmostEqual(root.AsFloat, value, places=5)
# Inside vector
data = flexbuffers.Dumps([2.4, 1.5, -7.2])
self.assertTrue(flexbuffers.GetRoot(data).AsVector[0].MutateFloat(0.0))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[1].MutateFloat(15.2))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[2].MutateFloat(-5.1))
for a, b in zip(flexbuffers.Loads(data), [0.0, 15.2, -5.1]):
self.assertAlmostEqual(a, b)
def test_string(self):
for value in 'red', 'green', 'blue', 'flatbuffers + flexbuffers':
value_bytes = value.encode('utf-8')
for bw in 1, 2, 4, 8:
for lbw in 1, 2, 4, 8:
with self.subTest(bw=bw, lbw=lbw):
data = bytes([
# String
*uint_bytes(len(value_bytes), lbw),
*value_bytes,
0,
# Root
*uint_bytes(len(value_bytes) + 1, bw), # offset
packed_type(Type.STRING, lbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, value)
self.assertEqual(root.Value, value)
self.assertEqual(root.AsInt, len(value))
self.assertEqual(flexbuffers.Loads(data), value)
def test_mutate_string(self):
data = encode_type(Type.STRING, '12345')
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, '12345')
self.assertFalse(root.MutateString('543210'))
self.assertTrue(root.MutateString('54321'))
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, '54321')
self.assertTrue(root.MutateString('543'))
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, '543')
self.assertFalse(root.MutateString('54321'))
def test_empty_blob(self):
for bw in 1, 2, 4, 8:
for lbw in 1, 2, 4, 8:
with self.subTest(bw=bw, lbw=lbw):
data = bytes([
# Blob
*uint_bytes(0, lbw),
# Root
*uint_bytes(0, bw),
packed_type(Type.BLOB, lbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsBlob)
self.assertEqual(root.AsBlob, bytes())
self.assertEqual(root.Value, bytes())
self.assertEqual(flexbuffers.Loads(data), bytes())
def test_blob(self):
for blob in [], [215], [23, 75, 124, 0, 45, 15], 255 * [0]:
for bw in 1, 2, 4, 8:
for lbw in 1, 2, 4, 8:
with self.subTest(blob=blob, bw=bw, lbw=lbw):
data = bytes([
# Blob
*uint_bytes(len(blob), lbw),
*blob,
# Root
*uint_bytes(len(blob), bw),
packed_type(Type.BLOB, lbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsBlob)
self.assertEqual(root.AsBlob, bytes(blob))
self.assertEqual(root.Value, bytes(blob))
self.assertEqual(flexbuffers.Loads(data), bytes(blob))
def test_key(self):
for value in '', 'x', 'color':
for bw in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw):
value_bytes = value.encode('ascii')
data = bytes([
# Key
*value_bytes,
0,
# Root
*uint_bytes(len(value_bytes) + 1, bw),
packed_type(Type.KEY, 1),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsKey)
self.assertEqual(root.AsKey, value)
self.assertEqual(root.Value, value)
self.assertEqual(flexbuffers.Loads(data), value)
def _check_fixed_typed_vector(self, data, vector, type_):
self.assertEqual(flexbuffers.Loads(data), vector)
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsFixedTypedVector)
v = root.AsFixedTypedVector
self.assertEqual(len(v), len(vector))
self.assertIs(v.ElementType, type_)
self.assertEqual([e.Value for e in v], vector)
self.assertSequenceEqual(v.Value, vector)
self.assertEqual(root.AsInt, len(vector))
def test_fixed_typed_vector_float(self):
for type_, vector in ((Type.VECTOR_FLOAT2, [-75.0, 34.89]),
(Type.VECTOR_FLOAT3, [-75.0, 34.89, 12.0]),
(Type.VECTOR_FLOAT4, [-75.0, 34.89, -1.0, 1.0])):
for bw in 1, 2, 4, 8:
for ebw in 4, 8:
with self.subTest(type=type_, vector=vector, bw=bw, ebw=ebw):
data = bytes([
# FixedTypedVector
*b''.join(float_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(type_, ebw),
bw,
])
for a, b in zip(flexbuffers.Loads(data), vector):
self.assertAlmostEqual(a, b, places=2)
def test_fixed_typed_vector_int(self):
for type_, vector in ((Type.VECTOR_INT2, [0, -13]), (Type.VECTOR_INT3,
[127, 0, -13]),
(Type.VECTOR_INT4, [127, 0, -13, 0])):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(type=type_, vector=vector, bw=bw, ebw=ebw):
data = bytes([
# FixedTypeVector
*b''.join(int_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(ebw * len(vector), bw),
packed_type(type_, ebw),
bw,
])
self._check_fixed_typed_vector(data, vector, Type.INT)
def test_fixed_typed_vector_uint(self):
for type_, vector in ((Type.VECTOR_UINT2, [0, 13]),
(Type.VECTOR_UINT3, [127, 0, 13]), (Type.VECTOR_UINT4,
[127, 0, 13, 0])):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(type=type_, vector=vector, bw=bw, ebw=ebw):
data = bytes([
# FixedTypeVector
*b''.join(uint_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(ebw * len(vector), bw),
packed_type(type_, ebw),
bw,
])
self._check_fixed_typed_vector(data, vector, Type.UINT)
def _check_typed_vector(self, data, vector, type_):
self.assertEqual(flexbuffers.Loads(data), vector)
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsTypedVector)
v = root.AsTypedVector
self.assertIs(v.ElementType, type_)
self.assertEqual(len(v), len(vector))
self.assertEqual([e.Value for e in v], vector)
self.assertSequenceEqual(v.Value, vector)
self.assertEqual(root.AsInt, len(vector))
def test_empty_typed_vector(self):
for type_ in (Type.VECTOR_BOOL, Type.VECTOR_INT, Type.VECTOR_UINT,
Type.VECTOR_FLOAT, Type.VECTOR_KEY,
Type.VECTOR_STRING_DEPRECATED):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(type=type_, bw=bw, ebw=ebw):
data = bytes([
# TypedVector[type_]
*uint_bytes(0, ebw),
# Root
*uint_bytes(0, bw),
packed_type(type_, ebw),
bw
])
element_type = Type.ToTypedVectorElementType(type_)
if element_type == Type.STRING:
element_type = Type.KEY
self._check_typed_vector(data, [], element_type)
def test_typed_vector_bool(self):
vector = [True, False, False, False, True]
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.BOOL]
*uint_bytes(len(vector), ebw),
*b''.join(uint_bytes(int(e), ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(Type.VECTOR_BOOL, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.BOOL)
def test_typed_vector_int(self):
vector = [-100, 200, -300]
for bw in 1, 2, 4, 8:
for ebw in 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.INT]
*uint_bytes(len(vector), ebw),
*b''.join(int_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(Type.VECTOR_INT, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.INT)
def test_typed_vector_uint(self):
vector = [100, 200, 300, 400, 0]
for bw in 1, 2, 4, 8:
for ebw in 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.UINT]
*uint_bytes(len(vector), ebw),
*b''.join(int_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(Type.VECTOR_UINT, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.UINT)
def test_typed_vector_float(self):
vector = [3.64, -6.36, 3.14, 634.0, -42.0]
for bw in 1, 2, 4, 8:
for ebw in 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.FLOAT]
*uint_bytes(len(vector), ebw),
*b''.join(float_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(ebw * len(vector), bw),
packed_type(Type.VECTOR_FLOAT, ebw),
bw,
])
for a, b in zip(flexbuffers.Loads(data), vector):
self.assertAlmostEqual(a, b, places=2)
def test_typed_vector_key(self):
vector = ['red', 'green', 'blue']
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# Keys
*key_bytes(vector[0]),
*key_bytes(vector[1]),
*key_bytes(vector[2]),
# TypedVector[Type.KEY]
*uint_bytes(len(vector), ebw),
*uint_bytes(15 + 1 * ebw, ebw), # offset to vector[0]
*uint_bytes(11 + 2 * ebw, ebw), # offset to vector[1]
*uint_bytes(5 + 3 * ebw, ebw), # offset to vector[2]
# Root
*uint_bytes(len(vector) * ebw, bw), # offset to vector
packed_type(Type.VECTOR_KEY, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.KEY)
def test_typed_vector_string(self):
vector = ['red', 'green', 'blue']
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# Strings
*str_bytes(vector[0], 1), # 5 bytes
*str_bytes(vector[1], 1), # 7 bytes
*str_bytes(vector[2], 1), # 6 bytes
# TypedVector[Type.STRING]
*uint_bytes(len(vector), ebw),
*uint_bytes(17 + 1 * ebw, ebw), # offset to vector[0]
*uint_bytes(12 + 2 * ebw, ebw), # offset to vector[1]
*uint_bytes(5 + 3 * ebw, ebw), # offset to vector[2]
# Root
*uint_bytes(len(vector) * ebw, bw), # offset to vector
packed_type(Type.VECTOR_STRING_DEPRECATED, ebw),
bw,
])
# We have to pass Type.KEY because of Type.VECTOR_STRING_DEPRECATED.
self._check_typed_vector(data, vector, Type.KEY)
def test_typed_vector_string_deprecated(self):
# Check FlexBuffersDeprecatedTest() inside test.cpp for details.
vector = [300 * 'A', 'test']
fbb = flexbuffers.Builder()
with fbb.TypedVector():
for e in vector:
fbb.String(e)
data = fbb.Finish()
# We have to pass Type.KEY because of Type.VECTOR_STRING_DEPRECATED.
self._check_typed_vector(data, vector, Type.KEY)
def test_typed_vector_invalid(self):
fbb = flexbuffers.Builder()
with self.assertRaises(RuntimeError):
fbb.TypedVectorFromElements(['string', 423])
def test_empty_vector(self):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
data = bytes([
*uint_bytes(0, ebw),
# Root
*uint_bytes(0, bw),
packed_type(Type.VECTOR, ebw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsVector)
self.assertEqual(len(root.AsVector), 0)
self.assertEqual(flexbuffers.Loads(data), [])
def test_vector1(self):
vector = [300, 400, 500]
for bw in 1, 2, 4, 8:
for ebw in 2, 4, 8:
for tbw_ignored in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw, ignore=tbw_ignored):
data = bytes([
# Vector length
*uint_bytes(len(vector), ebw),
# Vector elements
*int_bytes(vector[0], ebw),
*int_bytes(vector[1], ebw),
*int_bytes(vector[2], ebw),
# Vector types
packed_type(Type.INT, tbw_ignored),
packed_type(Type.INT, tbw_ignored),
packed_type(Type.INT, tbw_ignored),
# Root
*uint_bytes(ebw * len(vector) + len(vector), bw),
packed_type(Type.VECTOR, ebw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsVector)
self.assertFalse(root.IsMap)
v = root.AsVector
self.assertEqual(len(v), len(vector))
for i in range(len(v)):
self.assertTrue(v[i].IsInt)
self.assertEqual(v[i].AsInt, vector[i])
for i, e in enumerate(v):
self.assertTrue(e.IsInt)
self.assertEqual(e.AsInt, vector[i])
with self.assertRaises(IndexError):
v[-1].AsInt # pylint: disable=pointless-statement
with self.assertRaises(IndexError):
v[3].AsInt # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
root.AsMap # pylint: disable=pointless-statement
self.assertEqual(root.AsInt, len(vector))
self.assertEqual(root.AsFloat, float(len(vector)))
self.assertEqual(flexbuffers.Loads(data), vector)
def test_vector2(self):
vector = [1984, 'August', True]
for bw in 1, 2, 4, 8:
with self.subTest(bw=bw):
data = bytes([
*str_bytes(vector[1], 1),
# Vector
*uint_bytes(len(vector), 2),
*int_bytes(vector[0], 2),
*uint_bytes(11, 2), # offset to 'August'
*uint_bytes(int(vector[2]), 2),
packed_type(Type.INT, 2),
packed_type(Type.STRING, 1),
packed_type(Type.BOOL, 2),
# Root
*uint_bytes(2 * len(vector) + len(vector), bw), # offset to vector
packed_type(Type.VECTOR, 2),
bw,
])
self.assertEqual(flexbuffers.Loads(data), vector)
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsVector)
v = root.AsVector
self.assertTrue(v[0].IsInt)
self.assertEqual(v[0].AsInt, 1984)
self.assertTrue(v[1].IsString)
self.assertEqual(v[1].AsString, 'August')
self.assertTrue(v[2].IsBool)
self.assertTrue(v[2].AsBool)
self.assertEqual(v.Value, vector)
self.assertEqual(root.AsInt, len(vector))
def test_empty_map(self):
for bw in 1, 2, 4, 8:
for kbw in 1, 2, 4, 8:
for vbw in 1, 2, 4, 8:
data = bytes([
*uint_bytes(0, kbw), # Keys length
*uint_bytes(0, vbw),
*uint_bytes(kbw, vbw),
*uint_bytes(0, vbw), # Values length
# Root
*uint_bytes(0, bw),
packed_type(Type.MAP, vbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsMap)
self.assertEqual(len(root.AsMap), 0)
self.assertEqual(flexbuffers.Loads(data), {})
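  # Map layout as constructed in the test below: the sorted key offsets form
  # their own vector, and the map itself stores [offset to that keys vector,
  # key byte width, length, values..., per-element packed types...], with the
  # root offset pointing at the first value.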
def test_map(self):
value = {'foo': 13, 'bar': 14}
for bw in 1, 2, 4, 8:
for kbw in 1, 2, 4, 8:
for vbw in 1, 2, 4, 8:
with self.subTest(kbw=kbw, vbw=vbw, bw=bw):
data = bytes([
*key_bytes('foo'), # 4 bytes
*key_bytes('bar'), # 4 bytes
# Map
*uint_bytes(len(value), kbw),
*uint_bytes(4 + 1 * kbw, kbw), # offset to 'bar'
*uint_bytes(8 + 2 * kbw, kbw), # offset to 'foo'
*uint_bytes(len(value) * kbw, vbw), # offset to keys
*uint_bytes(kbw, vbw),
*uint_bytes(len(value), vbw),
*int_bytes(value['bar'], vbw),
*int_bytes(value['foo'], vbw),
packed_type(Type.INT, vbw),
packed_type(Type.INT, vbw),
# Root
*uint_bytes(vbw * len(value) + len(value),
bw), # offset to values
packed_type(Type.MAP, vbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsMap)
m = root.AsMap
self.assertEqual(len(m), 2)
self.assertEqual(m[0].AsInt, 14)
self.assertEqual(m[1].AsInt, 13)
self.assertEqual(m['bar'].AsInt, 14)
self.assertEqual(m['foo'].AsInt, 13)
for invalid_key in 'a', 'b', 'no':
with self.assertRaises(KeyError):
m[invalid_key] # pylint: disable=pointless-statement
values = m.Values
self.assertEqual(len(values), 2)
self.assertEqual(values[0].AsInt, 14)
self.assertEqual(values[1].AsInt, 13)
keys = m.Keys
self.assertEqual(len(keys), 2)
self.assertEqual(len(keys[0].AsKey), 3)
self.assertEqual(keys[0].AsKey, 'bar')
self.assertEqual(len(keys[1].AsKey), 3)
self.assertEqual(keys[1].AsKey, 'foo')
keys = [key.AsKey for key in keys]
self.assertEqual(sorted(keys), keys)
self.assertEqual(root.AsInt, len(value))
self.assertEqual(flexbuffers.Loads(data), value)
def test_alignment(self):
value = ['test', 7]
data = bytes([
*key_bytes('test'), # 5 bytes: 'test' and \0
0,
0,
0, # 3 bytes: alignment
# Vector
*uint_bytes(len(value), byte_width=8),
*uint_bytes(16, byte_width=8),
*uint_bytes(7, byte_width=8),
packed_type(Type.KEY, 1),
packed_type(Type.INT, 8),
# Root
*uint_bytes(8 * len(value) + len(value), 1),
packed_type(Type.VECTOR, 8),
1,
])
self.assertEqual(flexbuffers.Loads(data), value)
class EncoderTest(unittest.TestCase):
"""Tests to check FlexBuffer encoding functions."""
def test_null(self):
def encode_null():
fbb = flexbuffers.Builder()
fbb.Null()
return fbb.Finish()
self.assertIsNone(flexbuffers.Loads(encode_null()))
def test_bool(self):
for value in False, True:
data = encode_type(Type.BOOL, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_int(self):
for byte_width in 1, 2, 4, 8:
for type_ in Type.INT, Type.INDIRECT_INT, Type.UINT, Type.INDIRECT_UINT:
with self.subTest(byte_width=byte_width, type=type_):
value = min_value(type_, byte_width)
data = encode_type(type_, value)
self.assertEqual(flexbuffers.Loads(data), value)
value = max_value(type_, byte_width)
data = encode_type(type_, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_float(self):
for value in 3.141592, 7.62, 999.99:
for type_ in Type.FLOAT, Type.INDIRECT_FLOAT:
with self.subTest(value=value, type=type_):
data = encode_type(type_, value)
self.assertEqual(flexbuffers.Loads(data), value)
data = encode_type(type_, value, 4)
self.assertAlmostEqual(flexbuffers.Loads(data), value, places=4)
data = encode_type(type_, value, 8)
self.assertEqual(flexbuffers.Loads(data), value)
def test_string(self):
for value in '', 'x', 'color', 'hello world':
with self.subTest(value=value):
data = encode_type(Type.STRING, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_blob(self):
for value in bytes(), bytes([240, 12, 143, 7]), bytes(1000 * [17]):
with self.subTest(value=value):
data = encode_type(Type.BLOB, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_key(self):
for value in '', 'color', 'hello world':
with self.subTest(value=value):
data = encode_type(Type.KEY, value)
self.assertEqual(flexbuffers.Loads(data), value)
with self.assertRaises(ValueError):
encode_type(Type.KEY, (b'\x00' * 10).decode('ascii'))
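  # The Builder exposes two ways to produce untyped vectors, both exercised
  # below: the `with fbb.Vector():` context manager (optionally combined with
  # a typed Adder), and the one-shot VectorFromElements() convenience call.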
def test_vector(self):
def encode_vector(elements, element_type):
fbb = flexbuffers.Builder()
with fbb.Vector():
add = fbb.Adder(element_type)
for e in elements:
add(e)
return fbb.Finish()
def encode_vector_from_elements(elements):
fbb = flexbuffers.Builder()
fbb.VectorFromElements(elements)
return fbb.Finish()
for elements in [], [1435], [56, 23, 0, 6783]:
data = encode_vector(elements, Type.INT)
self.assertEqual(flexbuffers.Loads(data), elements)
data = encode_vector_from_elements(elements)
self.assertEqual(flexbuffers.Loads(data), elements)
# Elements of different type: one by one
elements = [56.0, 'flexbuffers', 0, False, 75123]
fbb = flexbuffers.Builder()
with fbb.Vector():
fbb.Float(elements[0])
fbb.String(elements[1])
fbb.UInt(elements[2], 8)
fbb.Bool(elements[3])
fbb.Int(elements[4])
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), elements)
# Elements of different type: all at once
fbb = flexbuffers.Builder()
fbb.VectorFromElements(elements)
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), elements)
def test_nested_vectors(self):
fbb = flexbuffers.Builder()
with fbb.Vector():
fbb.String('begin')
fbb.IndirectInt(42)
with fbb.Vector():
for i in range(5):
fbb.Int(i)
fbb.String('end')
data = fbb.Finish()
self.assertEqual(
flexbuffers.Loads(data), ['begin', 42, [0, 1, 2, 3, 4], 'end'])
def test_big_vector(self):
n = 10 * 1000
fbb = flexbuffers.Builder()
with fbb.Vector():
for i in range(n):
fbb.Int(i)
self.assertEqual(flexbuffers.Loads(fbb.Finish()), list(range(n)))
def test_typed_vector(self):
def encode_typed_vector_from_elements(elements, element_type=None):
fbb = flexbuffers.Builder()
fbb.TypedVectorFromElements(elements, element_type)
return fbb.Finish()
for elements in [], [False], [True], [False, True, True, False, False]:
data = encode_typed_vector_from_elements(elements, Type.BOOL)
self.assertEqual(flexbuffers.Loads(data), elements)
data = encode_typed_vector_from_elements(elements)
self.assertEqual(flexbuffers.Loads(data), elements)
for elements in [], [23455], [351, -2, 0, 6783, 0, -10]:
data = encode_typed_vector_from_elements(elements, Type.INT)
self.assertEqual(flexbuffers.Loads(data), elements)
data = encode_typed_vector_from_elements(elements)
self.assertEqual(flexbuffers.Loads(data), elements)
for elements in [], [23455], [351, 2, 0, 6783, 0, 10]:
data = encode_typed_vector_from_elements(elements)
self.assertEqual(flexbuffers.Loads(data), elements)
data = encode_typed_vector_from_elements(elements, Type.INT)
self.assertEqual(flexbuffers.Loads(data), elements)
data = encode_typed_vector_from_elements(elements, Type.UINT)
self.assertEqual(flexbuffers.Loads(data), elements)
for elements in [], [7.0], [52.0, 51.2, 70.0, -4.0]:
data = encode_typed_vector_from_elements(elements, Type.FLOAT)
self.assertEqual(flexbuffers.Loads(data), elements)
data = encode_typed_vector_from_elements(elements)
self.assertEqual(flexbuffers.Loads(data), elements)
for elements in [], ['color'], ['x', 'y']:
data = encode_typed_vector_from_elements(elements, Type.KEY)
self.assertEqual(flexbuffers.Loads(data), elements)
data = encode_typed_vector_from_elements(elements)
self.assertEqual(flexbuffers.Loads(data), elements)
def test_typed_vector_from_array(self):
def encode_array(typecode, values):
fbb = flexbuffers.Builder()
fbb.VectorFromElements(array.array(typecode, values))
return fbb.Finish()
values = [1.0, 3.14, -2.54, 0.0]
data = encode_array('f', values)
for a, b in zip(flexbuffers.Loads(data), values):
self.assertAlmostEqual(a, b, places=2)
values = [1.0, 3.14, -2.54, 0.0]
data = encode_array('d', values)
self.assertEqual(flexbuffers.Loads(data), values)
values = [1, -7, 9, 26, 12]
data = encode_array('i', values)
self.assertEqual(flexbuffers.Loads(data), values)
values = [0, 1, 2, 3, 4, 5, 6]
data = encode_array('I', values)
self.assertEqual(flexbuffers.Loads(data), values)
def test_fixed_typed_vector(self):
def encode_fixed_typed_vector(elements, element_type=None):
fbb = flexbuffers.Builder()
fbb.FixedTypedVectorFromElements(elements, element_type)
return fbb.Finish()
for elements in ((-2, 2), (1, 2, 3), (100, -100, 200, -200), (4.0, 7.0),
(0.0, 1.0, 8.0), (9.0, 7.0, 1.0, 5.5)):
with self.subTest(elements=elements):
data = encode_fixed_typed_vector(elements)
self.assertSequenceEqual(flexbuffers.Loads(data), elements)
elements = [-170, 432, 0, -7]
data = encode_fixed_typed_vector(elements, Type.INT)
self.assertSequenceEqual(flexbuffers.Loads(data), elements)
with self.assertRaises(ValueError):
encode_fixed_typed_vector([]) # Invalid input length
with self.assertRaises(ValueError):
encode_fixed_typed_vector([1]) # Invalid input length
with self.assertRaises(ValueError):
encode_fixed_typed_vector([1, 2, 3, 4, 5]) # Invalid input length
with self.assertRaises(TypeError):
encode_fixed_typed_vector([1, 1.0]) # Invalid input types
with self.assertRaises(TypeError):
encode_fixed_typed_vector(['', '']) # Invalid input types
def test_map_builder(self):
def get_keys(data):
return [key.AsKey for key in flexbuffers.GetRoot(data).AsMap.Keys]
# Empty map
fbb = flexbuffers.Builder()
with fbb.Map():
pass
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), {})
# Two-element map of Int
fbb = flexbuffers.Builder()
with fbb.Map():
fbb.Int('y', -2)
fbb.Int('x', 10)
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), {'x': 10, 'y': -2})
# Multiple-element map of vectors
fbb = flexbuffers.Builder()
with fbb.Map():
with fbb.Vector('v'):
fbb.Int(45)
with fbb.TypedVector('tv'):
fbb.Int(-7)
fbb.FixedTypedVectorFromElements('ftv', [-2.0, 1.0])
data = fbb.Finish()
self.assertEqual(
flexbuffers.Loads(data), {
'v': [45],
'tv': [-7],
'ftv': [-2.0, 1.0]
})
keys = get_keys(data)
self.assertEqual(sorted(keys), keys)
# Multiple-element map of different types
fbb = flexbuffers.Builder()
with fbb.Map():
fbb.Null('n')
fbb.Bool('b', False)
fbb.Int('i', -27)
fbb.UInt('u', 27)
fbb.Float('f', -0.85)
fbb.String('s', 'String')
fbb.Blob('bb', b'data')
fbb.IndirectInt('ii', -9500)
fbb.IndirectUInt('iu', 540)
fbb.IndirectFloat('if', 0.0)
fbb.VectorFromElements('v', [2, 1, 0.0])
fbb.TypedVectorFromElements('tv', [2, 1, 0])
fbb.FixedTypedVectorFromElements('ftv', [2.0, -6.0])
data = fbb.Finish()
self.assertEqual(
flexbuffers.Loads(data), {
'n': None,
'b': False,
'i': -27,
'u': 27,
'f': -0.85,
's': 'String',
'bb': b'data',
'ii': -9500,
'iu': 540,
'if': 0.0,
'v': [2, 1, 0.0],
'tv': [2, 1, 0],
'ftv': [2.0, -6.0]
})
keys = get_keys(data)
self.assertEqual(sorted(keys), keys)
def test_map_python(self):
maps = [
{},
{
'key': 'value'
},
{
'x': None,
'y': 3400,
'z': -7040
},
{
'zzz': 100,
'aaa': 5.0,
'ccc': ['Test', 32, False, None, True]
},
{
'name': ['John', 'Smith'],
'valid': True,
'note': None,
'address': {
'lines': [175, 'Alhambra'],
'city': 'San Francisco',
'zip': 94123,
},
},
]
for m in maps:
self.assertEqual(flexbuffers.Loads(flexbuffers.Dumps(m)), m)
def test_gold_from_file(self):
data = read_test_file(GOLD_FLEXBUFFER_FILE)
self.assertEqual(flexbuffers.Loads(data), GOLD_FLEXBUFFER_OBJ)
def test_gold_from_builder(self):
fbb = flexbuffers.Builder()
with fbb.Map():
with fbb.Vector('vec'):
fbb.Int(-100)
fbb.String('Fred')
fbb.IndirectFloat(4.0)
i_f = fbb.LastValue
fbb.Blob(bytes([77]))
fbb.Bool(False)
fbb.ReuseValue(i_f)
vec = [1, 2, 3]
fbb.VectorFromElements('bar', vec)
fbb.FixedTypedVectorFromElements('bar3', [1, 2, 3])
fbb.VectorFromElements('bools', [True, False, True, False])
fbb.Bool('bool', True)
fbb.Float('foo', 100)
with fbb.Map('mymap'):
fbb.String('foo', 'Fred')
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), GOLD_FLEXBUFFER_OBJ)
def test_min_bit_width(self):
fbb = flexbuffers.Builder(force_min_bit_width=flexbuffers.BitWidth.W8)
fbb.TypedVectorFromElements([0, 1, 0, 1, 0])
data = fbb.Finish()
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsTypedVector)
self.assertEqual(root.AsTypedVector.ByteWidth, 1)
fbb = flexbuffers.Builder(force_min_bit_width=flexbuffers.BitWidth.W32)
fbb.TypedVectorFromElements([0, 1, 0, 1, 0])
data = fbb.Finish()
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsTypedVector)
self.assertEqual(root.AsTypedVector.ByteWidth, 4)
def test_share_keys(self):
def encode_key_vector(value, count, share_keys):
fbb = flexbuffers.Builder(share_keys=share_keys)
with fbb.Vector():
for _ in range(count):
fbb.Key(value)
return fbb.Finish(), fbb.KeyPool.Elements
data, pool = encode_key_vector('test', 10, share_keys=False)
self.assertEqual(len(pool), 0)
self.assertEqual(len(data), 74)
self.assertEqual(flexbuffers.Loads(data), 10 * ['test'])
data, pool = encode_key_vector('test', 10, share_keys=True)
self.assertEqual(len(pool), 1)
self.assertEqual(pool[0], 'test'.encode('ascii'))
self.assertEqual(len(data), 29)
self.assertEqual(flexbuffers.Loads(data), 10 * ['test'])
def test_share_strings(self):
def encode_string_vector(value, count, share_strings):
fbb = flexbuffers.Builder(share_strings=share_strings)
with fbb.Vector():
for _ in range(count):
fbb.String(value)
return fbb.Finish(), fbb.StringPool.Elements
data, pool = encode_string_vector('test', 10, share_strings=False)
self.assertEqual(len(pool), 0)
self.assertEqual(len(data), 84)
self.assertEqual(flexbuffers.Loads(data), 10 * ['test'])
data, pool = encode_string_vector('test', 10, share_strings=True)
self.assertEqual(len(pool), 1)
self.assertEqual(pool[0], 'test'.encode('utf-8'))
self.assertEqual(len(data), 30)
self.assertEqual(flexbuffers.Loads(data), 10 * ['test'])
def test_invalid_stack_size(self):
fbb = flexbuffers.Builder()
with self.assertRaises(RuntimeError):
fbb.Finish()
fbb.Int(100)
fbb.Int(200)
with self.assertRaises(RuntimeError):
fbb.Finish()
fbb.Clear()
fbb.Int(420)
fbb.Finish()
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
import time
from requests.compat import quote
from .rssreadertools import getBasicConfig
ReaderBasicConfig = getBasicConfig()
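# Object model for a Google Reader-style client. ItemsContainer is the shared
# base for everything that holds items; Category groups feeds, while
# BaseFeed/Feed/SpecialFeed wrap streams and Item wraps a single entry. The
# `googleReader` attribute passed into these classes is the API client that
# performs the actual HTTP calls (getFeedContent, addItemTag, ...).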
class ItemsContainer(object):
"""
    A base class for all classes that hold items (Categories and Feeds).
"""
def __init__(self):
self.items = []
self.itemsById = {}
self.lastLoadOk = False
self.lastLoadLength = 0
self.lastUpdated = None
self.unread = 0
self.continuation = None
def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
        Get content from Google Reader with the specified parameters.
        Must be overridden in subclasses.
"""
return None
def loadItems(self, excludeRead=False, loadLimit=20, since=None, until=None):
"""
        Load items and call _itemsLoadedDone to turn the raw response into objects.
"""
self.clearItems()
        self.lastLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None, loadLimit, since, until))
def loadMoreItems(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Load more items using the continuation parameters of previously loaded items.
"""
self.lastLoadOk = False
self.lastLoadLength = 0
if not continuation and not self.continuation:
return
self._itemsLoadedDone(self._getContent(excludeRead, continuation or self.continuation, loadLimit, since, until))
def _itemsLoadedDone(self, data):
"""
Called when all items are loaded
"""
if data is None:
return
self.continuation = data.get('continuation', None)
self.lastUpdated = data.get('updated', None)
self.lastLoadLength = len(data.get('items', []))
self.googleReader.itemsToObjects(self, data.get('items', []))
self.lastLoadOk = True
def _addItem(self, item):
self.items.append(item)
self.itemsById[item.id] = item
def getItem(self, id):
return self.itemsById[id]
def clearItems(self):
self.items = []
self.itemsById = {}
self.continuation = None
def getItems(self):
return self.items
    def countItems(self, excludeRead=False):
        if excludeRead:
            return sum(1 for item in self.items if item.isUnread())
        else:
            return len(self.items)
def markItemRead(self, item, read):
if read and item.isUnread():
self.unread -= 1
elif not read and item.isRead():
self.unread += 1
def markAllRead(self):
self.unread = 0
for item in self.items:
item.read = True
item.canUnread = False
result = self.googleReader.markFeedAsRead(self)
return result.upper() == 'OK'
def countUnread(self):
self.unread = self.countItems(excludeRead=True)
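# Typical flow (a sketch, assuming `reader` is the GoogleReader-style client
# object these classes delegate to):
#   feed = reader.getFeed('feed/http://example.com/rss')
#   feed.loadItems(excludeRead=True)
#   for item in feed.getItems():
#       item.markRead()
#   feed.loadMoreItems()  # uses the continuation token stored by loadItems()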
class Category(ItemsContainer):
"""
Class for representing a category
"""
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<%s (%d), %s>" % (self.label, self.unread, self.id)
def __init__(self, googleReader, label, id):
"""
:param label: (str)
:param id: (str)
"""
super(Category, self).__init__()
self.googleReader = googleReader
self.label = label
self.id = id
self.feeds = []
self.fetchUrl = ReaderBasicConfig.CATEGORY_URL + Category.urlQuote(self.label)
def _addFeed(self, feed):
        if feed not in self.feeds:
            self.feeds.append(feed)
            try:
                self.unread += feed.unread
            except Exception:
                pass
def getFeeds(self):
return self.feeds
def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
return self.googleReader.getCategoryContent(self, excludeRead, continuation, loadLimit, since, until)
def countUnread(self):
self.unread = sum([feed.unread for feed in self.feeds])
def toArray(self):
pass
def toJSON(self):
pass
@staticmethod
def urlQuote(string):
""" Quote a string for being used in a HTTP URL """
return quote(string.encode("utf-8"))
class BaseFeed(ItemsContainer):
"""
Class for representing a special feed.
"""
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return "<%s, %s>" % (self.title, self.id)
def __init__(self, googleReader, title, id, unread, categories=[]):
"""
:param title: (str, name of the feed)
:param id: (str, id for google reader)
:param unread: (int, number of unread items, 0 by default)
:param categories: (list) - list of all categories a feed belongs to, can be empty
"""
super(BaseFeed, self).__init__()
self.googleReader = googleReader
self.id = id
self.title = title
self.unread = unread
self.categories = []
for category in categories:
self.addCategory(category)
self.continuation = None
def addCategory(self, category):
        if category not in self.categories:
self.categories.append(category)
category._addFeed(self)
def getCategories(self):
return self.categories
def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
return self.googleReader.getFeedContent(self, excludeRead, continuation, loadLimit, since, until)
def markItemRead(self, item, read):
super(BaseFeed, self).markItemRead(item, read)
for category in self.categories:
category.countUnread()
def markAllRead(self):
self.unread = 0
for category in self.categories:
category.countUnread()
return super(BaseFeed, self).markAllRead()
def toArray(self):
pass
def toJSON(self):
pass
class SpecialFeed(BaseFeed):
"""
    Class for representing special feeds (starred, shared, friends, ...).
"""
def __init__(self, googleReader, type):
"""
type is one of ReaderBasicConfig.SPECIAL_FEEDS
"""
super(SpecialFeed, self).__init__(
googleReader,
title = type,
id = ReaderBasicConfig.SPECIAL_FEEDS_PART_URL+type,
unread = 0,
categories = [],
)
self.type = type
self.fetchUrl = ReaderBasicConfig.CONTENT_BASE_URL + Category.urlQuote(self.id)
class Feed(BaseFeed):
"""
Class for representing a normal feed.
"""
def __init__(self, googleReader, title, id, siteUrl=None, unread=0, categories=[]):
"""
:param title: str name of the feed
:param id: str, id for google reader
:param siteUrl: str, can be empty
:param unread: int, number of unread items, 0 by default
:param categories: (list) - list of all categories a feed belongs to, can be empty
"""
super(Feed, self).__init__(googleReader, title, id, unread, categories)
        # Note: str.lstrip() strips a set of characters, not a prefix, so use
        # an explicit prefix check to derive the feed URL from the stream id.
        self.feedUrl = self.id[len('feed/'):] if self.id.startswith('feed/') else self.id
self.siteUrl = siteUrl
self.fetchUrl = ReaderBasicConfig.FEED_URL + Category.urlQuote(self.id)
class Item(object):
"""
Class for representing an individual item (an entry of a feed)
"""
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return '<"%s" by %s, %s>' % (self.title, self.author, self.id)
def __init__(self, googleReader, item, parent):
"""
:param item: An item loaded from json
        :param parent: the object (Feed or Category) containing the Item
"""
self.googleReader = googleReader
self.parent = parent
self.data = item # save original data for accessing other fields
self.id = item['id']
self.title = item.get('title', '(no title)')
self.author = item.get('author', None)
self.content = item.get('content', item.get('summary', {}))
if (isinstance(self.content, dict)):
self.content = self.content.get('content', '')
self.origin = { 'title': '', 'url': ''}
if 'crawlTimeMsec' in item:
self.time = int(item['crawlTimeMsec']) // 1000
else:
self.time = None
# check original url
self.url = None
for alternate in item.get('alternate', []):
if alternate.get('type', '') == 'text/html':
self.url = alternate['href']
break
# check status
self.read = False
self.starred = False
self.shared = False
for category in item.get('categories', []):
if category.endswith('/state/com.google/read'):
self.read = True
elif category.endswith('/state/com.google/starred'):
self.starred = True
elif category in ('user/-/state/com.google/broadcast',
'user/%s/state/com.google/broadcast' % self.googleReader.userId):
self.shared = True
self.canUnread = item.get('isReadStateLocked', 'false') != 'true'
self.published = item.get('published', '')
if self.published and self.published != '':
self.published = time.strftime('%m/%d %H:%M',
time.localtime(self.published))
        # Keep a reference to the originating feed; when the item was fetched
        # from a special feed, this is the original source feed.
try:
f = item['origin']
self.origin = {
'title': f.get('title', ''),
'url': f.get('htmlUrl', ''),
}
self.feed = self.googleReader.getFeed(f['streamId'])
            if not self.feed:
                raise LookupError('feed %s is not registered' % f['streamId'])
if not self.feed.title and 'title' in f:
self.feed.title = f['title']
except:
try:
                self.feed = Feed(self.googleReader, f.get('title', ''), f['streamId'], f.get('htmlUrl', None), 0, [])
try:
self.googleReader._addFeed(self.feed)
except:
pass
except:
self.feed = None
self.parent._addItem(self)
def isUnread(self):
return not self.read
def isRead(self):
return self.read
def markRead(self, read=True):
self.parent.markItemRead(self, read)
self.read = read
if read:
result = self.googleReader.addItemTag(self, ReaderBasicConfig.TAG_READ)
else:
result = self.googleReader.removeItemTag(self, ReaderBasicConfig.TAG_READ)
return result.upper() == 'OK'
def markUnread(self, unread=True):
return self.markRead(not unread)
def isShared(self):
return self.shared
def markShared(self, shared=True):
self.shared = shared
if shared:
result = self.googleReader.addItemTag(self, ReaderBasicConfig.TAG_SHARED)
else:
result = self.googleReader.removeItemTag(self, ReaderBasicConfig.TAG_SHARED)
return result.upper() == 'OK'
def share(self):
return self.markShared()
def unShare(self):
return self.markShared(False)
def isStarred(self):
return self.starred
def markStarred(self, starred=True):
self.starred = starred
if starred:
result = self.googleReader.addItemTag(self, ReaderBasicConfig.TAG_STARRED)
else:
result = self.googleReader.removeItemTag(self, ReaderBasicConfig.TAG_STARRED)
return result.upper() == 'OK'
def star(self):
return self.markStarred()
def unStar(self):
return self.markStarred(False)
|
|
import os
import re
import errno
import logging
import shutil
import tempfile
import platform
import datetime
import pytz
from PyQt5.QtCore import Qt, QCoreApplication, QMetaObject, QThreadPool, pyqtSlot, pyqtSignal
from PyQt5.QtWidgets import qApp, QMainWindow, QWidget, QAction, QSizePolicy, QMessageBox, QStyle, QSplitter, \
QToolBar, QStatusBar, QVBoxLayout, QTableWidgetItem, QAbstractItemView, QDialog, QCheckBox, QMenu, QHBoxLayout, \
QLabel, QLineEdit, QPushButton, QFileDialog
from PyQt5.QtGui import QIcon
from deriva.qt import EmbeddedAuthWindow, QPlainTextEditLogger, TableWidget, Task
from deriva.core import ErmrestCatalog, HatracStore, read_config, write_config, format_exception, urlquote, \
resource_path
from launcher.impl.catalog_tasks import CatalogQueryTask, SessionQueryTask, CatalogUpdateTask, WORKLIST_QUERY, \
WORKLIST_UPDATE, WORKLIST_CURATOR_QUERY, WORKLIST_STATUS_UPDATE
from launcher.impl.store_tasks import FileRetrieveTask, FileUploadTask, HATRAC_UPDATE_URL_TEMPLATE
from launcher.impl.process_tasks import ViewerTask
from launcher.ui import DEFAULT_CONFIG, CURATORS
from launcher import resources, __version__ as launcher_version
from synspy import __version__ as synspy_version
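# Synspy region-analysis launcher. MainWindow drives a worklist fetched from an
# ERMrest catalog, downloads image/NPZ inputs from Hatrac, launches the
# synspy-viewer subprocess, and uploads the resulting segment CSV back to the
# server. Long-running work is delegated to Task subclasses that report back
# through Qt signals so the UI thread stays responsive.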
# noinspection PyArgumentList
class MainWindow(QMainWindow):
config = None
credential = None
config_path = None
store = None
catalog = None
identity = None
attributes = None
server = None
tempdir = None
progress_update_signal = pyqtSignal(str)
use_3D_viewer = False
curator_mode = False
def __init__(self, config_path=None):
super(MainWindow, self).__init__()
self.ui = MainWindowUI(self)
self.configure(config_path)
self.authWindow = EmbeddedAuthWindow(
self,
config=self.config.get("server"),
cookie_persistence=False,
authentication_success_callback=self.onLoginSuccess
)
self.getSession()
if not self.identity:
self.ui.actionLaunch.setEnabled(False)
self.ui.actionRefresh.setEnabled(False)
self.ui.actionOptions.setEnabled(False)
self.ui.actionLogout.setEnabled(False)
def configure(self, config_path):
# configure logging
self.ui.logTextBrowser.widget.log_update_signal.connect(self.updateLog)
self.ui.logTextBrowser.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logging.getLogger().addHandler(self.ui.logTextBrowser)
logging.getLogger().setLevel(logging.INFO)
# configure Ermrest/Hatrac
if not config_path:
config_path = os.path.join(os.path.expanduser(
os.path.normpath("~/.deriva/synapse/synspy-launcher")), "config.json")
self.config_path = config_path
config = read_config(self.config_path, create_default=True, default=DEFAULT_CONFIG)
protocol = config["server"]["protocol"]
self.server = config["server"]["host"]
catalog_id = config["server"]["catalog_id"]
session_config = config.get("session")
self.catalog = ErmrestCatalog(protocol, self.server, catalog_id, self.credential, session_config=session_config)
self.store = HatracStore(protocol, self.server, self.credential, session_config=session_config)
# create working dir (tempdir)
self.tempdir = tempfile.mkdtemp(prefix="synspy_")
# determine viewer mode
        self.use_3D_viewer = config.get("viewer_mode", "2d").lower() == "3d"
# curator mode?
curator_mode = config.get("curator_mode")
if not curator_mode:
config["curator_mode"] = False
self.curator_mode = config.get("curator_mode")
# save config
self.config = config
write_config(self.config_path, self.config)
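    # Session handling: getSession() fires a SessionQueryTask; onSessionResult()
    # stores the client identity/attributes and enables the UI, while
    # onLoginSuccess() re-wires credentials after an interactive login.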
def getSession(self):
qApp.setOverrideCursor(Qt.WaitCursor)
self.updateStatus("Validating session.")
queryTask = SessionQueryTask(self.catalog)
queryTask.status_update_signal.connect(self.onSessionResult)
queryTask.query()
def onLoginSuccess(self, **kwargs):
self.authWindow.hide()
self.credential = kwargs["credential"]
self.catalog.set_credentials(self.credential, self.server)
self.store.set_credentials(self.credential, self.server)
self.getSession()
def enableControls(self):
self.ui.actionLaunch.setEnabled(True)
self.ui.actionRefresh.setEnabled(True)
self.ui.actionOptions.setEnabled(self.authWindow.authenticated())
self.ui.actionLogin.setEnabled(not self.authWindow.authenticated())
self.ui.actionLogout.setEnabled(self.authWindow.authenticated())
self.ui.actionExit.setEnabled(True)
self.ui.workList.setEnabled(True)
def disableControls(self):
self.ui.actionLaunch.setEnabled(False)
self.ui.actionRefresh.setEnabled(False)
self.ui.actionOptions.setEnabled(False)
self.ui.actionLogin.setEnabled(False)
self.ui.actionLogout.setEnabled(False)
self.ui.actionExit.setEnabled(False)
self.ui.workList.setEnabled(False)
def closeEvent(self, event=None):
self.disableControls()
self.cancelTasks()
shutil.rmtree(self.tempdir)
if event:
event.accept()
def cancelTasks(self):
Task.shutdown_all()
self.statusBar().showMessage("Waiting for background tasks to terminate...")
while True:
qApp.processEvents()
if QThreadPool.globalInstance().waitForDone(10):
break
self.statusBar().showMessage("All background tasks terminated successfully")
def is_curator(self):
for attr in self.attributes:
if attr.get('id') == CURATORS:
return True
return False
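    # Only the columns listed in `displayed` are shown in the worklist; the
    # remaining columns (URLs, ZYX slice, subject, ...) are kept hidden so later
    # actions can read them via getCurrentTableItemTextByName().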
def displayWorklist(self, worklist):
keys = [
"RID",
"RCT",
"Source Image",
"Classifier",
"Due Date",
"Accepted?",
"Status",
"URL",
"Npz URL",
"ZYX Slice",
"Segmentation Mode",
"Segments URL",
"Segments Filtered URL",
"Subject",
]
self.ui.workList.clear()
self.ui.workList.setRowCount(0)
self.ui.workList.setColumnCount(0)
displayed = ["RID", "RCT", "Segmentation Mode", "Classifier", "Due Date", "Accepted?", "Status"]
self.ui.workList.setRowCount(len(worklist))
self.ui.workList.setColumnCount(len(keys))
self.ui.workList.removeAction(self.ui.markIncompleteAction)
if self.is_curator() and self.curator_mode:
self.ui.workList.addAction(self.ui.markIncompleteAction)
rows = 0
for row in worklist:
value = row.get("Status")
if not (value == "analysis pending" or value == "analysis in progress") \
and not (self.is_curator() and self.curator_mode):
self.ui.workList.hideRow(rows)
cols = 0
for key in keys:
item = QTableWidgetItem()
if key == "Classifier":
value = "%s (%s)" % (row['user'][0]['Full_Name'], row['user'][0]['Display_Name'])
item.setData(Qt.UserRole, row['Classifier'])
elif key == "URL" or key == "Subject":
value = row["source_image"][0].get(key)
else:
value = row.get(key)
if isinstance(value, bool):
value = str(value)
if isinstance(value, str) and key == 'RCT':
value = value.replace('T', ' ')[0:19] # drop fractional seconds and TZ
if isinstance(value, str):
item.setText(value)
item.setToolTip(value)
self.ui.workList.setItem(rows, cols, item)
cols += 1
rows += 1
cols = 0
for key in keys:
if key not in displayed:
self.ui.workList.hideColumn(cols)
cols += 1
self.ui.workList.setHorizontalHeaderLabels(keys) # add header names
self.ui.workList.horizontalHeader().setDefaultAlignment(Qt.AlignLeft) # set alignment
for col in range(len(displayed)):
self.ui.workList.resizeColumnToContents(col)
self.ui.workList.sortByColumn(2, Qt.DescendingOrder)
def getCacheDir(self):
cwd = os.getcwd()
cache_dir = os.path.expanduser(self.config.get("cache_dir", cwd))
if not os.path.isdir(cache_dir):
try:
os.makedirs(cache_dir)
except OSError as error:
if error.errno != errno.EEXIST:
logging.error(format_exception(error))
cache_dir = cwd
return cache_dir
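    # Progress callbacks below run on worker threads; they only emit
    # progress_update_signal, so the status bar is touched exclusively from the
    # Qt main thread via updateProgress().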
def downloadCallback(self, **kwargs):
status = kwargs.get("progress")
if status:
self.progress_update_signal.emit(status)
return True
def uploadCallback(self, **kwargs):
completed = kwargs.get("completed")
total = kwargs.get("total")
file_path = kwargs.get("file_path")
if completed and total:
file_path = " [%s]" % os.path.basename(file_path) if file_path else ""
status = "Uploading file%s: %d%% complete" % (file_path, round(((completed / total) % 100) * 100))
else:
summary = kwargs.get("summary", "")
file_path = "Uploaded file: [%s] " % os.path.basename(file_path) if file_path else ""
status = file_path # + summary
if status:
self.progress_update_signal.emit(status)
return True
def serverProblemMessageBox(self, text, detail):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setWindowTitle("Confirm Action")
msg.setText(text)
msg.setInformativeText(detail + "\n\nWould you like to remove this item from the current worklist?")
msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msg.exec_()
if ret == QMessageBox.No:
return
else:
row = self.ui.workList.getCurrentTableRow()
self.ui.workList.removeRow(row)
return
def retrieveFiles(self):
# if there is an existing segments file, download it first, otherwise just initiate the input file download
seg_mode = self.ui.workList.getCurrentTableItemTextByName("Segmentation Mode")
segments_url = self.ui.workList.getCurrentTableItemTextByName("Segments Filtered URL")
if segments_url:
segments_filename = 'ROI_%s_%s_only.csv' % (
self.ui.workList.getCurrentTableItemTextByName("RID"), seg_mode)
segments_destfile = os.path.abspath(os.path.join(self.tempdir, segments_filename))
self.updateStatus("Downloading file: [%s]" % segments_destfile)
downloadTask = FileRetrieveTask(self.store)
downloadTask.status_update_signal.connect(self.onRetrieveAnalysisFileResult)
self.progress_update_signal.connect(self.updateProgress)
downloadTask.retrieve(
segments_url,
destfile=segments_destfile,
progress_callback=self.downloadCallback)
else:
self.retrieveInputFile()
def retrieveInputFile(self):
# get the main TIFF file for analysis if not already cached
if self.use_3D_viewer:
url = self.ui.workList.getCurrentTableItemTextByName("URL")
filename = 'Image_%s.ome.tiff' % self.ui.workList.getCurrentTableItemTextByName("Source Image")
else:
url = self.ui.workList.getCurrentTableItemTextByName("Npz URL")
filename = 'ROI_%s.npz' % self.ui.workList.getCurrentTableItemTextByName("RID")
destfile = os.path.abspath(os.path.join(self.getCacheDir(), filename))
if not url and not self.use_3D_viewer:
self.resetUI("Unable to launch 2D viewer due to missing NPZ file for %s." %
self.ui.workList.getCurrentTableItemTextByName("RID"))
self.serverProblemMessageBox(
"2D viewer requires NPZ data to be present!",
"The launcher is currently configured to execute the 2D viewer, which requires NPZ files for input. " +
"No NPZ file could be found on the server for this task.")
return
if not os.path.isfile(destfile):
self.updateStatus("Downloading file: [%s]" % destfile)
downloadTask = FileRetrieveTask(self.store)
downloadTask.status_update_signal.connect(self.onRetrieveInputFileResult)
self.progress_update_signal.connect(self.updateProgress)
downloadTask.retrieve(
url,
destfile=destfile,
progress_callback=self.downloadCallback)
else:
self.onRetrieveInputFileResult(True, "The file [%s] already exists" % destfile, None, destfile)
def getSubprocessPath(self):
executable = "synspy-viewer" if self.use_3D_viewer else "synspy-viewer2d"
base_path = None
return os.path.normpath(resource_path(executable, base_path))
def executeViewer(self, file_path):
self.updateStatus("Executing viewer...")
env = os.environ
env["SYNSPY_AUTO_DUMP_LOAD"] = "true"
env["DUMP_PREFIX"] = "./ROI_%s" % self.ui.workList.getCurrentTableItemTextByName("RID")
env["ZYX_SLICE"] = self.ui.workList.getCurrentTableItemTextByName("ZYX Slice")
env["ZYX_IMAGE_GRID"] = "0.4, 0.26, 0.26"
env["SYNSPY_DETECT_NUCLEI"] = str(
"nucleic" == self.ui.workList.getCurrentTableItemTextByName("Segmentation Mode")).lower()
output_path = os.path.join(os.path.dirname(self.config_path), "viewer.log")
classifier = self.ui.workList.getTableItemByName(
self.ui.workList.getCurrentTableRow(), "Classifier").data(Qt.UserRole)
viewerTask = ViewerTask(self.getSubprocessPath(), self.identity == classifier, proc_output_path=output_path)
viewerTask.status_update_signal.connect(self.onSubprocessExecuteResult)
viewerTask.run(file_path, self.tempdir, env)
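    # Once the viewer exits, onSubprocessExecuteResult() prompts the user and
    # uploadAnalysisResult() pushes the ROI_<RID>_*.csv output to Hatrac; the
    # returned URL and chosen state are then written back to the catalog via
    # onUploadFileResult() and onCatalogUpdateResult().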
def uploadAnalysisResult(self, update_state):
qApp.setOverrideCursor(Qt.WaitCursor)
# generate hatrac upload params
basename = "ROI_%s" % self.ui.workList.getCurrentTableItemTextByName("RID")
match = r"%s_.*\.csv$" % basename
output_files = [f for f in os.listdir(self.tempdir)
if os.path.isfile(os.path.join(self.tempdir, f)) and re.match(match, f)]
if not output_files:
self.resetUI("Could not locate output file from viewer subprocess -- aborting.")
return
seg_mode = self.ui.workList.getCurrentTableItemTextByName("Segmentation Mode")
if seg_mode == "synaptic":
extension = "_synaptic_only.csv"
elif seg_mode == "nucleic":
extension = "_nucleic_only.csv"
else:
self.updateStatus("Unknown segmentation mode \"%s\" -- aborting." % seg_mode)
return
file_name = basename + extension
hatrac_path = HATRAC_UPDATE_URL_TEMPLATE % \
(self.ui.workList.getCurrentTableItemTextByName("Subject"), file_name)
file_path = os.path.abspath(os.path.join(self.tempdir, file_name))
# upload to object store
self.updateStatus("Uploading file %s to server..." % file_name)
self.progress_update_signal.connect(self.updateProgress)
uploadTask = FileUploadTask(self.store)
uploadTask.status_update_signal.connect(self.onUploadFileResult)
uploadTask.upload(hatrac_path, file_path, update_state, callback=self.uploadCallback)
def markIncomplete(self):
RID = self.ui.workList.getCurrentTableItemTextByName("RID")
body = [{"RID": RID, "Status": "analysis in progress"}]
self.updateStatus("Updating task status for %s..." % RID)
updateTask = CatalogUpdateTask(self.catalog)
updateTask.status_update_signal.connect(self.onCatalogUpdateResult)
updateTask.update(WORKLIST_STATUS_UPDATE, json=body)
@pyqtSlot()
def taskTriggered(self):
self.ui.logTextBrowser.widget.clear()
self.disableControls()
@pyqtSlot(str)
def updateProgress(self, status):
self.statusBar().showMessage(status)
@pyqtSlot(str, str)
def updateStatus(self, status, detail=None):
logging.info(status + ((": %s" % detail) if detail else ""))
self.statusBar().showMessage(status)
@pyqtSlot(str, str)
def resetUI(self, status, detail=None):
qApp.restoreOverrideCursor()
self.updateStatus(status, detail)
self.enableControls()
@pyqtSlot(str)
def updateLog(self, text):
self.ui.logTextBrowser.widget.appendPlainText(text)
@pyqtSlot(bool, str, str, object)
def onSessionResult(self, success, status, detail, result):
qApp.restoreOverrideCursor()
if success:
self.identity = result["client"]["id"]
self.attributes = result["attributes"]
display_name = result["client"]["full_name"]
self.setWindowTitle("%s (%s - %s)" % (self.windowTitle(), self.server, display_name))
self.ui.actionLaunch.setEnabled(True)
self.ui.actionLogout.setEnabled(True)
self.ui.actionLogin.setEnabled(False)
if not self.is_curator():
self.curator_mode = self.config["curator_mode"] = False
self.on_actionRefresh_triggered()
else:
self.updateStatus("Login required.")
@pyqtSlot()
def on_actionLaunch_triggered(self):
self.disableControls()
qApp.setOverrideCursor(Qt.WaitCursor)
# create working dir (tempdir)
if self.tempdir:
shutil.rmtree(self.tempdir)
self.tempdir = tempfile.mkdtemp(prefix="synspy_")
self.retrieveFiles()
@pyqtSlot(bool, str, str, object)
def onRetrieveAnalysisFileResult(self, success, status, detail, file_path):
if not success:
try:
os.remove(file_path)
except Exception as e:
logging.warning("Unable to remove file [%s]: %s" % (file_path, format_exception(e)))
self.resetUI(status, detail)
self.serverProblemMessageBox(
"Unable to download required input file",
"The in-progress analysis file was not downloaded successfully.")
return
self.retrieveInputFile()
@pyqtSlot(bool, str, str, object)
def onRetrieveInputFileResult(self, success, status, detail, file_path):
if not success:
try:
os.remove(file_path)
except Exception as e:
logging.warning("Unable to remove file [%s]: %s" % (file_path, format_exception(e)))
self.resetUI(status, detail)
self.serverProblemMessageBox(
"Unable to download required input file",
"The image input file was not downloaded successfully.")
return
self.executeViewer(file_path)
@pyqtSlot(bool, str, str, object)
def onSubprocessExecuteResult(self, success, status, detail, is_owner):
qApp.restoreOverrideCursor()
if not success:
self.resetUI(status, detail)
return
if not bool(is_owner) or self.curator_mode:
self.resetUI(status, detail)
return
# prompt for save/complete/discard
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Confirm Action")
msg.setText("How would you like to proceed?")
msg.setInformativeText(
"Select \"Save Progress\" to save your progress and upload the output to the server.\n\n"
"Select \"Complete\" to upload the output to the server and mark this task as completed.\n\n"
"Select \"Discard\" to abort the process and leave the task state unchanged.")
saveButton = msg.addButton("Save Progress", QMessageBox.ActionRole)
completeButton = msg.addButton("Complete", QMessageBox.ActionRole)
discardButton = msg.addButton("Discard", QMessageBox.RejectRole)
msg.exec_()
if msg.clickedButton() == discardButton:
self.resetUI("Aborted.")
return
update_state = None
if msg.clickedButton() == saveButton:
update_state = ("incomplete", "analysis in progress")
elif msg.clickedButton() == completeButton:
update_state = ("complete", "analysis complete")
self.uploadAnalysisResult(update_state)
@pyqtSlot(bool, str, str, object)
def onUploadFileResult(self, success, status, detail, result):
if not success:
self.resetUI(status, detail)
self.serverProblemMessageBox(
"Unable to upload required file(s)",
"One or more required files were not uploaded successfully.")
return
state = result[0]
RID = self.ui.workList.getCurrentTableItemTextByName("RID")
body = [{"RID": RID, "Segments Filtered URL": result[1], "Status": state[1]}]
self.updateStatus("Updating task status for %s..." % RID)
updateTask = CatalogUpdateTask(self.catalog)
updateTask.status_update_signal.connect(self.onCatalogUpdateResult)
updateTask.update(WORKLIST_UPDATE, json=body)
@pyqtSlot(bool, str, str, object)
def onCatalogUpdateResult(self, success, status, detail, result):
if not success:
self.resetUI(status, detail)
self.serverProblemMessageBox(
"Unable to update catalog data",
"The catalog state was not updated successfully.")
return
qApp.restoreOverrideCursor()
self.on_actionRefresh_triggered()
@pyqtSlot()
def on_actionRefresh_triggered(self):
if not self.identity:
self.updateStatus("Unable to get worklist -- not logged in.")
return
qApp.setOverrideCursor(Qt.WaitCursor)
self.disableControls()
self.updateStatus("Refreshing worklist...")
queryTask = CatalogQueryTask(self.catalog)
queryTask.status_update_signal.connect(self.onRefreshResult)
if self.is_curator() and self.curator_mode:
queryTask.query(WORKLIST_CURATOR_QUERY)
else:
queryTask.query(WORKLIST_QUERY % urlquote(self.identity, ""))
@pyqtSlot(bool, str, str, object)
def onRefreshResult(self, success, status, detail, result):
if success:
self.displayWorklist(result)
self.resetUI("Ready.")
else:
self.resetUI(status, detail)
if (self.ui.workList.rowCount() > 0) and self.identity:
self.ui.actionLaunch.setEnabled(True)
else:
self.ui.actionLaunch.setEnabled(False)
@pyqtSlot()
def on_actionLogin_triggered(self):
self.authWindow.show()
self.authWindow.login()
@pyqtSlot()
def on_actionLogout_triggered(self):
self.authWindow.logout()
self.setWindowTitle("%s %s (synspy: %s)" % (self.ui.title, launcher_version, synspy_version))
self.ui.workList.clearContents()
self.ui.workList.setRowCount(0)
self.identity = None
self.ui.actionLaunch.setEnabled(False)
self.ui.actionLogout.setEnabled(False)
self.ui.actionLogin.setEnabled(True)
@pyqtSlot()
def on_actionHelp_triggered(self):
pass
@pyqtSlot()
def on_actionOptions_triggered(self):
OptionsDialog.getOptions(self)
@pyqtSlot()
def on_actionExit_triggered(self):
self.closeEvent()
QCoreApplication.quit()
class OptionsDialog(QDialog):
def __init__(self, parent):
super(OptionsDialog, self).__init__(parent)
self.refreshWorklist = False
self.setWindowTitle("Options")
self.setWindowIcon(QIcon(":/images/synapse.png"))
self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
self.setMinimumWidth(400)
self.resize(400, 100)
layout = QVBoxLayout(self)
self.horizontalLayout = QHBoxLayout()
self.pathLabel = QLabel("Downloads:")
self.horizontalLayout.addWidget(self.pathLabel)
self.pathTextBox = QLineEdit()
self.pathTextBox.setReadOnly(True)
self.cache_path = os.path.expanduser(os.path.normpath(parent.config.get("cache_dir", ".")))
self.pathTextBox.setText(os.path.normpath(self.cache_path))
self.horizontalLayout.addWidget(self.pathTextBox)
self.browseButton = QPushButton("Browse", parent)
self.browseButton.clicked.connect(self.on_actionBrowse_triggered)
self.horizontalLayout.addWidget(self.browseButton)
layout.addLayout(self.horizontalLayout)
curator_mode = QCheckBox("&Curator Mode", parent)
curator_mode.setChecked(parent.curator_mode)
layout.addWidget(curator_mode)
if not parent.is_curator():
curator_mode.setEnabled(False)
curator_mode.toggled.connect(self.onCuratorModeToggled)
use_3D_viewer = QCheckBox("Use &3D Viewer", parent)
use_3D_viewer.setChecked(parent.use_3D_viewer)
layout.addWidget(use_3D_viewer)
if platform.system() != "Linux":
use_3D_viewer.setEnabled(False)
use_3D_viewer.toggled.connect(self.onUse3DViewerToggled)
@pyqtSlot(bool)
def onCuratorModeToggled(self, toggled):
parent = self.parent()
parent.curator_mode = toggled
parent.config["curator_mode"] = toggled
self.refreshWorklist = True
@pyqtSlot(bool)
def onUse3DViewerToggled(self, toggled):
parent = self.parent()
parent.use_3D_viewer = toggled
parent.config["viewer_mode"] = "3D" if toggled else "2D"
@pyqtSlot()
def on_actionBrowse_triggered(self):
parent = self.parent()
dialog = QFileDialog()
path = dialog.getExistingDirectory(self,
"Select Directory",
self.cache_path,
QFileDialog.ShowDirsOnly)
if path:
self.pathTextBox.setText(os.path.normpath(path))
parent.config["cache_dir"] = os.path.normpath(path)
@staticmethod
def getOptions(parent):
dialog = OptionsDialog(parent)
dialog.exec_()
write_config(parent.config_path, parent.config)
if dialog.refreshWorklist:
parent.on_actionRefresh_triggered()
# noinspection PyArgumentList
class MainWindowUI(object):
title = "Synspy Launcher"
def __init__(self, MainWin):
super(MainWindowUI, self).__init__()
# Main Window
MainWin.setObjectName("MainWindow")
MainWin.setWindowTitle(MainWin.tr("%s %s (synspy: %s)" % (self.title, launcher_version, synspy_version)))
MainWin.resize(800, 600)
self.centralWidget = QWidget(MainWin)
self.centralWidget.setObjectName("centralWidget")
MainWin.setCentralWidget(self.centralWidget)
self.verticalLayout = QVBoxLayout(self.centralWidget)
self.verticalLayout.setContentsMargins(11, 11, 11, 11)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
# Splitter for Worklist/Log
self.splitter = QSplitter(Qt.Vertical)
# Table View (Work list)
self.workList = TableWidget(self.centralWidget)
self.workList.setObjectName("tableWidget")
self.workList.setStyleSheet(
"""
QTableWidget {
border: 2px solid grey;
border-radius: 5px;
}
""")
self.workList.setEditTriggers(QAbstractItemView.NoEditTriggers) # use NoEditTriggers to disable editing
self.workList.setAlternatingRowColors(True)
self.workList.setSelectionBehavior(QAbstractItemView.SelectRows)
self.workList.setSelectionMode(QAbstractItemView.SingleSelection)
self.workList.verticalHeader().setDefaultSectionSize(18) # tighten up the row size
self.workList.horizontalHeader().setStretchLastSection(True)
# self.workList.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.workList.setSortingEnabled(True) # allow sorting
self.workList.setContextMenuPolicy(Qt.ActionsContextMenu)
self.workList.doubleClicked.connect(MainWin.on_actionLaunch_triggered)
self.splitter.addWidget(self.workList)
# Log Widget
self.logTextBrowser = QPlainTextEditLogger(self.centralWidget)
self.logTextBrowser.widget.setObjectName("logTextBrowser")
self.logTextBrowser.widget.setStyleSheet(
"""
QPlainTextEdit {
border: 2px solid grey;
border-radius: 5px;
background-color: lightgray;
}
""")
self.splitter.addWidget(self.logTextBrowser.widget)
# add splitter
self.splitter.setSizes([600, 100])
self.verticalLayout.addWidget(self.splitter)
# Actions
# Launch
self.actionLaunch = QAction(MainWin)
self.actionLaunch.setObjectName("actionLaunch")
self.actionLaunch.setText(MainWin.tr("Launch Analysis"))
self.actionLaunch.setToolTip(MainWin.tr("Launch the synspy-viewer process"))
self.actionLaunch.setShortcut(MainWin.tr("Ctrl+L"))
# Refresh
self.actionRefresh = QAction(MainWin)
self.actionRefresh.setObjectName("actionRefresh")
self.actionRefresh.setText(MainWin.tr("Refresh Work List"))
self.actionRefresh.setToolTip(MainWin.tr("Refresh the work list"))
self.actionRefresh.setShortcut(MainWin.tr("Ctrl+R"))
# Options
self.actionOptions = QAction(MainWin)
self.actionOptions.setObjectName("actionOptions")
self.actionOptions.setText(MainWin.tr("Options"))
self.actionOptions.setToolTip(MainWin.tr("Configuration Options"))
self.actionOptions.setShortcut(MainWin.tr("Ctrl+P"))
# Login
self.actionLogin = QAction(MainWin)
self.actionLogin.setObjectName("actionLogin")
self.actionLogin.setText(MainWin.tr("Login"))
self.actionLogin.setToolTip(MainWin.tr("Login to the server"))
self.actionLogin.setShortcut(MainWin.tr("Ctrl+G"))
# Logout
self.actionLogout = QAction(MainWin)
self.actionLogout.setObjectName("actionLogout")
self.actionLogout.setText(MainWin.tr("Logout"))
self.actionLogout.setToolTip(MainWin.tr("Logout of the server"))
self.actionLogout.setShortcut(MainWin.tr("Ctrl+O"))
# Exit
self.actionExit = QAction(MainWin)
self.actionExit.setObjectName("actionExit")
self.actionExit.setText(MainWin.tr("Exit"))
self.actionExit.setToolTip(MainWin.tr("Exit the application"))
self.actionExit.setShortcut(MainWin.tr("Ctrl+Z"))
# Help
self.actionHelp = QAction(MainWin)
self.actionHelp.setObjectName("actionHelp")
self.actionHelp.setText(MainWin.tr("Help"))
self.actionHelp.setToolTip(MainWin.tr("Help"))
self.actionHelp.setShortcut(MainWin.tr("Ctrl+H"))
# Mark Incomplete
self.markIncompleteAction = QAction('Mark Incomplete', self.workList)
self.markIncompleteAction.triggered.connect(MainWin.markIncomplete)
# Tool Bar
self.mainToolBar = QToolBar(MainWin)
self.mainToolBar.setObjectName("mainToolBar")
self.mainToolBar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
MainWin.addToolBar(Qt.TopToolBarArea, self.mainToolBar)
# Launch
self.mainToolBar.addAction(self.actionLaunch)
self.actionLaunch.setIcon(qApp.style().standardIcon(QStyle.SP_MediaPlay))
# Reload
self.mainToolBar.addAction(self.actionRefresh)
self.actionRefresh.setIcon(qApp.style().standardIcon(QStyle.SP_BrowserReload))
# Options
self.mainToolBar.addAction(self.actionOptions)
self.actionOptions.setIcon(qApp.style().standardIcon(QStyle.SP_FileDialogDetailedView))
# this spacer right justifies everything that comes after it
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.mainToolBar.addWidget(spacer)
# Login
self.mainToolBar.addAction(self.actionLogin)
self.actionLogin.setIcon(qApp.style().standardIcon(QStyle.SP_DialogApplyButton))
# Logout
self.mainToolBar.addAction(self.actionLogout)
self.actionLogout.setIcon(qApp.style().standardIcon(QStyle.SP_DialogOkButton))
# Help
#self.mainToolBar.addAction(self.actionHelp)
self.actionHelp.setIcon(qApp.style().standardIcon(QStyle.SP_MessageBoxQuestion))
# Exit
self.mainToolBar.addAction(self.actionExit)
self.actionExit.setIcon(qApp.style().standardIcon(QStyle.SP_DialogCancelButton))
# Status Bar
self.statusBar = QStatusBar(MainWin)
self.statusBar.setToolTip("")
self.statusBar.setStatusTip("")
self.statusBar.setObjectName("statusBar")
MainWin.setStatusBar(self.statusBar)
# finalize UI setup
QMetaObject.connectSlotsByName(MainWin)
|
|
import logging
import os
import subprocess
from typing import Any, Dict, List, Set
import bson
from django.conf import settings
from django.forms.models import model_to_dict
from zerver.data_import.import_util import (
SubscriberHandler,
ZerverFieldsT,
build_huddle,
build_huddle_subscriptions,
build_message,
build_personal_subscriptions,
build_realm,
build_recipients,
build_stream,
build_stream_subscriptions,
build_user_profile,
build_zerver_realm,
create_converted_data_files,
make_subscriber_map,
make_user_messages,
)
from zerver.data_import.sequencer import NEXT_ID, IdMapper
from zerver.data_import.user_handler import UserHandler
from zerver.lib.emoji import name_to_codepoint
from zerver.lib.utils import process_list_in_batches
from zerver.models import Reaction, Recipient, UserProfile
def make_realm(
realm_id: int, realm_subdomain: str, domain_name: str, rc_instance: Dict[str, Any]
) -> ZerverFieldsT:
created_at = float(rc_instance["_createdAt"].timestamp())
zerver_realm = build_zerver_realm(realm_id, realm_subdomain, created_at, "Rocket.Chat")
realm = build_realm(zerver_realm, realm_id, domain_name)
# We may override these later.
realm["zerver_defaultstream"] = []
return realm
def process_users(
user_id_to_user_map: Dict[str, Dict[str, Any]],
realm_id: int,
domain_name: str,
user_handler: UserHandler,
user_id_mapper: IdMapper,
) -> None:
realm_owners: List[int] = []
bots: List[int] = []
for rc_user_id in user_id_to_user_map:
user_dict = user_id_to_user_map[rc_user_id]
is_mirror_dummy = False
is_bot = False
is_active = True
# Rocket.Chat has three user types:
# "user": This is a regular user of the system.
# "bot": A special user types for bots.
# "unknown": This usually represents a livechat guest.
if user_dict["type"] != "user":
is_active = False
if user_dict["type"] == "bot":
is_bot = True
else:
is_mirror_dummy = True
if not user_dict.get("emails"):
user_dict["emails"] = [
{
"address": "{}-{}@{}".format(
user_dict["username"], user_dict["type"], domain_name
)
}
]
# TODO: Change this to use actual exported avatar
avatar_source = "G"
full_name = user_dict["name"]
id = user_id_mapper.get(rc_user_id)
delivery_email = user_dict["emails"][0]["address"]
email = user_dict["emails"][0]["address"]
short_name = user_dict["username"]
date_joined = float(user_dict["createdAt"].timestamp())
timezone = "UTC"
role = UserProfile.ROLE_MEMBER
if "admin" in user_dict["roles"]:
role = UserProfile.ROLE_REALM_OWNER
realm_owners.append(id)
elif "guest" in user_dict["roles"]:
role = UserProfile.ROLE_GUEST
if is_bot:
bots.append(id)
user = build_user_profile(
avatar_source=avatar_source,
date_joined=date_joined,
delivery_email=delivery_email,
email=email,
full_name=full_name,
id=id,
is_active=is_active,
role=role,
is_mirror_dummy=is_mirror_dummy,
realm_id=realm_id,
short_name=short_name,
timezone=timezone,
is_bot=is_bot,
bot_type=1 if is_bot else None,
)
user_handler.add_user(user)
# Set the first realm_owner as the owner of
# all the bots.
if realm_owners:
for bot_id in bots:
bot_user = user_handler.get_user(user_id=bot_id)
bot_user["bot_owner"] = realm_owners[0]
def convert_channel_data(
room_id_to_room_map: Dict[str, Dict[str, Any]],
team_id_to_team_map: Dict[str, Dict[str, Any]],
stream_id_mapper: IdMapper,
realm_id: int,
) -> List[ZerverFieldsT]:
streams = []
for rc_room_id in room_id_to_room_map:
channel_dict = room_id_to_room_map[rc_room_id]
date_created = float(channel_dict["ts"].timestamp())
stream_id = stream_id_mapper.get(rc_room_id)
invite_only = channel_dict["t"] == "p"
stream_name = channel_dict["name"]
stream_desc = channel_dict.get("description", "")
if channel_dict.get("teamId"):
if channel_dict.get("teamMain") is True:
stream_name = "[TEAM] " + stream_name
else:
stream_desc = "[Team {} channel]. {}".format(
team_id_to_team_map[channel_dict["teamId"]]["name"], stream_desc
)
# If the channel is read-only, then only admins and moderators
# should be allowed to post in the converted Zulip stream.
# For more details: https://zulip.com/help/stream-sending-policy
#
# See `Stream` model in `zerver/models.py` to know about what each
# number represents.
stream_post_policy = 4 if channel_dict.get("ro", False) else 1
stream = build_stream(
date_created=date_created,
realm_id=realm_id,
name=stream_name,
description=stream_desc,
stream_id=stream_id,
deactivated=False,
invite_only=invite_only,
stream_post_policy=stream_post_policy,
)
streams.append(stream)
return streams
def convert_stream_subscription_data(
user_id_to_user_map: Dict[str, Dict[str, Any]],
dsc_id_to_dsc_map: Dict[str, Dict[str, Any]],
zerver_stream: List[ZerverFieldsT],
stream_id_mapper: IdMapper,
user_id_mapper: IdMapper,
subscriber_handler: SubscriberHandler,
) -> None:
stream_members_map: Dict[int, Set[int]] = {}
for rc_user_id in user_id_to_user_map:
user_dict = user_id_to_user_map[rc_user_id]
if not user_dict.get("__rooms"):
continue
for channel in user_dict["__rooms"]:
if channel in dsc_id_to_dsc_map:
# Ignore discussion rooms as these are not
# imported as streams, but topics.
continue
stream_id = stream_id_mapper.get(channel)
if stream_id not in stream_members_map:
stream_members_map[stream_id] = set()
stream_members_map[stream_id].add(user_id_mapper.get(rc_user_id))
for stream in zerver_stream:
if stream["id"] in stream_members_map:
users = stream_members_map[stream["id"]]
else:
users = set()
# Mark a stream with no subscribers
# as deactivated.
stream["deactivated"] = True
subscriber_handler.set_info(users=users, stream_id=stream["id"])
def convert_huddle_data(
huddle_id_to_huddle_map: Dict[str, Dict[str, Any]],
huddle_id_mapper: IdMapper,
user_id_mapper: IdMapper,
subscriber_handler: SubscriberHandler,
) -> List[ZerverFieldsT]:
zerver_huddle: List[ZerverFieldsT] = []
for rc_huddle_id in huddle_id_to_huddle_map:
huddle_id = huddle_id_mapper.get(rc_huddle_id)
huddle = build_huddle(huddle_id)
zerver_huddle.append(huddle)
huddle_dict = huddle_id_to_huddle_map[rc_huddle_id]
huddle_user_ids = set()
for rc_user_id in huddle_dict["uids"]:
huddle_user_ids.add(user_id_mapper.get(rc_user_id))
subscriber_handler.set_info(
users=huddle_user_ids,
huddle_id=huddle_id,
)
return zerver_huddle
def build_reactions(
total_reactions: List[ZerverFieldsT],
reactions: List[Dict[str, Any]],
message_id: int,
) -> None:
# For the Unicode emoji codes, we use the equivalent of the
# 'emoji_name_to_emoji_code' function in 'zerver/lib/emoji' here.
for reaction in reactions:
emoji_name = reaction["name"]
user_id = reaction["user_id"]
# Check in Unicode emoji
if emoji_name in name_to_codepoint:
emoji_code = name_to_codepoint[emoji_name]
reaction_type = Reaction.UNICODE_EMOJI
else: # nocoverage
continue
reaction_id = NEXT_ID("reaction")
reaction = Reaction(
id=reaction_id,
emoji_code=emoji_code,
emoji_name=emoji_name,
reaction_type=reaction_type,
)
reaction_dict = model_to_dict(reaction, exclude=["message", "user_profile"])
reaction_dict["message"] = message_id
reaction_dict["user_profile"] = user_id
total_reactions.append(reaction_dict)
def process_raw_message_batch(
realm_id: int,
raw_messages: List[Dict[str, Any]],
subscriber_map: Dict[int, Set[int]],
user_handler: UserHandler,
is_pm_data: bool,
output_dir: str,
total_reactions: List[ZerverFieldsT],
) -> None:
def fix_mentions(content: str, mention_user_ids: Set[int]) -> str:
for user_id in mention_user_ids:
user = user_handler.get_user(user_id=user_id)
rc_mention = "@{short_name}".format(**user)
zulip_mention = "@**{full_name}**".format(**user)
content = content.replace(rc_mention, zulip_mention)
content = content.replace("@all", "@**all**")
# We don't have an equivalent for Rocket.Chat's @here mention
# which mentions all users active in the channel.
content = content.replace("@here", "@**all**")
return content
mention_map: Dict[int, Set[int]] = {}
zerver_message: List[ZerverFieldsT] = []
for raw_message in raw_messages:
message_id = NEXT_ID("message")
mention_user_ids = raw_message["mention_user_ids"]
mention_map[message_id] = mention_user_ids
content = fix_mentions(
content=raw_message["content"],
mention_user_ids=mention_user_ids,
)
if len(content) > 10000: # nocoverage
logging.info("skipping too-long message of length %s", len(content))
continue
date_sent = raw_message["date_sent"]
sender_user_id = raw_message["sender_id"]
recipient_id = raw_message["recipient_id"]
rendered_content = None
topic_name = raw_message["topic_name"]
message = build_message(
content=content,
message_id=message_id,
date_sent=date_sent,
recipient_id=recipient_id,
rendered_content=rendered_content,
topic_name=topic_name,
user_id=sender_user_id,
has_attachment=False,
)
zerver_message.append(message)
build_reactions(
total_reactions=total_reactions,
reactions=raw_message["reactions"],
message_id=message_id,
)
zerver_usermessage = make_user_messages(
zerver_message=zerver_message,
subscriber_map=subscriber_map,
is_pm_data=is_pm_data,
mention_map=mention_map,
)
message_json = dict(
zerver_message=zerver_message,
zerver_usermessage=zerver_usermessage,
)
dump_file_id = NEXT_ID("dump_file_id" + str(realm_id))
message_file = f"/messages-{dump_file_id:06}.json"
create_converted_data_files(message_json, output_dir, message_file)
def process_messages(
realm_id: int,
messages: List[Dict[str, Any]],
subscriber_map: Dict[int, Set[int]],
is_pm_data: bool,
username_to_user_id_map: Dict[str, str],
user_id_mapper: IdMapper,
user_handler: UserHandler,
user_id_to_recipient_id: Dict[int, int],
stream_id_mapper: IdMapper,
stream_id_to_recipient_id: Dict[int, int],
huddle_id_mapper: IdMapper,
huddle_id_to_recipient_id: Dict[int, int],
dsc_id_to_dsc_map: Dict[str, Dict[str, Any]],
direct_id_to_direct_map: Dict[str, Dict[str, Any]],
huddle_id_to_huddle_map: Dict[str, Dict[str, Any]],
total_reactions: List[ZerverFieldsT],
output_dir: str,
) -> None:
def list_reactions(reactions: Dict[str, Dict[str, Any]]) -> List[Dict[str, Any]]:
# List of dictionaries of form:
# {"name": "smile", "user_id": 2}
reactions_list: List[Dict[str, Any]] = []
for react_code in reactions:
name = react_code.split(":")[1]
usernames = reactions[react_code]["usernames"]
for username in usernames:
rc_user_id = username_to_user_id_map[username]
user_id = user_id_mapper.get(rc_user_id)
reactions_list.append({"name": name, "user_id": user_id})
return reactions_list
def message_to_dict(message: Dict[str, Any]) -> Dict[str, Any]:
rc_sender_id = message["u"]["_id"]
sender_id = user_id_mapper.get(rc_sender_id)
content = message["msg"]
if message.get("reactions"):
reactions = list_reactions(message["reactions"])
else:
reactions = []
message_dict = dict(
sender_id=sender_id,
content=content,
date_sent=int(message["ts"].timestamp()),
reactions=reactions,
)
# Add recipient_id and topic to message_dict
if is_pm_data:
# Message is in a PM or a huddle.
rc_channel_id = message["rid"]
if rc_channel_id in huddle_id_to_huddle_map:
huddle_id = huddle_id_mapper.get(rc_channel_id)
message_dict["recipient_id"] = huddle_id_to_recipient_id[huddle_id]
else:
rc_member_ids = direct_id_to_direct_map[rc_channel_id]["uids"]
if rc_sender_id == rc_member_ids[0]:
zulip_member_id = user_id_mapper.get(rc_member_ids[1])
message_dict["recipient_id"] = user_id_to_recipient_id[zulip_member_id]
else:
zulip_member_id = user_id_mapper.get(rc_member_ids[0])
message_dict["recipient_id"] = user_id_to_recipient_id[zulip_member_id]
# PMs and huddles don't have topics, but topic_name field is required in `build_message`.
message_dict["topic_name"] = ""
elif message["rid"] in dsc_id_to_dsc_map:
# Message is in a discussion
dsc_channel = dsc_id_to_dsc_map[message["rid"]]
parent_channel_id = dsc_channel["prid"]
stream_id = stream_id_mapper.get(parent_channel_id)
message_dict["recipient_id"] = stream_id_to_recipient_id[stream_id]
message_dict["topic_name"] = f'{dsc_channel["fname"]} (Imported from Rocket.Chat)'
else:
stream_id = stream_id_mapper.get(message["rid"])
message_dict["recipient_id"] = stream_id_to_recipient_id[stream_id]
message_dict["topic_name"] = "Imported from Rocket.Chat"
# Add mentions to message_dict
mention_user_ids = set()
for mention in message.get("mentions", []):
mention_id = mention["_id"]
if mention_id in ["all", "here"]:
continue
user_id = user_id_mapper.get(mention_id)
mention_user_ids.add(user_id)
message_dict["mention_user_ids"] = mention_user_ids
return message_dict
raw_messages: List[Dict[str, Any]] = []
for message in messages:
if message.get("t") is not None:
# Messages with a type are system notifications like user_joined
# that we don't include.
continue
raw_messages.append(message_to_dict(message))
def process_batch(lst: List[Dict[str, Any]]) -> None:
process_raw_message_batch(
realm_id=realm_id,
raw_messages=lst,
subscriber_map=subscriber_map,
user_handler=user_handler,
is_pm_data=is_pm_data,
output_dir=output_dir,
total_reactions=total_reactions,
)
chunk_size = 1000
process_list_in_batches(
lst=raw_messages,
chunk_size=chunk_size,
process_batch=process_batch,
)
def separate_channel_and_private_messages(
messages: List[Dict[str, Any]],
direct_id_to_direct_map: Dict[str, Dict[str, Any]],
huddle_id_to_huddle_map: Dict[str, Dict[str, Any]],
channel_messages: List[Dict[str, Any]],
private_messages: List[Dict[str, Any]],
) -> None:
private_channels_list = list(direct_id_to_direct_map.keys()) + list(
huddle_id_to_huddle_map.keys()
)
for message in messages:
if not message.get("rid"):
# Message does not belong to any channel (might be
# related to livechat), so ignore all such messages.
continue
if message["rid"] in private_channels_list:
private_messages.append(message)
else:
channel_messages.append(message)
def map_receiver_id_to_recipient_id(
zerver_recipient: List[ZerverFieldsT],
stream_id_to_recipient_id: Dict[int, int],
huddle_id_to_recipient_id: Dict[int, int],
user_id_to_recipient_id: Dict[int, int],
) -> None:
# receiver_id represents stream_id/huddle_id/user_id
for recipient in zerver_recipient:
if recipient["type"] == Recipient.STREAM:
stream_id_to_recipient_id[recipient["type_id"]] = recipient["id"]
elif recipient["type"] == Recipient.HUDDLE:
huddle_id_to_recipient_id[recipient["type_id"]] = recipient["id"]
elif recipient["type"] == Recipient.PERSONAL:
user_id_to_recipient_id[recipient["type_id"]] = recipient["id"]
def categorize_channels_and_map_with_id(
channel_data: List[Dict[str, Any]],
room_id_to_room_map: Dict[str, Dict[str, Any]],
team_id_to_team_map: Dict[str, Dict[str, Any]],
dsc_id_to_dsc_map: Dict[str, Dict[str, Any]],
direct_id_to_direct_map: Dict[str, Dict[str, Any]],
huddle_id_to_huddle_map: Dict[str, Dict[str, Any]],
) -> None:
for channel in channel_data:
if channel.get("prid"):
dsc_id_to_dsc_map[channel["_id"]] = channel
elif channel["t"] == "d":
if len(channel["uids"]) > 2:
huddle_id_to_huddle_map[channel["_id"]] = channel
else:
direct_id_to_direct_map[channel["_id"]] = channel
else:
room_id_to_room_map[channel["_id"]] = channel
if channel.get("teamMain") is True:
team_id_to_team_map[channel["teamId"]] = channel
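# Illustrative sketch (hypothetical room documents, not taken from a real export) of how
# categorize_channels_and_map_with_id above routes Rocket.Chat rooms:
#   {"_id": "r1", "t": "c", "name": "general"}                 -> room_id_to_room_map
#   {"_id": "r2", "t": "d", "uids": ["u1", "u2"]}              -> direct_id_to_direct_map
#   {"_id": "r3", "t": "d", "uids": ["u1", "u2", "u3"]}        -> huddle_id_to_huddle_map
#   {"_id": "r4", "t": "c", "prid": "r1", "fname": "Planning"} -> dsc_id_to_dsc_map
# A room with "teamMain" set to True is additionally recorded in team_id_to_team_map
# under its "teamId".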
def map_username_to_user_id(user_id_to_user_map: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
username_to_user_id_map: Dict[str, str] = {}
for user_id, user_dict in user_id_to_user_map.items():
username_to_user_id_map[user_dict["username"]] = user_id
return username_to_user_id_map
def map_user_id_to_user(user_data_list: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
user_id_to_user_map = {}
for user in user_data_list:
user_id_to_user_map[user["_id"]] = user
return user_id_to_user_map
def rocketchat_data_to_dict(rocketchat_data_dir: str) -> Dict[str, Any]:
rocketchat_data: Dict[str, Any] = {}
rocketchat_data["instance"] = []
rocketchat_data["user"] = []
rocketchat_data["avatar"] = {"avatar": [], "file": [], "chunk": []}
rocketchat_data["room"] = []
rocketchat_data["message"] = []
# Get instance
with open(os.path.join(rocketchat_data_dir, "instances.bson"), "rb") as fcache:
rocketchat_data["instance"] = bson.decode_all(fcache.read())
# Get user
with open(os.path.join(rocketchat_data_dir, "users.bson"), "rb") as fcache:
rocketchat_data["user"] = bson.decode_all(fcache.read())
# Get avatar
with open(os.path.join(rocketchat_data_dir, "rocketchat_avatars.bson"), "rb") as fcache:
rocketchat_data["avatar"]["avatar"] = bson.decode_all(fcache.read())
with open(os.path.join(rocketchat_data_dir, "rocketchat_avatars.chunks.bson"), "rb") as fcache:
rocketchat_data["avatar"]["chunk"] = bson.decode_all(fcache.read())
with open(os.path.join(rocketchat_data_dir, "rocketchat_avatars.files.bson"), "rb") as fcache:
rocketchat_data["avatar"]["file"] = bson.decode_all(fcache.read())
# Get room
with open(os.path.join(rocketchat_data_dir, "rocketchat_room.bson"), "rb") as fcache:
rocketchat_data["room"] = bson.decode_all(fcache.read())
# Get messages
with open(os.path.join(rocketchat_data_dir, "rocketchat_message.bson"), "rb") as fcache:
rocketchat_data["message"] = bson.decode_all(fcache.read())
return rocketchat_data
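# For reference, rocketchat_data_to_dict above assumes a mongodump-style export directory
# containing at least the BSON files it opens:
#   instances.bson, users.bson, rocketchat_avatars.bson, rocketchat_avatars.chunks.bson,
#   rocketchat_avatars.files.bson, rocketchat_room.bson, rocketchat_message.bson
# The decoded documents are returned under the "instance", "user", "avatar", "room" and
# "message" keys used by do_convert_data below.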
def do_convert_data(rocketchat_data_dir: str, output_dir: str) -> None:
# Get all required exported data in a dictionary
rocketchat_data = rocketchat_data_to_dict(rocketchat_data_dir)
# Subdomain is set by the user while running the import command
realm_subdomain = ""
realm_id = 0
domain_name = settings.EXTERNAL_HOST
realm = make_realm(realm_id, realm_subdomain, domain_name, rocketchat_data["instance"][0])
user_id_to_user_map: Dict[str, Dict[str, Any]] = map_user_id_to_user(rocketchat_data["user"])
username_to_user_id_map: Dict[str, str] = map_username_to_user_id(user_id_to_user_map)
user_handler = UserHandler()
subscriber_handler = SubscriberHandler()
user_id_mapper = IdMapper()
stream_id_mapper = IdMapper()
huddle_id_mapper = IdMapper()
process_users(
user_id_to_user_map=user_id_to_user_map,
realm_id=realm_id,
domain_name=domain_name,
user_handler=user_handler,
user_id_mapper=user_id_mapper,
)
room_id_to_room_map: Dict[str, Dict[str, Any]] = {}
team_id_to_team_map: Dict[str, Dict[str, Any]] = {}
dsc_id_to_dsc_map: Dict[str, Dict[str, Any]] = {}
direct_id_to_direct_map: Dict[str, Dict[str, Any]] = {}
huddle_id_to_huddle_map: Dict[str, Dict[str, Any]] = {}
categorize_channels_and_map_with_id(
channel_data=rocketchat_data["room"],
room_id_to_room_map=room_id_to_room_map,
team_id_to_team_map=team_id_to_team_map,
dsc_id_to_dsc_map=dsc_id_to_dsc_map,
direct_id_to_direct_map=direct_id_to_direct_map,
huddle_id_to_huddle_map=huddle_id_to_huddle_map,
)
zerver_stream = convert_channel_data(
room_id_to_room_map=room_id_to_room_map,
team_id_to_team_map=team_id_to_team_map,
stream_id_mapper=stream_id_mapper,
realm_id=realm_id,
)
realm["zerver_stream"] = zerver_stream
# Add stream subscription data to `subscriber_handler`
convert_stream_subscription_data(
user_id_to_user_map=user_id_to_user_map,
dsc_id_to_dsc_map=dsc_id_to_dsc_map,
zerver_stream=zerver_stream,
stream_id_mapper=stream_id_mapper,
user_id_mapper=user_id_mapper,
subscriber_handler=subscriber_handler,
)
zerver_huddle = convert_huddle_data(
huddle_id_to_huddle_map=huddle_id_to_huddle_map,
huddle_id_mapper=huddle_id_mapper,
user_id_mapper=user_id_mapper,
subscriber_handler=subscriber_handler,
)
realm["zerver_huddle"] = zerver_huddle
all_users = user_handler.get_all_users()
zerver_recipient = build_recipients(
zerver_userprofile=all_users,
zerver_stream=zerver_stream,
zerver_huddle=zerver_huddle,
)
realm["zerver_recipient"] = zerver_recipient
stream_subscriptions = build_stream_subscriptions(
get_users=subscriber_handler.get_users,
zerver_recipient=zerver_recipient,
zerver_stream=zerver_stream,
)
huddle_subscriptions = build_huddle_subscriptions(
get_users=subscriber_handler.get_users,
zerver_recipient=zerver_recipient,
zerver_huddle=zerver_huddle,
)
personal_subscriptions = build_personal_subscriptions(
zerver_recipient=zerver_recipient,
)
zerver_subscription = personal_subscriptions + stream_subscriptions + huddle_subscriptions
realm["zerver_subscription"] = zerver_subscription
subscriber_map = make_subscriber_map(
zerver_subscription=zerver_subscription,
)
stream_id_to_recipient_id: Dict[int, int] = {}
huddle_id_to_recipient_id: Dict[int, int] = {}
user_id_to_recipient_id: Dict[int, int] = {}
map_receiver_id_to_recipient_id(
zerver_recipient=zerver_recipient,
stream_id_to_recipient_id=stream_id_to_recipient_id,
huddle_id_to_recipient_id=huddle_id_to_recipient_id,
user_id_to_recipient_id=user_id_to_recipient_id,
)
channel_messages: List[Dict[str, Any]] = []
private_messages: List[Dict[str, Any]] = []
separate_channel_and_private_messages(
messages=rocketchat_data["message"],
direct_id_to_direct_map=direct_id_to_direct_map,
huddle_id_to_huddle_map=huddle_id_to_huddle_map,
channel_messages=channel_messages,
private_messages=private_messages,
)
total_reactions: List[ZerverFieldsT] = []
# Process channel messages
process_messages(
realm_id=realm_id,
messages=channel_messages,
subscriber_map=subscriber_map,
is_pm_data=False,
username_to_user_id_map=username_to_user_id_map,
user_id_mapper=user_id_mapper,
user_handler=user_handler,
user_id_to_recipient_id=user_id_to_recipient_id,
stream_id_mapper=stream_id_mapper,
stream_id_to_recipient_id=stream_id_to_recipient_id,
huddle_id_mapper=huddle_id_mapper,
huddle_id_to_recipient_id=huddle_id_to_recipient_id,
dsc_id_to_dsc_map=dsc_id_to_dsc_map,
direct_id_to_direct_map=direct_id_to_direct_map,
huddle_id_to_huddle_map=huddle_id_to_huddle_map,
total_reactions=total_reactions,
output_dir=output_dir,
)
# Process private messages
process_messages(
realm_id=realm_id,
messages=private_messages,
subscriber_map=subscriber_map,
is_pm_data=True,
username_to_user_id_map=username_to_user_id_map,
user_id_mapper=user_id_mapper,
user_handler=user_handler,
user_id_to_recipient_id=user_id_to_recipient_id,
stream_id_mapper=stream_id_mapper,
stream_id_to_recipient_id=stream_id_to_recipient_id,
huddle_id_mapper=huddle_id_mapper,
huddle_id_to_recipient_id=huddle_id_to_recipient_id,
dsc_id_to_dsc_map=dsc_id_to_dsc_map,
direct_id_to_direct_map=direct_id_to_direct_map,
huddle_id_to_huddle_map=huddle_id_to_huddle_map,
total_reactions=total_reactions,
output_dir=output_dir,
)
realm["zerver_reaction"] = total_reactions
realm["zerver_userprofile"] = user_handler.get_all_users()
realm["sort_by_date"] = True
create_converted_data_files(realm, output_dir, "/realm.json")
# TODO: Add support for importing avatars
create_converted_data_files([], output_dir, "/avatars/records.json")
# TODO: Add support for importing uploads
create_converted_data_files([], output_dir, "/uploads/records.json")
# TODO: Add support for importing attachments
attachment: Dict[str, List[Any]] = {"zerver_attachment": []}
create_converted_data_files(attachment, output_dir, "/attachment.json")
logging.info("Start making tarball")
subprocess.check_call(["tar", "-czf", output_dir + ".tar.gz", output_dir, "-P"])
logging.info("Done making tarball")
|
|
# -*- coding: utf-8 -*-
import pytest
import sys
import cPickle as pickle
from test_base_class import TestBaseClass
aerospike = pytest.importorskip("aerospike")
try:
from aerospike.exception import *
except ImportError:
print "Please install aerospike python client."
sys.exit(1)
class TestMapKeysIndex(object):
def setup_class(cls):
"""
Setup method.
"""
hostlist, user, password = TestBaseClass.get_hosts()
config = {'hosts': hostlist}
if user is None and password is None:
TestMapKeysIndex.client = aerospike.client(config).connect()
else:
TestMapKeysIndex.client = aerospike.client(config).connect(
user, password)
def teardown_class(cls):
TestMapKeysIndex.client.close()
def setup_method(self, method):
"""
Setup method.
"""
for i in xrange(5):
key = ('test', u'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'addr': 'name%s' % (str(i)),
'numeric_map': {"a": 1,
"b": 2,
"c": 3},
'string_map': {"sa": "a",
"sb": "b",
"sc": "c"},
'age': i,
'no': i
}
TestMapKeysIndex.client.put(key, rec)
def teardown_method(self, method):
"""
Teardown method.
"""
for i in xrange(5):
key = ('test', u'demo', i)
TestMapKeysIndex.client.remove(key)
def test_mapkeysindex_with_no_parameters(self):
"""
Invoke index_mapkeys_create() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
TestMapKeysIndex.client.index_map_keys_create()
assert "Required argument 'ns' (pos 1) not found" in typeError.value
def test_mapkeysindex_with_correct_parameters(self):
"""
Invoke index_mapkeys_create() with correct arguments
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'string_map', aerospike.INDEX_STRING,
'test_string_map_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', 'test_string_map_index',
policy)
def test_mapkeysindex_with_correct_parameters_numeric(self):
"""
Invoke index_mapkeys_create() with correct arguments
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'numeric_map', aerospike.INDEX_NUMERIC,
'test_numeric_map_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', 'test_numeric_map_index',
policy)
def test_mapkeys_with_correct_parameters_set_length_extra(self):
"""
Invoke index_map_keys_create() with correct arguments and set length extra
"""
set_name = 'a'
for i in xrange(100):
set_name = set_name + 'a'
policy = {}
with pytest.raises(Exception) as exception:
retobj = TestMapKeysIndex.client.index_map_keys_create('test', set_name,
'string_map', aerospike.INDEX_STRING, "test_string_map_index", policy)
assert exception.value[0] == 4
assert exception.value[1] == 'Invalid Set Name'
def test_mapkeysindex_with_incorrect_namespace(self):
"""
Invoke createindex() with incorrect namespace
"""
policy = {}
try:
retobj = TestMapKeysIndex.client.index_map_keys_create( 'test1', 'demo',
'numeric_map', aerospike.INDEX_NUMERIC, 'test_numeric_map_index', policy )
except InvalidRequest as exception:
assert exception.code == 4
assert exception.msg == 'Namespace Not Found'
def test_mapkeysindex_with_incorrect_set(self):
"""
Invoke createindex() with incorrect set
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo1', 'numeric_map', aerospike.INDEX_NUMERIC,
'test_numeric_map_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', 'test_numeric_map_index',
policy)
def test_mapkeysindex_with_incorrect_bin(self):
"""
Invoke createindex() with incorrect bin
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'string_map1', aerospike.INDEX_STRING,
'test_string_map_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', 'test_string_map_index',
policy)
def test_mapkeysindex_with_namespace_is_none(self):
"""
Invoke createindex() with namespace is None
"""
policy = {}
try:
retobj = TestMapKeysIndex.client.index_map_keys_create( None, 'demo',
'string_map', aerospike.INDEX_STRING, 'test_string_map_index', policy )
except ParamError as exception:
assert exception.code == -2
assert exception.msg == 'Namespace should be a string'
def test_mapkeysindex_with_set_is_int(self):
"""
Invoke createindex() with set is int
"""
policy = {}
with pytest.raises(Exception) as exception:
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 1, 'string_map', aerospike.INDEX_STRING,
'test_string_map_index', policy)
assert exception.value[0] == -2
assert exception.value[1] == 'Set should be string, unicode or None'
def test_mapkeysindex_with_set_is_none(self):
"""
Invoke createindex() with set is None
"""
policy = {}
try:
retobj = TestMapKeysIndex.client.index_map_keys_create( 'test', None,
'string_map', aerospike.INDEX_STRING, 'test_string_map_index' , policy)
except ParamError as exception:
assert exception.code == -2
assert exception.msg == 'Set should be a string'
def test_mapkeysindex_with_bin_is_none(self):
"""
Invoke createindex() with bin is None
"""
policy = {}
try:
retobj = TestMapKeysIndex.client.index_map_keys_create( 'test', 'demo',
None, aerospike.INDEX_NUMERIC, 'test_numeric_map_index' , policy)
except ParamError as exception:
assert exception.code == -2
assert exception.msg == 'Bin should be a string'
def test_mapkeysindex_with_index_is_none(self):
"""
Invoke createindex() with index is None
"""
policy = {}
try:
retobj = TestMapKeysIndex.client.index_map_keys_create( 'test', 'demo',
'string_map', aerospike.INDEX_STRING, None, policy )
except ParamError as exception:
assert exception.code == -2
assert exception.msg == 'Index name should be string or unicode'
def test_create_same_mapindex_multiple_times(self):
"""
Invoke createindex() with multiple times on same bin
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'numeric_map', aerospike.INDEX_NUMERIC,
'test_numeric_map_index', policy)
if retobj == 0L:
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'numeric_map', aerospike.INDEX_NUMERIC,
'test_numeric_map_index', policy)
TestMapKeysIndex.client.index_remove(
'test', 'test_numeric_map_index', policy)
assert retobj == 0L
else:
assert True == False
def test_create_same_mapindex_multiple_times_different_bin(self):
"""
Invoke createindex() with multiple times on different bin
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'string_map', aerospike.INDEX_STRING,
'test_string_map_index', policy)
if retobj == 0L:
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'numeric_map', aerospike.INDEX_NUMERIC,
'test_string_map_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove(
'test', 'test_string_map_index', policy)
else:
assert True == False
def test_create_different_mapindex_multiple_times_same_bin(self):
"""
Invoke createindex() with multiple times on same bin with different
name
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'string_map', aerospike.INDEX_STRING,
'test_string_map_index', policy)
if retobj == 0L:
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'string_map', aerospike.INDEX_STRING,
'test_string_map_index1', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove(
'test', 'test_string_map_index', policy)
TestMapKeysIndex.client.index_remove(
'test', 'test_string_map_index1', policy)
else:
assert True == False
def test_createmapindex_with_policy(self):
"""
Invoke createindex() with policy
"""
policy = {'timeout': 1000}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'numeric_map', aerospike.INDEX_NUMERIC,
'test_numeric_map_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', 'test_numeric_map_index',
policy)
def test_createmapindex_with_policystring(self):
"""
Invoke createindex() with policy
"""
policy = {'timeout': 1000}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', 'demo', 'string_map', aerospike.INDEX_STRING,
'test_string_map_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', 'test_string_map_index',
policy)
"""
This test case causes a db crash and hence has been commented. Work pending
on the C-client side
def test_createindex_with_long_index_name(self):
Invoke createindex() with long index name
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create( 'test', 'demo',
'age',
'bin2_integer_indexsdadadfasdfasdfeartfqrgahfasdfheudsdfasdfawf312342q3453rf9qwfasdcfasdcalskdcbacfq34915rwcfasdcascnabscbaskjdbcalsjkbcdasc', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove(policy, 'test',
'bin2_integer_indexsdadadfasdfasdfeartfqrgahfasdfheudsdfasdfawf312342q3453rf9qwfasdcfasdcalskdcbacfq34915rwcfasdcascnabscbaskjdbcalsjkbcdasc');
"""
def test_create_mapstringindex_unicode_positive(self):
"""
Invoke create string index() with correct arguments
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', u'demo', u'string_map', aerospike.INDEX_STRING,
u'uni_name_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', u'uni_name_index', policy)
def test_create_map_integer_index_unicode(self):
"""
Invoke createindex() with correct arguments
"""
policy = {}
retobj = TestMapKeysIndex.client.index_map_keys_create(
'test', u'demo', u'numeric_map', aerospike.INDEX_NUMERIC,
u'uni_age_index', policy)
assert retobj == 0L
TestMapKeysIndex.client.index_remove('test', u'uni_age_index', policy)
def test_mapkeysindex_with_correct_parameters_no_connection(self):
"""
Invoke index_map_keys_create() with correct arguments no connection
"""
policy = {}
config = {'hosts': [('127.0.0.1', 3000)]}
client1 = aerospike.client(config)
try:
retobj = client1.index_map_keys_create('test', 'demo', 'string_map', aerospike.INDEX_STRING, 'test_string_map_index', policy)
except ClusterError as exception:
assert exception.code == 11
assert exception.msg == 'No connection to aerospike cluster'
|
|
# -*- coding: utf-8 -*-
import functools
import re
import time
import warnings
from base64 import b64encode, b64decode
from http.client import HTTPResponse
from io import BytesIO
from typing import Union, Dict, Tuple, Optional
import requests
import urllib3
from requests import Timeout, Response, ConnectionError
from requests.adapters import HTTPAdapter
# SSO not supported for Linux
from TM1py.Exceptions.Exceptions import TM1pyTimeout
from TM1py.Utils import case_and_space_insensitive_equals, CaseAndSpaceInsensitiveSet
try:
from requests_negotiate_sspi import HttpNegotiateAuth
except ImportError:
warnings.warn("requests_negotiate_sspi failed to import. SSO will not work", ImportWarning)
from TM1py.Exceptions import TM1pyRestException
import http.client as http_client
def httpmethod(func):
""" Higher Order Function to wrap the GET, POST, PATCH, PUT, DELETE methods
Takes care of:
- encoding of url and payload
- verifying response. Throws TM1pyException if StatusCode of Response is not OK
"""
@functools.wraps(func)
def wrapper(self, url: str, data: str = '', encoding='utf-8', async_requests_mode: Optional[bool] = None, **kwargs):
# url encoding
url, data = self._url_and_body(
url=url,
data=data,
encoding=encoding)
# execute request
try:
# determine async_requests_mode
if async_requests_mode is None:
async_requests_mode = self._async_requests_mode
if not async_requests_mode:
response = func(self, url, data, **kwargs)
else:
additional_header = {'Prefer': 'respond-async'}
http_headers = kwargs.get('headers', dict())
http_headers.update(additional_header)
kwargs['headers'] = http_headers
response = func(self, url, data, **kwargs)
self.verify_response(response=response)
if 'Location' not in response.headers or "'" not in response.headers['Location']:
raise ValueError(f"Failed to retrieve async_id from request {func.__name__} '{url}'")
async_id = response.headers.get('Location').split("'")[1]
for wait in RestService.wait_time_generator(kwargs.get('timeout', self._timeout)):
response = self.retrieve_async_response(async_id)
if response.status_code in [200, 201]:
break
time.sleep(wait)
# all wait times consumed and still no 200
if response.status_code not in [200, 201]:
if kwargs.get("cancel_at_timeout", False):
self.cancel_async_operation(async_id)
raise TM1pyTimeout(method=func.__name__, url=url, timeout=kwargs['timeout'])
response = self.build_response_from_raw_bytes(response.content)
# verify
self.verify_response(response=response)
# response encoding
response.encoding = encoding
return response
except Timeout:
if kwargs.get("cancel_at_timeout", False):
self.cancel_running_operation()
raise TM1pyTimeout(method=func.__name__, url=url, timeout=kwargs['timeout'])
except ConnectionError as e:
# cater for issue in requests library: https://github.com/psf/requests/issues/5430
if re.search('Read timed out', str(e), re.IGNORECASE):
if kwargs.get("cancel_at_timeout", False):
self.cancel_running_operation()
raise TM1pyTimeout(method=func.__name__, url=url, timeout=kwargs['timeout'])
return wrapper
class RestService:
""" Low level communication with TM1 instance through HTTP.
Allows executing the HTTP methods
- GET
- POST
- PATCH
- DELETE
Takes Care of
- Encodings
- TM1 User-Login
- HTTP Headers
- HTTP Session Management
- Response Handling
Based on requests module
"""
HEADERS = {'Connection': 'keep-alive',
'User-Agent': 'TM1py',
'Content-Type': 'application/json; odata.streaming=true; charset=utf-8',
'Accept': 'application/json;odata.metadata=none,text/plain',
'TM1-SessionContext': 'TM1py'}
def __init__(self, **kwargs):
""" Create an instance of RESTService
:param address: String - address of the TM1 instance
:param port: Int - HTTPPortNumber as specified in the tm1s.cfg
:param base_url: base url e.g. https://localhost:12354/api/v1
:param user: String - name of the user
:param password: String - password of the user
:param decode_b64: whether password argument is b64 encoded
:param namespace: String - optional CAM namespace
:param ssl: boolean - as specified in the tm1s.cfg
:param session_id: String - TM1SessionId e.g. q7O6e1w49AixeuLVxJ1GZg
:param session_context: String - Name of the Application. Controls "Context" column in Arc / TM1top.
If None, use default: TM1py
:param verify: path to .cer file or 'True' / True / 'False' / False (if no ssl verification is required)
:param logging: boolean - switch on/off verbose http logging into sys.stdout
:param timeout: Float - Number of seconds that the client will wait to receive the first byte.
:param async_requests_mode: changes internal REST execution mode to avoid 60s timeout on IBM cloud
:param connection_pool_size: In a multithreaded environment, you should set this value to a
higher number, such as the number of threads
:param integrated_login: True for IntegratedSecurityMode3
:param integrated_login_domain: NT Domain name.
Default: '.' for local account.
:param integrated_login_service: Kerberos Service type for remote Service Principal Name.
Default: 'HTTP'
:param integrated_login_host: Host name for Service Principal Name.
Default: Extracted from request URI
:param integrated_login_delegate: Indicates that the user's credentials are to be delegated to the server.
Default: False
:param impersonate: Name of user to impersonate
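:Example (illustrative sketch only; address, port and credentials are placeholders):
    >>> rest = RestService(address="localhost", port=12354, user="admin",
    ...                    password="apple", ssl=True)
    >>> version = rest.GET("/api/v1/Configuration/ProductVersion/$value").text
    >>> rest.logout()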
"""
self._kwargs = kwargs
self._ssl = self.translate_to_boolean(kwargs['ssl'])
self._address = kwargs.get('address', None)
self._port = kwargs.get('port', None)
self._verify = False
self._timeout = None if kwargs.get('timeout', None) is None else float(kwargs.get('timeout'))
self._async_requests_mode = self.translate_to_boolean(kwargs.get('async_requests_mode', False))
# populated on the fly
if kwargs.get('user'):
self._is_admin = True if case_and_space_insensitive_equals(kwargs.get('user'), 'ADMIN') else None
else:
self._is_admin = None
if 'verify' in kwargs:
if isinstance(kwargs['verify'], str):
if kwargs['verify'].upper() == 'FALSE':
self._verify = False
elif kwargs['verify'].upper() == 'TRUE':
self._verify = True
# path to .cer file
else:
self._verify = kwargs.get('verify')
elif isinstance(kwargs['verify'], bool):
self._verify = kwargs['verify']
else:
raise ValueError("verify argument must be of type str or bool")
if 'base_url' in kwargs:
self._base_url = kwargs['base_url']
else:
self._base_url = "http{}://{}:{}".format(
's' if self._ssl else '',
'localhost' if len(self._address) == 0 else self._address,
self._port)
self._version = None
self._headers = self.HEADERS.copy()
if "session_context" in kwargs:
self._headers["TM1-SessionContext"] = kwargs["session_context"]
self.disable_http_warnings()
# re-use or create tm1 http session
self._s = requests.session()
if "session_id" in kwargs:
self._s.cookies.set("TM1SessionId", kwargs["session_id"])
else:
self._start_session(
user=kwargs.get("user", None),
password=kwargs.get("password", None),
namespace=kwargs.get("namespace", None),
gateway=kwargs.get("gateway", None),
decode_b64=self.translate_to_boolean(kwargs.get("decode_b64", False)),
integrated_login=self.translate_to_boolean(kwargs.get("integrated_login", False)),
integrated_login_domain=kwargs.get("integrated_login_domain"),
integrated_login_service=kwargs.get("integrated_login_service"),
integrated_login_host=kwargs.get("integrated_login_host"),
integrated_login_delegate=kwargs.get("integrated_login_delegate"),
impersonate=kwargs.get("impersonate", None))
if not self._version:
self.set_version()
self._sandboxing_disabled = None
# manage connection pool
if "connection_pool_size" in kwargs:
self._manage_http_connection_pool(kwargs.get("connection_pool_size"))
# Logging
if 'logging' in kwargs:
if self.translate_to_boolean(value=kwargs['logging']):
http_client.HTTPConnection.debuglevel = 1
def _manage_http_connection_pool(self, connection_pool_size: Union[str, int]):
self._s.mount(
self._base_url,
HTTPAdapter(
pool_connections=int(connection_pool_size),
pool_maxsize=int(connection_pool_size)))
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.logout()
@httpmethod
def GET(self, url: str, data: Union[str, bytes] = '', headers: Dict = None, timeout: float = None, **kwargs):
""" Perform a GET request against TM1 instance
:param url:
:param data: the payload
:param headers: custom headers
:param timeout: Number of seconds that the client will wait to receive the first byte.
:return: response object
"""
return self._s.get(
url=url,
headers={**self._headers, **headers} if headers else self._headers,
data=data,
verify=self._verify,
timeout=timeout if timeout else self._timeout)
@httpmethod
def POST(self, url: str, data: Union[str, bytes], headers: Dict = None, timeout: float = None, **kwargs):
""" POST request against the TM1 instance
:param url:
:param data: the payload
:param headers: custom headers
:param timeout: Number of seconds that the client will wait to receive the first byte.
:return: response object
"""
return self._s.post(
url=url,
headers={**self._headers, **headers} if headers else self._headers,
data=data,
verify=self._verify,
timeout=timeout if timeout else self._timeout)
@httpmethod
def PATCH(self, url: str, data: Union[str, bytes], headers: Dict = None, timeout: float = None, **kwargs):
""" PATCH request against the TM1 instance
:param url: String, for instance : /api/v1/Dimensions('plan_business_unit')
:param data: the payload
:param headers: custom headers
:param timeout: Number of seconds that the client will wait to receive the first byte.
:return: response object
"""
return self._s.patch(
url=url,
headers={**self._headers, **headers} if headers else self._headers,
data=data,
verify=self._verify,
timeout=timeout if timeout else self._timeout)
@httpmethod
def PUT(self, url: str, data: Union[str, bytes], headers: Dict = None, timeout: float = None, **kwargs):
""" PUT request against the TM1 instance
:param url: String, for instance : /api/v1/Dimensions('plan_business_unit')
:param data: the payload
:param headers: custom headers
:param timeout: Number of seconds that the client will wait to receive the first byte.
:return: response object
"""
return self._s.put(
url=url,
headers={**self._headers, **headers} if headers else self._headers,
data=data,
verify=self._verify,
timeout=timeout if timeout else self._timeout)
@httpmethod
def DELETE(self, url: str, data: Union[str, bytes], headers: Dict = None, timeout: float = None, **kwargs):
""" Delete request against TM1 instance
:param url: String, for instance : /api/v1/Dimensions('plan_business_unit')
:param data: the payload
:param headers: custom headers
:param timeout: Number of seconds that the client will wait to receive the first byte.
:return: response object
"""
return self._s.delete(
url=url,
headers={**self._headers, **headers} if headers else self._headers,
data=data,
verify=self._verify,
timeout=timeout if timeout else self._timeout)
def logout(self, timeout: float = None, **kwargs):
""" End TM1 Session and HTTP session
"""
# Easier to ask for forgiveness than permission
try:
# ProductVersion >= TM1 10.2.2 FP 6
self.POST('/api/v1/ActiveSession/tm1.Close', '', headers={"Connection": "close"}, timeout=timeout,
async_requests_mode=False, **kwargs)
except TM1pyRestException:
# ProductVersion < TM1 10.2.2 FP 6
self.POST('/api/logout', '', headers={"Connection": "close"}, timeout=timeout, **kwargs)
finally:
self._s.close()
def _start_session(self, user: str, password: str, decode_b64: bool = False, namespace: str = None,
gateway: str = None, integrated_login: bool = None, integrated_login_domain: str = None,
integrated_login_service: str = None, integrated_login_host: str = None,
integrated_login_delegate: bool = None, impersonate: str = None):
""" perform a simple GET request (Ask for the TM1 Version) to start a session
"""
# Authorization with integrated_login
if integrated_login:
self._s.auth = HttpNegotiateAuth(
domain=integrated_login_domain,
service=integrated_login_service,
host=integrated_login_host,
delegate=integrated_login_delegate)
# Authorization [Basic, CAM] through Headers
else:
token = self._build_authorization_token(
user,
self.b64_decode_password(password) if decode_b64 else password,
namespace,
gateway,
self._verify)
self.add_http_header('Authorization', token)
url = '/api/v1/Configuration/ProductVersion/$value'
try:
additional_headers = dict()
if impersonate:
additional_headers["TM1-Impersonate"] = impersonate
response = self.GET(url=url, headers=additional_headers)
self._version = response.text
finally:
# After we have session cookie, drop the Authorization Header
self.remove_http_header('Authorization')
def _url_and_body(self, url: str, data: str, encoding: str = 'utf-8') -> Tuple[str, bytes]:
""" create proper url and payload
"""
url = self._base_url + url
url = url.replace(' ', '%20')
if isinstance(data, str):
data = data.encode(encoding)
return url, data
def is_connected(self) -> bool:
""" Check if Connection to TM1 Server is established.
:Returns:
Boolean
"""
try:
self.GET('/api/v1/Configuration/ServerName/$value')
return True
except:
return False
def set_version(self):
url = '/api/v1/Configuration/ProductVersion/$value'
response = self.GET(url=url)
self._version = response.text
@property
def version(self) -> str:
return self._version
@property
def is_admin(self) -> bool:
if self._is_admin is None:
response = self.GET("/api/v1/ActiveUser/Groups")
self._is_admin = "ADMIN" in CaseAndSpaceInsensitiveSet(
*[group["Name"] for group in response.json()["value"]])
return self._is_admin
@property
def sandboxing_disabled(self):
if self._sandboxing_disabled is None:
value = self.GET("/api/v1/ActiveConfiguration/Administration/DisableSandboxing/$value")
self._sandboxing_disabled = value
return self._sandboxing_disabled
@property
def session_id(self) -> str:
return self._s.cookies["TM1SessionId"]
@staticmethod
def translate_to_boolean(value) -> bool:
""" Takes a boolean or string (eg. true, True, FALSE, etc.) value and returns (boolean) True or False
:param value: True, 'true', 'false' or 'False' ...
:return:
"""
if isinstance(value, bool) or isinstance(value, int):
return bool(value)
elif isinstance(value, str):
return value.replace(" ", "").lower() == 'true'
else:
raise ValueError("Invalid argument: '" + value + "'. Must be to be of type 'bool' or 'str'")
@staticmethod
def b64_decode_password(encrypted_password: str) -> str:
""" b64 decoding
:param encrypted_password: encrypted password with b64
:return: password in plain text
"""
return b64decode(encrypted_password).decode("UTF-8")
@staticmethod
def verify_response(response: Response):
""" check if Status Code is OK
:Parameters:
`response`: requests.Response
the response that is returned from a method call
:Exceptions:
TM1pyRestException, raised when the status code is not OK (200, 204, etc.)
"""
if not response.ok:
raise TM1pyRestException(response.text,
status_code=response.status_code,
reason=response.reason,
headers=response.headers)
@staticmethod
def _build_authorization_token(user: str, password: str, namespace: str = None, gateway: str = None,
verify: bool = False) -> str:
""" Build the Authorization Header for CAM and Native Security
"""
if namespace:
return RestService._build_authorization_token_cam(user, password, namespace, gateway, verify)
else:
return RestService._build_authorization_token_basic(user, password)
@staticmethod
def _build_authorization_token_cam(user: str = None, password: str = None, namespace: str = None,
gateway: str = None, verify: bool = False) -> str:
if gateway:
try:
HttpNegotiateAuth
except NameError:
raise RuntimeError(
"SSO failed due to missing dependency requests_negotiate_sspi.HttpNegotiateAuth. "
"SSO only supported for Windows")
response = requests.get(gateway, auth=HttpNegotiateAuth(), verify=verify,
params={"CAMNamespace": namespace})
            if response.status_code != 200:
raise RuntimeError(
"Failed to authenticate through CAM. Expected status_code 200, received status_code: "
+ str(response.status_code))
elif 'cam_passport' not in response.cookies:
raise RuntimeError(
"Failed to authenticate through CAM. HTTP response does not contain 'cam_passport' cookie")
else:
return 'CAMPassport ' + response.cookies['cam_passport']
else:
return 'CAMNamespace ' + b64encode(str.encode("{}:{}:{}".format(user, password, namespace))).decode("ascii")
@staticmethod
def _build_authorization_token_basic(user: str, password: str) -> str:
return 'Basic ' + b64encode(str.encode("{}:{}".format(user, password))).decode("ascii")
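    # For illustration (not part of the original class): user "admin" with password "apple"
    # produces the header value 'Basic YWRtaW46YXBwbGU=' (base64 of "admin:apple").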
@staticmethod
def disable_http_warnings():
        # disable TLS certificate verification warnings raised by urllib3 (the HTTP library used by requests)
requests.packages.urllib3.disable_warnings()
def get_http_header(self, key: str) -> str:
return self._headers[key]
def add_http_header(self, key: str, value: str):
self._headers[key] = value
def remove_http_header(self, key: str):
if key in self._headers:
self._headers.pop(key)
def retrieve_async_response(self, async_id: str, **kwargs) -> Response:
url = self._base_url + f"/api/v1/_async('{async_id}')"
return self._s.get(url, **kwargs)
def cancel_async_operation(self, async_id: str, **kwargs):
url = self._base_url + f"/api/v1/_async('{async_id}')"
response = self._s.delete(url, **kwargs)
self.verify_response(response)
def cancel_running_operation(self):
monitoring_service = self.get_monitoring_service()
threads = monitoring_service.get_active_session_threads(exclude_idle=True)
# if more than one thread is running in session, operation can not be identified unambiguously
        if len(threads) != 1:
return
monitoring_service.cancel_thread(threads[0]['ID'])
def get_monitoring_service(self):
from TM1py.Services import MonitoringService
return MonitoringService(self)
@staticmethod
def urllib3_response_from_bytes(data: bytes) -> HTTPResponse:
sock = BytesIOSocket(data)
response = HTTPResponse(sock)
response.begin()
return urllib3.HTTPResponse.from_httplib(response)
@staticmethod
def build_response_from_raw_bytes(data: bytes) -> Response:
urllib_response = RestService.urllib3_response_from_bytes(data)
adapter = HTTPAdapter()
requests_response = adapter.build_response(requests.PreparedRequest(), urllib_response)
# actual content of response needs to be set explicitly
requests_response._content = urllib_response.data
return requests_response
@staticmethod
def wait_time_generator(timeout: int):
yield 0.1
yield 0.3
yield 0.6
if timeout:
for _ in range(1, int(timeout)):
yield 1
else:
while True:
yield 1
class BytesIOSocket:
""" used in urllib3_response_from_bytes method to construct urllib3 response from raw bytes
"""
def __init__(self, content: bytes):
self.handle = BytesIO(content)
def makefile(self, mode) -> BytesIO:
return self.handle
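# Minimal polling sketch (not part of the original module). It assumes an authenticated
# RestService instance `rest` and an `async_id` obtained from a request that was issued
# asynchronously; it combines wait_time_generator, retrieve_async_response and
# build_response_from_raw_bytes defined above.
def _poll_async_response_sketch(rest: 'RestService', async_id: str, timeout: int = 30) -> Response:
    import time
    for wait in RestService.wait_time_generator(timeout):
        response = rest.retrieve_async_response(async_id)
        if response.status_code in (200, 201):
            # completed operations come back as raw HTTP bytes; rebuild a full Response from them
            return RestService.build_response_from_raw_bytes(response.content)
        time.sleep(wait)
    raise RuntimeError("Async operation '{}' did not complete within {} seconds".format(async_id, timeout))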
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from openstackclient.tests.functional import base
class FlavorTests(base.TestCase):
"""Functional tests for flavor."""
PROJECT_NAME = uuid.uuid4().hex
@classmethod
def setUpClass(cls):
super(FlavorTests, cls).setUpClass()
# Make a project
cmd_output = json.loads(cls.openstack(
"project create -f json --enable " + cls.PROJECT_NAME
))
cls.project_id = cmd_output["id"]
@classmethod
def tearDownClass(cls):
try:
raw_output = cls.openstack("project delete " + cls.PROJECT_NAME)
cls.assertOutput('', raw_output)
finally:
super(FlavorTests, cls).tearDownClass()
def test_flavor_delete(self):
"""Test create w/project, delete multiple"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--project " + self.PROJECT_NAME + " " +
"--private " +
name1
))
self.assertIsNotNone(cmd_output["id"])
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--id qaz " +
"--project " + self.PROJECT_NAME + " " +
"--private " +
name2
))
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
"qaz",
cmd_output["id"],
)
raw_output = self.openstack(
"flavor delete " + name1 + " " + name2,
)
self.assertOutput('', raw_output)
def test_flavor_list(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--property a=b " +
"--property c=d " +
name1
))
self.addCleanup(self.openstack, "flavor delete " + name1)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name1,
cmd_output["name"],
)
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--id qaz " +
"--ram 123 " +
"--private " +
"--property a=b2 " +
"--property b=d2 " +
name2
))
self.addCleanup(self.openstack, "flavor delete " + name2)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
"qaz",
cmd_output["id"],
)
self.assertEqual(
name2,
cmd_output["name"],
)
self.assertEqual(
123,
cmd_output["ram"],
)
self.assertEqual(
0,
cmd_output["disk"],
)
self.assertFalse(
cmd_output["os-flavor-access:is_public"],
)
self.assertDictEqual(
{"a": "b2", "b": "d2"},
cmd_output["properties"],
)
# Test list
cmd_output = json.loads(self.openstack(
"flavor list -f json"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --long
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--long"
))
# We have list of complex json objects
# Iterate through the list setting flags
found_expected = False
for rec in cmd_output:
if rec['Name'] == name1:
found_expected = True
self.assertEqual('b', rec['Properties']['a'])
self.assertEqual('d', rec['Properties']['c'])
elif rec['Name'] == name2:
                # The private flavor should not appear in the default list
                self.fail("Private flavor should not be listed: " + name2)
self.assertTrue(found_expected)
# Test list --public
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--public"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --private
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--private"
))
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --all
cmd_output = json.loads(self.openstack(
"flavor list -f json " +
"--all"
))
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
def test_flavor_properties(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
"flavor create -f json " +
"--id qaz " +
"--ram 123 " +
"--disk 20 " +
"--private " +
"--property a=first " +
"--property b=second " +
name1
))
self.addCleanup(self.openstack, "flavor delete " + name1)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
"qaz",
cmd_output["id"],
)
self.assertEqual(
name1,
cmd_output["name"],
)
self.assertEqual(
123,
cmd_output["ram"],
)
self.assertEqual(
20,
cmd_output["disk"],
)
self.assertFalse(
cmd_output["os-flavor-access:is_public"],
)
self.assertDictEqual(
{"a": "first", "b": "second"},
cmd_output["properties"],
)
raw_output = self.openstack(
"flavor set " +
"--property a='third and 10' " +
"--property g=fourth " +
name1
)
self.assertEqual('', raw_output)
cmd_output = json.loads(self.openstack(
"flavor show -f json " +
name1
))
self.assertEqual(
"qaz",
cmd_output["id"],
)
self.assertEqual(
'third and 10',
cmd_output['properties']['a'])
self.assertEqual(
'second',
cmd_output['properties']['b'])
self.assertEqual(
'fourth',
cmd_output['properties']['g'])
raw_output = self.openstack(
"flavor unset " +
"--property b " +
name1
)
self.assertEqual('', raw_output)
cmd_output = json.loads(self.openstack(
"flavor show -f json " +
name1
))
self.assertNotIn('b', cmd_output['properties'])
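    # Hypothetical helper (not part of the original tests): the create-then-addCleanup pattern used
    # in the tests above could be factored out roughly like this, reusing the openstack()/addCleanup()
    # helpers provided by base.TestCase.
    def _create_flavor_with_cleanup(self, *options):
        name = uuid.uuid4().hex
        cmd_output = json.loads(self.openstack(
            "flavor create -f json " + " ".join(options) + " " + name
        ))
        self.addCleanup(self.openstack, "flavor delete " + name)
        return name, cmd_output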
|
|
import logging
import flask_login
from flask import jsonify, request, Response
import mediacloud
import mediacloud.error
import concurrent.futures
import server.util.csv as csv
import server.util.tags as tag_util
import server.views.topics.apicache as apicache
import server.views.apicache as base_apicache
from server import app, cliff
from server.auth import user_mediacloud_key, user_mediacloud_client
from server.cache import cache
from server.util.request import api_error_handler, filters_from_args
from server.views.topics import concatenate_query_for_solr, _parse_collection_ids, _parse_media_ids
from server.util.tags import TAG_SPIDERED_STORY
logger = logging.getLogger(__name__)
@cache.cache_on_arguments()
def _cached_geoname(geonames_id):
return cliff.geonames_lookup(geonames_id)
@app.route('/api/topics/<topics_id>/stories/counts', methods=['GET'])
@flask_login.login_required
@api_error_handler
def story_counts(topics_id):
    query = request.args['keywords'] if 'keywords' in request.args else ''
    # for preview information in subtopics and platforms - scope by media source info
collections = _parse_collection_ids(request.args)
sources = _parse_media_ids(request.args)
merged_args = {}
    if (sources not in [None, ''] and len(sources) > 0) or (collections not in [None, ''] and len(collections) > 0):
        query = concatenate_query_for_solr(query, sources, collections)
        merged_args = {'q': query}
filtered = apicache.topic_story_count(user_mediacloud_key(), topics_id, **merged_args)
total = apicache.topic_story_count(user_mediacloud_key(), topics_id, timespans_id=None, snapshots_id=None, foci_id=None, q=None)
return jsonify({'counts': {'count': filtered['count'], 'total': total['count']}})
def _public_safe_topic_story_count(topics_id, q):
total = apicache.topic_story_count(user_mediacloud_key(), topics_id, q=apicache.add_to_user_query(None))
# force a count with just the query
matching = apicache.topic_story_count(user_mediacloud_key(), topics_id, q=apicache.add_to_user_query(q))
return jsonify({'counts': {'count': matching['count'], 'total': total['count']}})
def platform_csv_column_header_prefix(topic_seed_query):
return "{}_{}_".format(topic_seed_query['platform'], topic_seed_query['source'])
def stream_story_list_csv(user_key, filename, topic, **kwargs):
filename = topic['name'] + '-' + filename
has_twitter_data = (topic['ch_monitor_id'] is not None) and (topic['ch_monitor_id'] != 0)
    # we have to make a separate call to the media info if the user wants to include the media metadata
include_media_metadata = ('media_metadata' in kwargs) and (kwargs['media_metadata'] == '1')
    # we have to make an extra, non-topic storyList call if the user wants to include subtopics and themes (ie. story tags)
include_story_tags = ('story_tags' in kwargs) and (kwargs['story_tags'] == '1')
# if the focusId is a URL Sharing subtopic, then we have platform-specific post/author/channel share counts
    include_platform_url_shares = kwargs.get('include_platform_url_shares', False)
# if this topic includes platforms, then we have URL sharing counts (post/author/channel) for each platform
    include_all_url_shares = kwargs.get('include_all_url_shares', False)
params = kwargs.copy()
snapshots_id, timespans_id, foci_id, q = filters_from_args(request.args)
merged_args = {
'timespans_id': timespans_id,
'snapshots_id': snapshots_id,
'foci_id': foci_id,
'q': q,
'sort': request.args['sort'] if 'sort' in request.args else None,
}
params.update(merged_args)
# do a check to see if the user has added in a real query or not
if 'q' in params:
        params['q'] = params['q'] if params['q'] not in [None, '', 'null', 'undefined'] else None
params['limit'] = 1000 # an arbitrary value to let us page through with big topics (note, this is the page size)
# set up the dict keys / column headers that the user cares about for this download
props = [
'stories_id', 'publish_date', 'title', 'url', 'language', 'ap_syndicated', 'inlink_count',
'facebook_share_count'
]
if include_platform_url_shares:
props += ['post_count', 'channel_count', 'author_count']
if include_all_url_shares:
# if the user requested to download all the url sharing counts by platform, we need to grab the config for that
# which is held in the platform seed query objects
topic_seed_queries = topic['topic_seed_queries']
extra_columns = []
for tsq in topic_seed_queries:
prefix = platform_csv_column_header_prefix(tsq)
extra_columns += [prefix+'post_count', prefix+'channel_count', prefix+'author_count']
props += extra_columns
params['topic_seed_queries'] = topic_seed_queries
if has_twitter_data:
props.append('simple_tweet_count')
if include_story_tags:
props += ['themes', 'subtopics']
props += ['outlink_count', 'media_inlink_count', 'media_id', 'media_name', 'media_url']
if include_media_metadata:
props += ['media_pub_country', 'media_pub_state', 'media_language', 'media_about_country', 'media_media_type']
timestamped_filename = csv.safe_filename(filename)
headers = {
"Content-Disposition": "attachment;filename=" + timestamped_filename
}
return Response(_topic_story_list_by_page_as_csv_row(user_key, topic['topics_id'], props, **params),
mimetype='text/csv; charset=utf-8', headers=headers)
# generator you can use to handle a long list of stories row by row (one row per story)
def _topic_story_list_by_page_as_csv_row(user_key, topics_id, props, **kwargs):
yield ','.join(props) + '\n' # first send the column names
    include_all_url_shares = kwargs.get('include_all_url_shares', False)
story_count = 0
link_id = 0
more_pages = True
yet_to_hit_story_limit = True
has_story_limit = ('story_limit' in kwargs) and (kwargs['story_limit'] is not None)
# page through the story list results, until we run out or we hit the user's desired limit
while more_pages and ((not has_story_limit) or (has_story_limit and yet_to_hit_story_limit)):
page = _topic_story_page_with_media(user_key, topics_id, link_id, **kwargs)
if 'next' in page['link_ids']:
link_id = page['link_ids']['next']
else:
more_pages = False
for s in page['stories']:
if include_all_url_shares:
topic_seed_queries = kwargs['topic_seed_queries']
# add in each header col
for item in s['url_sharing_counts']:
seed_query = [tsq for tsq in topic_seed_queries if tsq['topic_seed_queries_id'] == item['topic_seed_queries_id']][0]
prefix = platform_csv_column_header_prefix(seed_query)
s[prefix + "post_count"] = item['post_count']
s[prefix + "channel_count"] = item['channel_count']
s[prefix + "author_count"] = item['author_count']
            # flatten foci down to just the readable names
s['subtopics'] = ["{}: {}".format(f['focal_set_name'], f['name']) for f in s['foci']]
cleaned_row = csv.dict2row(props, s)
row_string = ','.join(cleaned_row) + '\n'
yield row_string
story_count += len(page['stories'])
yet_to_hit_story_limit = has_story_limit and (story_count < int(kwargs['story_limit']))
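# Minimal usage sketch (hypothetical, not part of the original module): because the paging
# generator above yields one CSV row at a time, it can also be consumed lazily outside of a
# Flask Response, e.g. to write a file on disk, given the same props/kwargs contract that
# stream_story_list_csv builds.
def _write_story_csv_to_disk_sketch(user_key, topics_id, props, path, **kwargs):
    with open(path, 'w', encoding='utf-8') as f:
        for row in _topic_story_list_by_page_as_csv_row(user_key, topics_id, props, **kwargs):
            f.write(row)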
def _media_info_worker(info):
return base_apicache.get_media_with_key(info['user_key'], info['media_id'])
# generator you can use to do something for each page of story results
def _topic_story_page_with_media(user_key, topics_id, link_id, **kwargs):
media_lookup = {}
include_media_metadata = ('media_metadata' in kwargs) and (kwargs['media_metadata'] == '1')
include_story_tags = ('story_tags' in kwargs) and (kwargs['story_tags'] == '1')
# need to make sure invalid params don't make it to API call
args = {k: v for k, v in kwargs.copy().items() if k in apicache.TOPIC_STORY_LIST_API_PARAMS}
story_page = apicache.topic_story_list_by_page(user_key, topics_id, link_id=link_id, **args)
if len(story_page['stories']) > 0: # be careful to not construct malformed query if no story ids
# build a media lookup table in parallel so it is faster
if include_media_metadata:
with concurrent.futures.ProcessPoolExecutor() as executor:
media_ids = set([s['media_id'] for s in story_page['stories']])
jobs = [{'user_key': user_key, 'media_id': mid} for mid in media_ids]
job_results = executor.map(_media_info_worker, jobs) # blocks until they are all done
media_lookup = {j['media_id']: j for j in job_results}
if include_story_tags:
story_ids = [str(s['stories_id']) for s in story_page['stories']]
stories_with_tags = apicache.story_list(user_key, 'stories_id:(' + " ".join(story_ids) + ")", args['limit'])
# update story info for each story in the page, put it into the [stories] field, send updated page with
# stories back
for s in story_page['stories']:
# add in media metadata to the story (from page-level cache built earlier)
if include_media_metadata:
media = media_lookup[s['media_id']]
# add in media metadata items
for k, v in media['metadata'].items():
s['media_{}'.format(k)] = v['label'] if v is not None else None
# build lookup for id => story for all stories in stories with tags (non topic results)
if include_story_tags:
for st in stories_with_tags:
if s['stories_id'] == st['stories_id']:
s.update(st)
foci_names = [f['name'] for f in s['foci']]
s['subtopics'] = ", ".join(foci_names)
s['themes'] = ''
story_tag_ids = [t['tags_id'] for t in s['story_tags']]
if tag_util.NYT_LABELER_1_0_0_TAG_ID in story_tag_ids:
story_tag_ids = [t['tag'] for t in s['story_tags']
if t['tag_sets_id'] == tag_util.NYT_LABELS_TAG_SET_ID]
s['themes'] = ", ".join(story_tag_ids)
return story_page
@app.route('/api/topics/<topics_id>/stories/counts-by-snapshot', methods=['GET'])
@flask_login.login_required
@api_error_handler
def story_counts_by_snapshot(topics_id):
user_mc = user_mediacloud_client(user_mediacloud_key())
snapshots = user_mc.topicSnapshotList(topics_id)
counts = {}
for s in snapshots:
        # get the count of stories in the overall timespan for this snapshot
timespans = apicache.cached_topic_timespan_list(topics_id, snapshots_id=s['snapshots_id'], foci_id=None)
try:
total = timespans[0]['story_count']
except mediacloud.error.MCException:
total = 0
        except IndexError:  # this snapshot doesn't have any timespans (ie. it failed to generate correctly)
total = 0
# search by tag to find out how many stories were spidered
spidered = 0
try:
spidered = apicache.topic_story_count(user_mediacloud_key(), topics_id,
snapshots_id=s['snapshots_id'], foci_id=None,
timespans_id=timespans[0]['timespans_id'],
q="* AND tags_id_stories:{}".format(TAG_SPIDERED_STORY))['count']
except mediacloud.error.MCException:
spidered = 0
        except IndexError:  # this snapshot doesn't have any timespans (ie. it failed to generate correctly)
            spidered = 0
seeded = total - spidered
counts[s['snapshots_id']] = {'total': total, 'spidered': spidered, 'seeded': seeded}
return jsonify(counts)
|
|
"""
###############################################################################
ModelsDict: Abstract Class for Containing Models
###############################################################################
"""
import inspect
from collections import OrderedDict
from OpenPNM.Base import logging, Workspace
logger = logging.getLogger()
class ModelWrapper(dict):
r"""
Accepts a model from the OpenPNM model library, as well as all required
    and optional arguments, then wraps it in a custom dictionary with
various methods for working with the models.
"""
COMPONENTS = ['model', 'network', 'geometry', 'phase', 'physics', 'propname']
def __init__(self, **kwargs):
self.update(**kwargs)
def __call__(self):
return self['model'](**self)
def __str__(self):
if self['model'] is None:
return 'No model specified.'
horizontal_rule = '-' * 60
lines = [horizontal_rule]
lines.append(self['model'].__module__ + '.' + self['model'].__name__)
lines.append(horizontal_rule)
lines.append('{0:<20s} {1}'.format('Argument Name', 'Value / (Default)'))
lines.append(horizontal_rule)
# Scan default argument names and values of model
defs = {}
if self['model'].__defaults__ is not None:
vals = list(inspect.getargspec(self['model']).defaults)
keys = inspect.getargspec(self['model']).args[-len(vals):]
# Put defaults into the dict
defs.update(zip(keys, vals))
keys = list(self.keys())
keys.sort()
for item in keys:
if item not in self.COMPONENTS:
if item not in defs.keys():
defs[item] = '---'
lines.append('{0:<20s} {1} / ({2})'.format(item,
self[item],
defs[item]))
lines.append(horizontal_rule)
return '\n'.join(lines)
    def regenerate(self):
        logger.warning("This method has been deprecated, use 'run' instead.")
        return self.run()
def run(self):
r"""
Regenerate the model
"""
master = self._find_master()
# Determine object type, and assign associated objects
self_type = [item.__name__ for item in master.__class__.__mro__]
kwargs = {}
if 'GenericGeometry' in self_type:
kwargs['network'] = master._net
kwargs['geometry'] = master
elif 'GenericPhase' in self_type:
kwargs['network'] = master._net
kwargs['phase'] = master
elif 'GenericPhysics' in self_type:
kwargs['network'] = master._net
kwargs['phase'] = master._phases[0]
kwargs['physics'] = master
else:
kwargs['network'] = master
kwargs.update(self)
return self['model'](**kwargs)
def _find_master(self):
mgr = Workspace()
master = []
for item in list(mgr.keys()):
if mgr[item].models is not None:
for model in list(mgr[item].models.keys()):
if mgr[item].models[model] is self:
master.append(mgr[item])
if len(master) > 1:
raise Exception('More than one master found! This model dictionary '
'has been associated with multiple objects. To use '
                            'the same dictionary multiple times use the copy '
'method.')
return master[0]
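# Minimal usage sketch (hypothetical, not part of the original module): a ModelWrapper is normally
# created for you by ModelsDict.add, but it can be built directly around any callable that accepts
# keyword arguments, e.g.:
#
#     def constant_value(network=None, value=1.0, **kwargs):
#         return value
#
#     wrapper = ModelWrapper(propname='pore.value', model=constant_value, value=2.0)
#     print(wrapper)  # tabulated view of supplied arguments vs. their defaults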
class GenericModel(ModelWrapper):
r"""
This class was deprecated, and replaced by ModelWrapper. Unfortunately,
this broke the ability to load files that were saved prior to the
    deprecation. This is placed here as a 'wrapper' to the new ModelWrapper.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
class ModelsDict(OrderedDict):
r"""
This custom dictionary stores the models that are associated with each
OpenPNM object. This is an ordered dict with a few additional methods.
This ModelsDict class can be created as a standalone object, then
associated with an OpenPNM object, and ModelsDicts from one object can
be copied and attached to another.
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> Ps = pn.pores(labels='top',mode='not')
>>> geom = OpenPNM.Geometry.GenericGeometry(network=pn, pores=Ps, throats=pn.Ts)
Add a model to the object's ModelsDict:
>>> f = OpenPNM.Geometry.models.pore_seed.random
>>> geom.models.add(propname='pore.seed', model=f, seed=0)
It is possible to use the ModelsDict from one object with another object:
>>> Ps = pn.pores('top')
>>> boun = OpenPNM.Geometry.GenericGeometry(network=pn, pores=Ps)
>>> boun.models # The boun object has no models in its Models dict
ModelsDict()
>>> mod = geom.models.copy() # Create a copy of geom's models
>>> boun.models = mod # Use the same set of models on boun as geom
Because ``copy`` was used above, the ModelsDict on the two objects are
equal but different:
>>> boun.models == geom.models
True
>>> boun.models is geom.models
False
"""
def __setitem__(self, propname, model):
temp = ModelWrapper(propname=propname, model=None)
temp.update(**model)
super().__setitem__(propname, temp)
def __str__(self):
horizontal_rule = '-' * 60
lines = [horizontal_rule]
lines.append('{0:<5s} {1:<30s} {2}'.format('#',
'Property Name',
'Regeneration Mode'))
lines.append(horizontal_rule)
for i, item in enumerate(self.keys()):
            fmt = '{0:<5d} {1:<30s} {2:<20s}'
            lines.append(fmt.format(i + 1, item, self[item]['regen_mode']))
lines.append(horizontal_rule)
return '\n'.join(lines)
def keys(self):
return list(super().keys())
def regenerate(self, props='', mode='inclusive'):
r"""
This updates properties using any models on the object that were
assigned using ``add_model``
Parameters
----------
props : string or list of strings
            The names of the properties that should be updated; if left blank, all models are regenerated
mode : string
This controls which props are regenerated and how. Options are:
* 'inclusive': (default) This regenerates all given properties
            * 'exclude': This regenerates all properties EXCEPT the given ones
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> geom = OpenPNM.Geometry.GenericGeometry(network=pn,
... pores=pn.pores(),
... throats=pn.throats())
>>> geom['pore.diameter'] = 1
>>> import OpenPNM.Geometry.models as gm # Import Geometry model library
>>> f = gm.pore_area.cubic
>>> geom.add_model(propname='pore.area', model=f)
>>> geom['pore.area'][0] # Look at area value in pore 0
1
>>> geom['pore.diameter'] = 2
>>> geom.models.regenerate() # Regenerate all models
>>> geom['pore.area'][0] # Look at pore area calculated with new diameter
4
"""
master = self._find_master()
# If empty, assume all models are to be regenerated
if props == '':
props = list(self.keys())
            # Skip models that are meant to be regenerated 'on_demand' only;
            # filter instead of removing items while iterating over the list
            props = [item for item in props if self[item]['regen_mode'] != 'on_demand']
elif type(props) == str:
props = [props]
if mode == 'exclude':
temp = list(self.keys())
for item in props:
temp.remove(item)
props = temp
for item in list(self.keys()):
if self[item]['regen_mode'] == 'constant' and item in props:
props.remove(item)
logger.info('Models are being recalculated in the following order: ')
count = 0
for item in props:
if item in list(self.keys()):
master[item] = self[item].run()
logger.info(str(count) + ' : ' + item)
count += 1
else:
                logger.warning('Requested property is not a dynamic model: ' + item)
def add(self, propname, model, regen_mode='normal', **kwargs):
r"""
Add specified property estimation model to the object.
Parameters
----------
propname : string
The name of the property to use as dictionary key, such as
'pore.diameter' or 'throat.length'
model : function
The property estimation function to use
regen_mode : string
Controls when and if the property is regenerated. Options are:
* 'normal' : The property is stored as static data and is only
regenerated when the object's ``regenerate`` is called
* 'constant' : The property is calculated once when this method is first
run, but always maintains the same value
* 'deferred' : The model is stored on the object but not run until
``regenerate`` is called
* 'on_demand' : The model is stored on the object but not run, AND will
only run if specifically requested in ``regenerate``
Notes
-----
This method is inherited by all net/geom/phys/phase objects. It takes
        the received model and stores it on the object in a private dictionary
called _models. This dict is an 'OrderedDict', so that the models can
be run in the same order they are added.
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> geom = OpenPNM.Geometry.GenericGeometry(network=pn)
>>> import OpenPNM.Geometry.models as gm
>>> f = gm.pore_misc.random # Get model from Geometry library
>>> geom.add_model(propname='pore.seed',model=f)
>>> geom.models.keys() # Look in dict to verify model was added
['pore.seed']
>>> print(geom.models['pore.seed']) # Look at arguments for model
------------------------------------------------------------
OpenPNM.Geometry.models.pore_misc.random
------------------------------------------------------------
Argument Name Value / (Default)
------------------------------------------------------------
num_range [0, 1] / ([0, 1])
regen_mode normal / (---)
seed None / (None)
------------------------------------------------------------
"""
master = self._find_master()
if master is None:
            logger.warning('ModelsDict has no master, changing regen_mode '
                           'to deferred')
regen_mode = 'deferred'
# Build dictionary containing default model values, plus other required info
f = {'model': model, 'regen_mode': regen_mode}
# Scan default argument names and values of model
if model.__defaults__ is not None:
vals = list(inspect.getargspec(model).defaults)
keys = inspect.getargspec(model).args[-len(vals):]
# Put defaults into the dict
f.update(zip(keys, vals))
# Update dictionary with supplied arguments, overwriting defaults
f.update(**kwargs)
# Add model to ModelsDict
self[propname] = f
# Now generate data as necessary
if regen_mode in ['normal', 'constant']:
master[propname] = self[propname].run()
if regen_mode in ['deferred', 'on_demand']:
pass
def remove(self, propname):
r"""
Removes selected model from the dictionary, as well as removing its
associated data from the master Core object.
Parameters
----------
propname : string
The name of the model to remove
"""
master = self._find_master()
        master.pop(propname, None)
del self[propname]
def reorder(self, new_order):
r"""
Reorders the models on the object to change the order in which they
are regenerated, where item 0 is calculated first.
Parameters
----------
new_order : dict
A dictionary containing the model name(s) as the key, and the
location(s) in the new order as the value
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> geom = OpenPNM.Geometry.TestGeometry(network=pn,
... pores=pn.Ps,
... throats=pn.Ts)
>>> geom.models.keys()
['pore.seed', 'throat.seed', 'throat.length']
>>> geom.models.reorder({'pore.seed': 1, 'throat.length': 0})
>>> geom.models.keys()
['throat.length', 'pore.seed', 'throat.seed']
"""
# Generate numbered list of current models
order = [item for item in list(self.keys())]
# Remove supplied models from list
for item in new_order:
order.remove(item)
# Add models back to list in new order
inv_dict = {v: k for k, v in new_order.items()}
for item in sorted(inv_dict):
order.insert(item, inv_dict[item])
# Now rebuild models OrderedDict in new order
for item in order:
self.move_to_end(item)
def _find_master(self):
mgr = Workspace()
master = []
for item in list(mgr.keys()):
if mgr[item].models is self:
master.append(mgr[item])
if len(master) > 1:
raise Exception('More than one master found! This model dictionary '
'has been associated with multiple objects. To use the '
'same dictionary multiple times use the copy method.')
elif len(master) == 0:
raise Exception('ModelsDict has no master.')
return master[0]
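# Sketch of the default-argument scan used by ModelsDict.add and ModelWrapper.__str__ above
# (standalone illustration under the assumption of a simple model function):
#
#     def model(network, scale=2.0, seed=None):
#         pass
#     vals = list(inspect.getargspec(model).defaults)       # [2.0, None]
#     keys = inspect.getargspec(model).args[-len(vals):]    # ['scale', 'seed']
#     dict(zip(keys, vals))                                 # {'scale': 2.0, 'seed': None}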
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
import mock
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, *ret):
self.ret = ret
self.ind = 0
def _call_method(self, *args):
# return fake objects in circular manner
self.ind = (self.ind + 1) % len(self.ret)
return self.ret[self.ind - 1]
def _get_vim(self):
fake_vim = fake.DataObject()
client = fake.DataObject()
client.factory = 'fake_factory'
fake_vim.client = client
return fake_vim
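# For illustration (not part of the original tests): fake_session cycles through the return values
# it was constructed with, so fake_session(a, b)._call_method(...) returns a, then b, then a again
# on successive calls.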
class partialObject(object):
def __init__(self, path='fake-path'):
self.path = path
self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
fake.reset()
def test_get_datastore_ref_and_name(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore())
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects))
self.assertEqual(result[1], "fake-ds")
self.assertEqual(result[2], units.Ti)
self.assertEqual(result[3], 500 * units.Gi)
def test_get_datastore_ref_and_name_with_regex(self):
# Test with a regex that matches with a datastore
        datastore_valid_regex = re.compile(r"^openstack.*\d$")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None, datastore_valid_regex)
self.assertEqual("openstack-ds0", result[1])
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
"HostSystem"),
fake.ManagedObjectReference("host2",
"HostSystem")]
hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
prop_dict = {'host': hosts, 'resourcePool': respool}
hardware = fake.DataObject()
hardware.numCpuCores = 8
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
runtime_host_1 = fake.DataObject()
runtime_host_1.connectionState = "connected"
runtime_host_1.inMaintenanceMode = False
runtime_host_2 = fake.DataObject()
runtime_host_2.connectionState = connection_state
runtime_host_2.inMaintenanceMode = maintenance_mode
prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_1)]
prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_2)]
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_1))
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_2))
respool_resource_usage = fake.DataObject()
respool_resource_usage.maxUsage = 5368709120
respool_resource_usage.overallUsage = 2147483648
session = fake_session()
def fake_call_method(*args):
if "get_dynamic_properties" in args:
return prop_dict
elif "get_properties_for_a_collection_of_objects" in args:
return fake_objects
else:
return respool_resource_usage
with mock.patch.object(fake_session, '_call_method',
fake_call_method):
result = vm_util.get_stats_from_cluster(session, "cluster1")
cpu_info = {}
mem_info = {}
if connection_state == "connected" and not maintenance_mode:
cpu_info['vcpus'] = 32
cpu_info['cores'] = 16
cpu_info['vendor'] = ["Intel", "Intel"]
cpu_info['model'] = ["Intel(R) Xeon(R)",
"Intel(R) Xeon(R)"]
else:
cpu_info['vcpus'] = 16
cpu_info['cores'] = 8
cpu_info['vendor'] = ["Intel"]
cpu_info['model'] = ["Intel(R) Xeon(R)"]
mem_info['total'] = 5120
mem_info['free'] = 3072
expected_stats = {'cpu': cpu_info, 'mem': mem_info}
self.assertEqual(expected_stats, result)
def test_get_stats_from_cluster_hosts_connected_and_active(self):
self._test_get_stats_from_cluster()
def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
self._test_get_stats_from_cluster(connection_state="disconnected")
def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
self._test_get_stats_from_cluster(maintenance_mode=True)
def test_get_datastore_ref_and_name_with_token(self):
        regex = re.compile(r"^ds.*\d$")
fake0 = fake.FakeRetrieveResult()
fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
setattr(fake0, 'token', 'token-0')
fake1 = fake.FakeRetrieveResult()
fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
result = vm_util.get_datastore_ref_and_name(
fake_session(fake0, fake1), None, None, regex)
self.assertEqual("ds2", result[1])
def test_get_datastore_ref_and_name_with_list(self):
# Test with a regex containing whitelist of datastores
datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("openstack-ds1"))
fake_objects.add_object(fake.Datastore("openstack-ds2"))
result = vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None, datastore_valid_regex)
self.assertNotEqual("openstack-ds1", result[1])
def test_get_datastore_ref_and_name_with_regex_error(self):
# Test with a regex that has no match
# Checks if code raises DatastoreNotFound with a specific message
datastore_invalid_regex = re.compile("unknown-ds")
exp_message = (_("Datastore regex %s did not match any datastores")
% datastore_invalid_regex.pattern)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
# assertRaisesRegExp would have been a good choice instead of
# try/catch block, but it's available only from Py 2.7.
try:
vm_util.get_datastore_ref_and_name(
fake_session(fake_objects), None, None,
datastore_invalid_regex)
except exception.DatastoreNotFound as e:
self.assertEqual(exp_message, e.args[0])
else:
self.fail("DatastoreNotFound Exception was not raised with "
"message: %s" % exp_message)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(None), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(None), cluster="fake-cluster")
def test_get_host_ref_from_id(self):
fake_host_name = "ha-host"
fake_host_sys = fake.HostSystem(fake_host_name)
fake_host_id = fake_host_sys.obj.value
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake_host_sys)
ref = vm_util.get_host_ref_from_id(
fake_session(fake_objects), fake_host_id, ['name'])
self.assertIsInstance(ref, fake.HostSystem)
self.assertEqual(fake_host_id, ref.obj.value)
host_name = vm_util.get_host_name_from_host_ref(ref)
self.assertEqual(fake_host_name, host_name)
def test_get_host_ref_no_hosts_in_cluster(self):
self.assertRaises(exception.NoValidHost,
vm_util.get_host_ref,
fake_session(""), 'fake_cluster')
def test_get_datastore_ref_and_name_no_host_in_cluster(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(""), 'fake_cluster')
@mock.patch.object(vm_util, '_get_vm_ref_from_vm_uuid',
return_value=None)
def test_get_host_name_for_vm(self, _get_ref_from_uuid):
fake_host = fake.HostSystem()
fake_host_id = fake_host.obj.value
fake_vm = fake.VirtualMachine(name='vm-123',
runtime_host=fake_host.obj)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake_vm)
vm_ref = vm_util.get_vm_ref_from_name(
fake_session(fake_objects), 'vm-123')
self.assertIsNotNone(vm_ref)
host_id = vm_util.get_host_id_from_vm_ref(
fake_session(fake_objects), vm_ref)
self.assertEqual(fake_host_id, host_id)
def test_property_from_property_set(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
good_objects = fake.FakeRetrieveResult()
results_good = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val=MoRef(value='bar1')),
DynamicProperty(
name='runtime.host', val=MoRef(value='host-123')),
DynamicProperty(name='foo', val=MoRef(value='bar2')),
]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
for result in results_good:
good_objects.add_object(result)
bad_objects = fake.FakeRetrieveResult()
results_bad = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val='bar1'),
DynamicProperty(name='foo', val='bar2'), ]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
for result in results_bad:
bad_objects.add_object(result)
prop = vm_util.property_from_property_set(
'runtime.host', good_objects)
self.assertIsNotNone(prop)
value = prop.val.value
self.assertEqual('host-123', value)
prop2 = vm_util.property_from_property_set(
'runtime.host', bad_objects)
self.assertIsNone(prop2)
prop3 = vm_util.property_from_property_set('foo', good_objects)
self.assertIsNotNone(prop3)
val3 = prop3.val.value
self.assertEqual('bar1', val3)
prop4 = vm_util.property_from_property_set('foo', bad_objects)
self.assertIsNotNone(prop4)
self.assertEqual('bar1', prop4.val)
def test_get_datastore_ref_and_name_inaccessible_ds(self):
data_store = fake.Datastore()
data_store.set("summary.accessible", False)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(data_store)
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(fake_objects))
def test_get_resize_spec(self):
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
fake_instance)
expected = """{'memoryMB': 2048,
'numCPUs': 2,
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_get_cdrom_attach_config_spec(self):
result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
fake.Datastore(),
"/tmp/foo.iso",
200, 0)
expected = """{
'deviceChange': [
{
'device': {
'connectable': {
'allowGuestControl': False,
'startConnected': True,
'connected': True,
'obj_name': 'ns0: VirtualDeviceConnectInfo'
},
'backing': {
'datastore': {
"summary.type": "VMFS",
"summary.accessible":true,
"summary.name": "fake-ds",
"summary.capacity": 1099511627776,
"summary.freeSpace": 536870912000,
"browser": ""
},
'fileName': '/tmp/foo.iso',
'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
},
'controllerKey': 200,
'unitNumber': 0,
'key': -1,
'obj_name': 'ns0: VirtualCdrom'
},
'operation': 'add',
'obj_name': 'ns0: VirtualDeviceConfigSpec'
}
],
'obj_name': 'ns0: VirtualMachineConfigSpec'
}
"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def test_lsilogic_controller_spec(self):
# Test controller spec returned for lsiLogic sas adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type="lsiLogicsas")
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
if parent:
disk_backing.parent = parent
disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, controller]
return devices
def test_get_vmdk_path_and_adapter_type(self):
filename = '[test_datastore] test_file.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(vmdk_info[0], filename)
def test_get_vmdk_path_and_adapter_type_with_match(self):
n_filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertEqual(n_filename, vmdk_info[0])
def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
n_filename = '[test_datastore] diuu/diuu.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
devices, uuid='uuid')
adapter_type = vmdk_info[1]
self.assertEqual('lsiLogicsas', adapter_type)
self.assertIsNone(vmdk_info[0])
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
# Adapter type in vmdk descriptor is same for LSI-SAS & LSILogic
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
self.assertEqual("lsiLogic", vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def test_find_allocated_slots(self):
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
disk3 = fake.VirtualDisk(201, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
taken = vm_util._find_allocated_slots(devices)
self.assertEqual([0, 1], sorted(taken[200]))
self.assertEqual([1], taken[201])
self.assertEqual([7], taken[1000])
def test_allocate_controller_key_and_unit_number_ide_default(self):
# Test that default IDE controllers are used when there is a free slot
# on them
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [disk1, disk2, ide0, ide1]
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
None,
devices,
'ide')
self.assertEqual(201, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_ide(self):
# Test that a new controller is created when there is no free slot on
# the default IDE controllers
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [ide0, ide1]
for controller_key in [200, 201]:
for unit_number in [0, 1]:
disk = fake.VirtualDisk(controller_key, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'ide')
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNotNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi(self):
# Test that we allocate on existing SCSI controller if there is a free
# slot on it
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
for unit_number in range(7):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'lsiLogic')
self.assertEqual(1000, controller_key)
self.assertEqual(8, unit_number)
self.assertIsNone(controller_spec)
def _test_get_vnc_config_spec(self, port):
result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
port)
return result
def test_get_vnc_config_spec(self):
result = self._test_get_vnc_config_spec(7)
expected = """{'extraConfig': [
{'value': 'true',
'key': 'RemoteDisplay.vnc.enabled',
'obj_name': 'ns0:OptionValue'},
{'value': 7,
'key': 'RemoteDisplay.vnc.port',
'obj_name': 'ns0:OptionValue'}],
'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
def _create_fake_vms(self):
fake_vms = fake.FakeRetrieveResult()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
for i in range(10):
vm = fake.ManagedObject()
opt_val = OptionValue(key='', value=5900 + i)
vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
fake_vms.add_object(vm)
return fake_vms
def test_get_vnc_port(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10000, group='vmware')
actual = vm_util.get_vnc_port(fake_session(fake_vms))
self.assertEqual(actual, 5910)
def test_get_vnc_port_exhausted(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10, group='vmware')
self.assertRaises(exception.ConsolePortRangeExhausted,
vm_util.get_vnc_port,
fake_session(fake_vms))
def test_get_all_cluster_refs_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
refs = vm_util.get_all_cluster_refs_by_name(fake_session(fake_objects),
['fake_cluster'])
        self.assertFalse(refs)
def test_get_all_cluster_refs_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(fake_session(fake_objects),
['cluster'])
        self.assertEqual(1, len(refs))
def test_get_all_cluster_refs_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
refs = vm_util.get_all_cluster_refs_by_name(fake_session(fake_objects),
['cluster'])
        self.assertFalse(refs)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
        obj = ObjectContent(propSet=[
            DynamicProperty(name='foo', val="bar")])
        propdict = vm_util.propset_dict(obj.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
        obj = ObjectContent(propSet=[
            DynamicProperty(name='foo', val="bar"),
            DynamicProperty(name='some.thing',
                            val=MoRef(value='else')),
            DynamicProperty(name='another.thing', val='value')])
        propdict = vm_util.propset_dict(obj.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
def _test_detach_virtual_disk_spec(self, destroy_disk=False):
virtual_device_config = vm_util.detach_virtual_disk_spec(
fake.FakeFactory(),
'fake_device',
destroy_disk)
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('fake_device', virtual_device_config.device)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy', virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
def test_detach_virtual_disk_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=False)
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
def test_get_vm_create_spec(self):
instance_uuid = uuidutils.generate_uuid()
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': instance_uuid,
'vcpus': 2, 'memory_mb': 2048}
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
fake_instance, instance_uuid,
'fake-datastore', [])
expected = """{
'files': {'vmPathName': '[fake-datastore]',
'obj_name': 'ns0:VirtualMachineFileInfo'},
'instanceUuid': '%(instance_uuid)s',
'name': '%(instance_uuid)s', 'deviceChange': [],
'extraConfig': [{'value': '%(instance_uuid)s',
'key': 'nvp.vm-uuid',
'obj_name': 'ns0:OptionValue'}],
'memoryMB': 2048,
'obj_name': 'ns0:VirtualMachineConfigSpec',
'guestId': 'otherGuest',
'tools': {'beforeGuestStandby': True,
'beforeGuestReboot': True,
'beforeGuestShutdown': True,
'afterResume': True,
'afterPowerOn': True,
'obj_name': 'ns0:ToolsConfigInfo'},
'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
expected = re.sub(r'\s+', '', expected)
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
|
|
"""
I/O routines
"""
import os
import time
from astropy.io import fits
import numpy as np
import multiprocessing
from desispec.interpolation import resample_flux
from desispec.io.util import write_bintable
import desispec.io
import desimodel.io
#-------------------------------------------------------------------------
#- simspec
def write_simspec(meta, truth, expid, night, header=None, outfile=None):
"""
Write $DESI_SPECTRO_SIM/$PIXPROD/{night}/simspec-{expid}.fits
Args:
meta : metadata table to write to "METADATA" HDU
truth : dictionary with keys:
FLUX - 2D array [nspec, nwave] in erg/s/cm2/A
WAVE - 1D array of vacuum wavelengths [Angstroms]
SKYFLUX - array of sky flux [erg/s/cm2/A/arcsec],
either 1D [nwave] or 2D [nspec, nwave]
PHOT_{B,R,Z} - 2D array [nspec, nwave] of object photons/bin
SKYPHOT_{B,R,Z} - 1D or 2D array of sky photons/bin
expid : integer exposure ID
night : string YEARMMDD
header : optional dictionary of header items to add to output
outfile : optional filename to write (otherwise auto-derived)
Returns:
full file path of output file written
"""
#- Where should this go?
if outfile is None:
outdir = simdir(night, mkdir=True)
outfile = '{}/simspec-{:08d}.fits'.format(outdir, expid)
#- Object flux HDU (which might be just a header, e.g. for an arc)
hdr = desispec.io.util.fitsheader(header)
wave = truth['WAVE']
hdr['CRVAL1'] = (wave[0], 'Starting wavelength [Angstroms]')
hdr['CDELT1'] = (wave[1]-wave[0], 'Wavelength step [Angstroms]')
hdr['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hdr['LOGLAM'] = (0, 'linear wavelength steps, not log10')
if 'FLUX' in truth:
hdr['EXTNAME'] = ('FLUX', 'Object flux [erg/s/cm2/A]')
fits.writeto(outfile, truth['FLUX'].astype(np.float32), header=hdr, clobber=True)
else:
fits.writeto(outfile, np.zeros(0), header=hdr, clobber=True)
#- Sky flux HDU
if 'SKYFLUX' in truth:
hdr['EXTNAME'] = ('SKYFLUX', 'Sky flux [erg/s/cm2/A/arcsec2]')
hdu = fits.ImageHDU(truth['SKYFLUX'].astype(np.float32), header=hdr)
fits.append(outfile, hdu.data, header=hdu.header)
#- Metadata table HDU
if meta is not None:
comments = dict(
OBJTYPE = 'Object type (ELG, LRG, QSO, STD, STAR)',
REDSHIFT = 'true object redshift',
TEMPLATEID = 'input template ID',
O2FLUX = '[OII] flux [erg/s/cm2]',
)
units = dict(
# OBJTYPE = 'Object type (ELG, LRG, QSO, STD, STAR)',
# REDSHIFT = 'true object redshift',
# TEMPLATEID = 'input template ID',
O2FLUX = 'erg/s/cm2',
)
write_bintable(outfile, meta, header=None, extname="METADATA",
comments=comments, units=units)
#- Write object photon and sky photons for each channel
for channel in ['B', 'R', 'Z']:
hdr = fits.Header()
wave = truth['WAVE_'+channel]
hdr['CRVAL1'] = (wave[0], 'Starting wavelength [Angstroms]')
hdr['CDELT1'] = (wave[1]-wave[0], 'Wavelength step [Angstroms]')
hdr['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hdr['LOGLAM'] = (0, 'linear wavelength steps, not log10')
extname = 'PHOT_'+channel
hdr['EXTNAME'] = (extname, channel+' channel object photons per bin')
hdu = fits.ImageHDU(truth[extname].astype(np.float32), header=hdr)
fits.append(outfile, hdu.data, header=hdu.header)
extname = 'SKYPHOT_'+channel
if extname in truth:
hdr['EXTNAME'] = (extname, channel+' channel sky photons per bin')
hdu = fits.ImageHDU(truth[extname].astype(np.float32), header=hdr)
fits.append(outfile, hdu.data, header=hdu.header)
return outfile
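#- Minimal usage sketch (not part of the original module): build the smallest truth dictionary
#- that write_simspec accepts (FLUX/WAVE plus per-channel WAVE_*/PHOT_* arrays are assumed to be
#- sufficient here) and write it to an explicit outfile so $DESI_SPECTRO_SIM need not be set.
def _write_simspec_sketch(outfile, nspec=2, nwave=10):
    wave = np.linspace(3600.0, 9800.0, nwave)
    truth = dict(WAVE=wave, FLUX=np.ones((nspec, nwave)))
    for channel in ['B', 'R', 'Z']:
        truth['WAVE_' + channel] = wave
        truth['PHOT_' + channel] = np.ones((nspec, nwave))
    return write_simspec(meta=None, truth=truth, expid=0, night='20200101', outfile=outfile)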
#- TODO: this is more than just I/O. Refactor.
def write_simpix(img, camera, flavor, night, expid, header=None):
"""
Add noise to input image and write output simpix and pix files.
Args:
img : 2D noiseless image array
camera : e.g. b0, r1, z9
        flavor : arc, flat, or science
night : YEARMMDD string
expid : integer exposure id
Writes to $DESI_SPECTRO_SIM/$PIXPROD/{night}/
simpix-{camera}-{expid}.fits
pix-{camera}-{expid}.fits
Returns:
filepath to pix*.fits file that was written
"""
outdir = simdir(night, mkdir=True)
params = desimodel.io.load_desiparams()
channel = camera[0].lower()
#- Add noise, generate inverse variance and mask
rdnoise = params['ccd'][channel]['readnoise']
pix = np.random.poisson(img) + np.random.normal(scale=rdnoise, size=img.shape)
ivar = 1.0/(pix.clip(0) + rdnoise**2)
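    #- i.e. inverse variance assuming Var = signal (Poisson) + readnoise**2,
    #- with the noisy pixel value clipped at zero as the estimate of the signal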
mask = np.zeros(img.shape, dtype=np.int32)
#-----
#- Write noiseless image to simpix file
simpixfile = '{}/simpix-{}-{:08d}.fits'.format(outdir, camera, expid)
hdu = fits.PrimaryHDU(img, header=header)
hdu.header['VSPECTER'] = ('0.0.0', 'TODO: Specter version')
fits.writeto(simpixfile, hdu.data, header=hdu.header, clobber=True)
#- Add x y trace locations from PSF
psffile = '{}/data/specpsf/psf-{}.fits'.format(os.getenv('DESIMODEL'), channel)
psfxy = fits.open(psffile)
fits.append(simpixfile, psfxy['XCOEFF'].data, header=psfxy['XCOEFF'].header)
fits.append(simpixfile, psfxy['YCOEFF'].data, header=psfxy['YCOEFF'].header)
#-----
#- Write simulated raw data to pix file
#- Primary HDU: noisy image
outfile = '{}/pix-{}-{:08d}.fits'.format(outdir, camera, expid)
hdulist = fits.HDUList()
hdu = fits.PrimaryHDU(pix, header=header)
    hdu.header.append( ('CAMERA', camera, 'Spectrograph Camera') )
hdu.header.append( ('VSPECTER', '0.0.0', 'TODO: Specter version') )
hdu.header.append( ('EXPTIME', params['exptime'], 'Exposure time [sec]') )
hdu.header.append( ('RDNOISE', rdnoise, 'Read noise [electrons]'))
hdu.header.append( ('FLAVOR', flavor, 'Exposure type (arc, flat, science)'))
hdulist.append(hdu)
#- IVAR: Inverse variance (IVAR)
hdu = fits.ImageHDU(ivar, name='IVAR')
hdu.header.append(('RDNOISE', rdnoise, 'Read noise [electrons]'))
hdulist.append(hdu)
#- MASK: currently just zeros
hdu = fits.CompImageHDU(mask, name='MASK')
hdulist.append(hdu)
hdulist.writeto(outfile, clobber=True)
return outfile
#-------------------------------------------------------------------------
#- desimodel
def get_tile_radec(tileid):
"""
Return (ra, dec) in degrees for the requested tileid.
If tileid is not in DESI, return (0.0, 0.0)
    TODO: should it raise an exception instead?
"""
tiles = desimodel.io.load_tiles()
if tileid in tiles['TILEID']:
i = np.where(tiles['TILEID'] == tileid)[0][0]
return tiles[i]['RA'], tiles[i]['DEC']
else:
return (0.0, 0.0)
#-------------------------------------------------------------------------
#- spectral templates
#- Utility function to wrap resample_flux for multiprocessing map
def _resample_flux(args):
return resample_flux(*args)
def read_templates(wave, objtype, nspec=None, randseed=1, infile=None):
"""
    Returns nspec templates of type objtype sampled at wave
Inputs:
- wave : array of wavelengths to sample
- objtype : 'ELG', 'LRG', 'QSO', 'STD', or 'STAR'
- nspec : number of templates to return
- infile : (optional) input template file (see below)
    Returns flux[nspec, len(wave)], meta[nspec]
where flux is in units of 1e-17 erg/s/cm2/A/[arcsec^2] and
meta is a metadata table from the input template file
with redshift, mags, etc.
If infile is None, then $DESI_{objtype}_TEMPLATES must be set, pointing to
a file that has the observer frame flux in HDU 0 and a metadata table for
these objects in HDU 1. This code randomly samples n spectra from that file.
    TODO: add a settable randseed for reproducibility.
"""
if infile is None:
key = 'DESI_'+objtype.upper()+'_TEMPLATES'
if key not in os.environ:
raise ValueError("ERROR: $"+key+" not set; can't find "+objtype+" templates")
infile = os.getenv(key)
hdr = fits.getheader(infile)
flux = fits.getdata(infile, 0)
meta = fits.getdata(infile, 1).view(np.recarray)
ww = 10**(hdr['CRVAL1'] + np.arange(hdr['NAXIS1'])*hdr['CDELT1'])
#- Check flux units
fluxunits = hdr['BUNIT']
if not fluxunits.startswith('1e-17 erg'):
if fluxunits.startswith('erg'):
flux *= 1e17
else:
#- check for '1e-16 erg/s/cm2/A' style units
scale, units = fluxunits.split()
assert units.startswith('erg')
scale = float(scale)
flux *= (scale*1e17)
ntemplates = flux.shape[0]
randindex = np.arange(ntemplates)
np.random.shuffle(randindex)
if nspec is None:
nspec = flux.shape[0]
#- Serial version
# outflux = np.zeros([n, len(wave)])
# outmeta = np.empty(n, dtype=meta.dtype)
# for i in range(n):
# j = randindex[i%ntemplates]
# if 'Z' in meta:
# z = meta['Z'][j]
# else:
# z = 0.0
# if objtype == 'QSO':
# outflux[i] = resample_flux(wave, ww, flux[j])
# else:
# outflux[i] = resample_flux(wave, ww*(1+z), flux[j])
# outmeta[i] = meta[j]
#- Multiprocessing version
    #- Assemble list of args to pass to multiprocessing map
args = list()
outmeta = np.empty(nspec, dtype=meta.dtype)
for i in range(nspec):
j = randindex[i%ntemplates]
outmeta[i] = meta[j]
if 'Z' in meta.dtype.names:
z = meta['Z'][j]
else:
z = 0.0
#- ELG, LRG require shifting wave by (1+z); QSOs don't
if objtype == 'QSO':
args.append( (wave, ww, flux[j]) )
else:
args.append( (wave, ww*(1+z), flux[j]) )
ncpu = multiprocessing.cpu_count() // 2 #- avoid hyperthreading
    pool = multiprocessing.Pool(ncpu)
    outflux = pool.map(_resample_flux, args)
    pool.close()
    pool.join()
    return outflux, outmeta
#-------------------------------------------------------------------------
#- Utility functions
def simdir(night='', mkdir=False):
"""
Return $DESI_SPECTRO_SIM/$PIXPROD/{night}
If mkdir is True, create directory if needed
"""
dirname = os.path.join(os.getenv('DESI_SPECTRO_SIM'), os.getenv('PIXPROD'), night)
if mkdir and not os.path.exists(dirname):
os.makedirs(dirname)
return dirname
def _parse_filename(filename):
"""
    Parse filename and return (prefix, camera, expid); camera is None when the
    filename has no camera component.
"""
base = os.path.basename(os.path.splitext(filename)[0])
x = base.split('-')
if len(x) == 2:
return x[0], None, int(x[1])
elif len(x) == 3:
return x[0], x[1].lower(), int(x[2])
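#- Illustrative sketch (not part of the original module): how _parse_filename
#- splits standard simulation filenames.  The example paths are hypothetical.
def _example_parse_filename():
    assert _parse_filename('/tmp/simspec-00000002.fits') == ('simspec', None, 2)
    assert _parse_filename('/tmp/pix-b0-00000002.fits') == ('pix', 'b0', 2)
    return True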
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
class Rect(object):
"""Define a rectangular area.
    Many convenience handles and other properties are also defined - all of
    which may be assigned to, altering the position and sometimes the
    dimensions of the Rect:
- top -- y pixel extent
- bottom -- y pixel extent
- left -- x pixel extent
- right -- x pixel extent
- position -- (x, y) of bottom-left corner pixel
- origin -- (x, y) of bottom-left corner pixel
- center -- (x, y) of center pixel
- topleft -- (x, y) of top-left corner pixel
- topright -- (x, y) of top-right corner pixel
- bottomleft -- (x, y) of bottom-left corner pixel
- bottomright -- (x, y) of bottom-right corner pixel
- midtop -- (x, y) of middle of top side pixel
- midbottom -- (x, y) of middle of bottom side pixel
- midleft -- (x, y) of middle of left side pixel
- midright -- (x, y) of middle of right side pixel
- size -- (width, height) of rect
The Rect area includes the bottom and left borders but not the top and
right borders.
"""
def __init__(self, x, y, width, height):
"""Create a Rect with the bottom-left corner at (x, y) and
dimensions (width, height).
"""
self._x, self._y = x, y
self._width, self._height = width, height
def __nonzero__(self):
return bool(self.width and self.height)
def __repr__(self):
return 'Rect(xy=%.4g,%.4g; wh=%.4g,%.4g)' % (self.x, self.y,
self.width, self.height)
def __eq__(self, other):
"""Compare the two rects.
>>> r1 = Rect(0, 0, 10, 10)
>>> r1 == Rect(0, 0, 10, 10)
True
>>> r1 == Rect(1, 0, 10, 10)
False
>>> r1 == Rect(0, 1, 10, 10)
False
>>> r1 == Rect(0, 0, 11, 10)
False
>>> r1 == Rect(0, 0, 10, 11)
False
"""
return (self.x == other.x and self.y == other.y and
self.width == other.width and self.height == other.height)
# py3 compatibility: obj that defines __eq__ needs to define __hash__ to be
# hashable, and we need that class RectCell(Rect, Cell) be hashable
__hash__ = object.__hash__
def __ne__(self, other):
"""Compare the two rects.
>>> r1 = Rect(0, 0, 10, 10)
>>> r1 != Rect(0, 0, 10, 10)
False
>>> r1 != Rect(1, 0, 10, 10)
True
>>> r1 != Rect(0, 1, 10, 10)
True
>>> r1 != Rect(0, 0, 11, 10)
True
>>> r1 != Rect(0, 0, 10, 11)
True
"""
return not (self == other)
def copy(self):
return self.__class__(self.x, self.y, self.width, self.height)
# the following four properties will most likely be overridden in a
# subclass
def set_x(self, value):
self._x = value
x = property(lambda self: self._x, set_x)
def set_y(self, value):
self._y = value
y = property(lambda self: self._y, set_y)
def set_width(self, value):
self._width = value
width = property(lambda self: self._width, set_width)
def set_height(self, value):
self._height = value
height = property(lambda self: self._height, set_height)
def contains(self, x, y):
"""Return boolean whether the point defined by x, y is inside the
rect area.
"""
if x < self.x or x > self.x + self.width:
return False
if y < self.y or y > self.y + self.height:
return False
return True
def intersects(self, other):
"""Return boolean whether the interior of "other" rect (an object
with .x, .y, .width and .height attributes) overlaps the interior
of this Rect in any way.
"""
if self.x + self.width <= other.x:
return False
if other.x + other.width <= self.x:
return False
if self.y + self.height <= other.y:
return False
if other.y + other.height <= self.y:
return False
return True
def clippedBy(self, other):
"""bool. True iif intersection with other is smaller than self.
Equivalent: True if self doesn't fit entirely into other
>>> r1 = Rect(0, 0, 10, 10)
>>> r2 = Rect(1, 1, 9, 9)
>>> r2.clippedBy(r1) # r2 fits inside r1
False
>>> r1.clippedBy(r2) # r1 is clipped by r2
True
>>> r2 = Rect(1, 1, 11, 11)
>>> r1.intersect(r2)
Rect(xy=1,1; wh=9,9)
>>> r1.clippedBy(r2)
True
>>> r2.intersect(r1)
Rect(xy=1,1; wh=9,9)
>>> r2.clippedBy(r1)
True
>>> r2 = Rect(11, 11, 1, 1)
>>> r1.clippedBy(r2)
True
"""
intersection = self.intersect(other)
if intersection is None:
return True
return intersection != self
def intersect(self, other):
"""Find the intersection of two Rect s.
>>> r1 = Rect(0, 51, 200, 17)
>>> r2 = Rect(0, 64, 200, 55)
>>> r1.intersect(r2)
Rect(xy=0,64; wh=200,4)
>>> r1 = Rect(0, 64, 200, 55)
>>> r2 = Rect(0, 0, 200, 17)
        >>> print(r1.intersect(r2))
None
>>> r1 = Rect(10, 10, 10, 10)
>>> r2 = Rect(20, 20, 10, 10)
        >>> print(r1.intersect(r2))
None
>>> bool(Rect(0, 0, 1, 1))
True
>>> bool(Rect(0, 0, 1, 0))
False
>>> bool(Rect(0, 0, 0, 1))
False
>>> bool(Rect(0, 0, 0, 0))
False
"""
s_tr_x, s_tr_y = self.topright
o_tr_x, o_tr_y = other.topright
bl_x = max(self.x, other.x)
bl_y = max(self.y, other.y)
tr_x = min(s_tr_x, o_tr_x)
tr_y = min(s_tr_y, o_tr_y)
w, h = max(0, tr_x-bl_x), max(0, tr_y-bl_y)
if not w or not h:
return None
return self.__class__(bl_x, bl_y, w, h)
def set_position(self, value):
self._x, self._y = value
position = property(lambda self: (self._x, self._y), set_position)
def set_size(self, value):
self._width, self._height = value
size = property(lambda self: (self._width, self._height), set_size)
def get_origin(self):
return self.x, self.y
def set_origin(self, origin):
self.x, self.y = origin
origin = property(get_origin, set_origin)
def get_top(self):
return self.y + self.height
def set_top(self, y):
self.y = y - self.height
top = property(get_top, set_top)
# r/w, in pixels, y extent
def get_bottom(self):
return self.y
def set_bottom(self, y):
self.y = y
bottom = property(get_bottom, set_bottom)
def get_left(self):
return self.x
def set_left(self, x):
self.x = x
left = property(get_left, set_left)
def get_right(self):
return self.x + self.width
def set_right(self, x):
self.x = x - self.width
right = property(get_right, set_right)
def get_center(self):
return self.x + self.width//2, self.y + self.height//2
def set_center(self, center):
x, y = center
        self.position = (x - self.width//2, y - self.height//2)
center = property(get_center, set_center)
def get_midtop(self):
return self.x + self.width//2, self.y + self.height
def set_midtop(self, midtop):
x, y = midtop
self.position = (x - self.width//2, y - self.height)
midtop = property(get_midtop, set_midtop)
def get_midbottom(self):
return self.x + self.width//2, self.y
def set_midbottom(self, midbottom):
x, y = midbottom
self.position = (x - self.width//2, y)
midbottom = property(get_midbottom, set_midbottom)
def get_midleft(self):
return self.x, self.y + self.height//2
def set_midleft(self, midleft):
x, y = midleft
self.position = (x, y - self.height//2)
midleft = property(get_midleft, set_midleft)
def get_midright(self):
return self.x + self.width, self.y + self.height//2
def set_midright(self, midright):
x, y = midright
self.position = (x - self.width, y - self.height//2)
midright = property(get_midright, set_midright)
def get_topleft(self):
return self.x, self.y + self.height
def set_topleft(self, position):
x, y = position
self.position = (x, y - self.height)
topleft = property(get_topleft, set_topleft)
def get_topright(self):
return self.x + self.width, self.y + self.height
def set_topright(self, position):
x, y = position
self.position = (x - self.width, y - self.height)
topright = property(get_topright, set_topright)
def get_bottomright(self):
return self.x + self.width, self.y
def set_bottomright(self, position):
x, y = position
self.position = (x - self.width, y)
bottomright = property(get_bottomright, set_bottomright)
def get_bottomleft(self):
return self.x, self.y
def set_bottomleft(self, position):
self.x, self.y = position
bottomleft = property(get_bottomleft, set_bottomleft)
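# Illustrative usage sketch (not part of the cocos2d API): shows how the
# convenience properties and intersect()/clippedBy() interact.
def _rect_usage_example():
    """
    >>> r = Rect(0, 0, 10, 10)
    >>> r.center
    (5, 5)
    >>> r.topright
    (10, 10)
    >>> r.intersect(Rect(5, 5, 10, 10))
    Rect(xy=5,5; wh=5,5)
    >>> r.clippedBy(Rect(5, 5, 10, 10))
    True
    """
    return Rect(0, 0, 10, 10)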
|
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Auto-align the net entities in a given nodespace
"""
__author__ = 'joscha'
__date__ = '15.10.12'
from collections import OrderedDict
from micropsi_core.tools import OrderedSet
import math
BORDER = 50.0
GRID = 170.0
PREFERRED_WIDTH = 8.0
def align(nodenet, nodespace):
"""aligns the entities in the given nodenet.
Arguments:
nodenet: current node net
nodespace: the nodespace in which the entities are to be aligned
Returns:
True on success, False otherwise
"""
nodespace = nodenet.get_nodespace(nodespace).uid
unaligned_nodespaces = sorted(nodenet.get_nodespace(nodespace).get_known_ids('nodespaces'),
key=lambda i:nodenet.get_nodespace(i).index)
unaligned_nodes = sorted(nodenet.get_nodespace(nodespace).get_known_ids('nodes'),
key = lambda i: nodenet.get_node(i).index)
sensors = [ s for s in unaligned_nodes if nodenet.get_node(s).type == "Sensor" ]
actors = [ a for a in unaligned_nodes if nodenet.get_node(a).type == "Actor" ]
activators = [ a for a in unaligned_nodes if nodenet.get_node(a).type == "Activator" ]
unaligned_nodes = [ n for n in unaligned_nodes if not nodenet.get_node(n).type in ("Sensor", "Actor", "Activator") ]
# position nodespaces
for i, id in enumerate(unaligned_nodespaces):
nodenet.get_nodespace(id).position = calculate_grid_position(i)
start_position = (BORDER + GRID/2, BORDER + (0.5+math.ceil(len(unaligned_nodespaces)/PREFERRED_WIDTH))*GRID)
# simplify linkage
group = unify_links(nodenet, unaligned_nodes)
# connect all nodes that have por- and ret-links
por_groups = group_horizontal_links(group)
# connect native modules
# group_other_links(por_groups)
# group nodes that share a sur-linked parent below that parent
group_with_same_parent(por_groups)
# put sensors and actors below
sensor_group = HorizontalGroup([ DisplayNode(i) for i in sensors ] + [ DisplayNode(i) for i in actors ])
    activator_group = HorizontalGroup([ DisplayNode(i) for i in activators ])
    por_groups.append(sensor_group)
    por_groups.append(activator_group)
# calculate actual coordinates by traversing the group structure
por_groups.arrange(nodenet, start_position)
return True
INVERSE_DIRECTIONS = { "s": "n", "w": "e", "nw": "se", "ne": "sw",
"n": "s", "e": "w", "se": "nw", "sw": "ne",
"o": "O", "O": "o", "b": "a", "a": "b" }
class DisplayNode(object):
def __init__(self, uid, directions = None, parent = None):
self.uid = uid
self.directions = directions or {}
self.parent = parent
self.stackable = False
def __repr__(self):
params = "%s" % str(self.uid)
if self.directions:
params += ", dirs: "
for i in self.directions:
params += "[%s]: " % i
for j in self.directions[i]:
params += "%s, " % str(j.uid)
return '%s(%s)' % ("Node", params)
def __repr__2(self):
params = "'%s'" % self.uid
if self.directions:
params += ", directions=%r" % self.directions
if self.parent:
params += ", parent=%r" % self.parent
return '%s(%s)' % (self.__class__.__name__, params)
def width(self):
return 1
def height(self):
return 1
def arrange(self, nodenet, starting_point = (0,0)):
nodenet.get_node(self.uid).position = starting_point
def unify_links(nodenet, node_id_list):
"""create a proxy representation of the node space to simplify bi-directional links.
This structure is an ordered set of proxy nodes (DisplayNode) with directions.
Each direction is marked by its key (such as "n"), and followed by a list of nodes
that are linked in that direction. The nodes are sorted by their index (as marked in
the node net).
Arguments:
nodenet: the nodenet that we are working on
node_id_list: a list of node ids to be processed
"""
node_index = OrderedDict([(i, DisplayNode(i)) for i in node_id_list])
for node_id in node_id_list:
node = nodenet.get_node(node_id)
vertical_only = True
for gate_type in node.get_gate_types():
direction = {"sub": "s", "ret": "w", "cat": "ne", "sym":"nw",
"sur": "n", "por": "e", "exp": "sw", "ref":"se", "gen": "n"}.get(gate_type, "o")
if direction:
# "o" is for unknown gate types
for link in node.get_gate(gate_type).get_links():
target_node_id = link.target_node.uid
if target_node_id in node_index:
# otherwise, the link points outside the current nodespace and will be ignored here
if not direction in node_index[node_id].directions:
node_index[node_id].directions[direction]=OrderedSet()
node_index[node_id].directions[direction].add(node_index[target_node_id])
inverse = INVERSE_DIRECTIONS[direction]
if not inverse in node_index[target_node_id].directions:
node_index[target_node_id].directions[inverse]=OrderedSet()
node_index[target_node_id].directions[inverse].add(node_index[node_id])
if direction != 'n' and direction != 's':
vertical_only = False
node_index[node_id].stackable = vertical_only
# finally, let us sort all node_id_list in the direction groups
for node_id in node_index:
for direction in node_index[node_id].directions:
node_index[node_id].directions[direction] = list(node_index[node_id].directions[direction])
node_index[node_id].directions[direction].sort(key = lambda i: nodenet.get_node(i.uid).index)
return UnorderedGroup(node_index.values())
def group_horizontal_links(all_nodes):
"""group direct horizontal links (por)"""
h_groups = UnorderedGroup()
excluded_nodes = OrderedSet()
for i in all_nodes:
if not i.directions.get("w"): # find leftmost nodes
excluded_nodes.add(i)
if i.directions.get("e"):
h_group = HorizontalGroup([i])
_add_nodes_horizontally(i, h_group, excluded_nodes)
if len(h_group) > 1:
h_groups.append(h_group)
else:
h_groups.append(i)
else:
h_groups.append(i)
# now handle circles (we find them by identifying left-over nodes that still have "e" links)
for i in all_nodes:
if not i in excluded_nodes:
excluded_nodes.add(i)
if i.directions.get("e"):
h_group = HorizontalGroup([i])
_add_nodes_horizontally(i, h_group, excluded_nodes)
if len(h_group) > 1:
h_groups.append(h_group)
else:
h_groups.append(i)
else:
h_groups.append(i)
_fix_link_inheritance(h_groups, OrderedSet())
return h_groups
def _add_nodes_horizontally(display_node, h_group, excluded_nodes):
"""recursive helper function for adding horizontally linked nodes to a group"""
while True:
successor_nodes = [ node for node in display_node.directions.get("e", []) if node not in excluded_nodes ]
if len(successor_nodes) == 1:
display_node = successor_nodes[0]
excluded_nodes.add(display_node)
h_group.append(display_node)
if not display_node.directions.get("e"):
break
else:
break
def group_other_links(all_groups):
"""group other horizontal links (native modules)"""
excluded_nodes = OrderedSet()
_group_other_links(all_groups, excluded_nodes, "O")
_group_other_links(all_groups, excluded_nodes, "o")
_fix_link_inheritance(all_groups, OrderedSet())
return all_groups
def _group_other_links(groups, excluded_nodes, direction):
for i in groups:
if i.directions.get(direction): # inverse non-standard links
predecessors = []
for node in i.directions[direction]:
if not node in excluded_nodes and not node.directions.get("w") and not node.directions.get("e"):
# this node is not part of another group at this point
excluded_nodes.add(node)
predecessors.append(node.parent)
if len(predecessors) == 1:
i.insert(0, predecessors[0])
if len(predecessors) > 1:
i.insert(0, VerticalGroup(predecessors[0]))
def group_with_same_parent(all_groups):
"""group horizontal groups that share the same super-node"""
# find groups with same super-node
candidates = OrderedDict()
for g in all_groups:
if "n" in g.directions:
super_node = list(g.directions["n"])[0] # there can be multiple super-nodes, but we only take the 1st
if super_node not in candidates: candidates[super_node] = []
candidates[super_node].append(g)
# build vertical groups
for super_node in candidates:
h_group = HorizontalGroup()
for g in candidates[super_node]:
all_groups.remove(g)
if isinstance(g, HorizontalGroup):
for e in g: h_group.append(e)
else:
h_group.append(g)
parent_group = super_node.parent
v_group = VerticalGroup([super_node, h_group])
parent_group[parent_group.index(super_node)] = v_group
for clist in candidates.values():
if super_node in clist:
clist[clist.index(super_node)] = v_group
#_fix_link_inheritance(all_groups, OrderedSet())
return all_groups
def _fix_link_inheritance(group, excluded_nodes):
"""recursive helper function to mark for a group and every sub-group into which directions it is linked.
The function adds the links as .directions to the group and its sub-groups, and carries a set of
excluded_nodes to remember which links should not be inherited upwards"""
if hasattr(group, "uid"):
excluded_nodes.add(group)
else:
for i in group:
locally_excluded_nodes = OrderedSet()
_fix_link_inheritance(i, locally_excluded_nodes)
for d in i.directions:
if not d in group.directions: group.directions[d] = OrderedSet()
for node in i.directions[d]:
group.directions[d].add(node)
for i in locally_excluded_nodes:
excluded_nodes.add(i)
# now delete all links to excluded nodes
dirs_copy = group.directions.copy()
for d in dirs_copy:
for node in dirs_copy[d]:
if node in excluded_nodes: group.directions[d].remove(node)
if not group.directions[d]: del group.directions[d]
class UnorderedGroup(list):
@property
def stackable(self):
for i in self:
if not i.stackable:
return False
return True
def __init__(self, elements = None, parent = None):
self.directions = {}
self.parent = parent
if elements:
list.__init__(self, elements)
for i in elements:
i.parent = self
def __repr__(self):
sig = "Group"
if self.__class__.__name__ == "HorizontalGroup": sig = "HGroup"
if self.__class__.__name__ == "VerticalGroup": sig = "VGroup"
params = ""
if self.directions:
params += "dirs: "
for i in self.directions:
params += "[%s]: " % i
for j in self.directions[i]:
params += "%s, " % str(j.uid)
if len(self):
params += "%r, " % list(self)
return '%s(%s)' % (sig, params)
def __repr2__(self):
params = ""
if len(self):
params += "%r" % list(self)
# if self.parent:
# params += ", parent=%r" % self.parent
if self.directions:
params += ", directions=%r" % self.directions
return '%s(%s)' % (self.__class__.__name__, params)
def width(self):
width = 0
for i in self:
width = max(width, i.width())
return width
def height(self):
height = 0
for i in self:
height += i.height()
return height
def append(self, element):
element.parent = self
list.append(self, element)
def arrange(self, nodenet, start_position = (0, 0)):
# arrange elements of unordered group below each other
x, y = start_position
for i in self:
i.arrange(nodenet, (x, y))
y += i.height()*GRID
class HorizontalGroup(UnorderedGroup):
def width(self):
width = 0
for i in self:
width += i.width()
return width
def height(self):
height = 0
for i in self:
height = max(i.height(), height)
return height
def arrange(self, nodenet, start_position = (0,0)):
x, y = start_position
for i in self:
i.arrange(nodenet, (x, y))
xshift = 1 if self.stackable else i.width()
x += xshift*GRID
class VerticalGroup(UnorderedGroup):
def width(self):
width = 0
for i in self:
width = max(width, i.width())
return width
def height(self):
height = 0
for i in self:
height += i.height()
return height
def arrange(self, nodenet, start_position = (0,0)):
x, y = start_position
for i in self:
i.arrange(nodenet, (x, y))
y += i.height()*GRID
def calculate_grid_position(index, start_position = (0, 0)):
"""Determines the position of an item in a simple grid, based on default values defined here"""
return (
BORDER + (index % PREFERRED_WIDTH + 1) * GRID - GRID / 2,
BORDER + int(index / PREFERRED_WIDTH + 1) * GRID - GRID / 2,
)
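# Illustrative sketch (not part of the aligner): the first grid slot sits at
# (BORDER + GRID/2, BORDER + GRID/2) and subsequent slots advance by GRID per
# column, wrapping after PREFERRED_WIDTH entries.
def _example_grid_positions(count=3):
    """Return the grid positions of the first `count` items (hypothetical helper)."""
    return [calculate_grid_position(i) for i in range(count)]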
|
|
from sklearn.utils.testing import assert_true
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
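# Illustrative sketch (not part of the original test suite): the reference
# implementation above can be fit on the binarized iris labels, mirroring what
# test_classifier_correctness does with the scikit-learn estimator.
def _reference_pa_example():
    y_bin = y.copy()
    y_bin[y != 1] = -1
    ref = MyPassiveAggressive(C=1.0, loss="hinge", fit_intercept=True, n_iter=2)
    ref.fit(X, y_bin)
    # the sign of the margin is the predicted class in {-1, +1}
    return np.sign(ref.project(X))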
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0,
average=average)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
if average:
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
for average in (False, True):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0,
average=average)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
if average:
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
    # now the hyperplane should rotate clockwise and
    # the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0,
average=average)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
if average:
assert_true(hasattr(reg, 'average_coef_'))
assert_true(hasattr(reg, 'average_intercept_'))
assert_true(hasattr(reg, 'standard_intercept_'))
assert_true(hasattr(reg, 'standard_coef_'))
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for average in (False, True):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0,
average=average)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
if average:
assert_true(hasattr(reg, 'average_coef_'))
assert_true(hasattr(reg, 'average_intercept_'))
assert_true(hasattr(reg, 'standard_intercept_'))
assert_true(hasattr(reg, 'standard_coef_'))
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: utils
# Purpose:
#
# Author: Administrator
#
# Created: 15/06/2014
# Copyright: (c) Administrator 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import json
import time
import urllib
import urllib2
import threading
__version__ = '0.1.0'
__author__ = 'Liang Cha ([email protected])'
'''
Python client SDK for Micro Message Public Platform API.
'''
class APIError(StandardError):
'''
    raise APIError if the received json message indicates failure.
'''
def __init__(self, error_code, error_msg):
self.error_code = error_code
self.error_msg = error_msg
StandardError.__init__(self, error_msg)
def __str__(self):
return 'APIError: %s:%s' %(self.error_code, self.error_msg)
class JsonDict(dict):
    ' general json object that allows attributes to be bound to it and also behaves like a dict '
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(r"'JsonDict' object has no attribute '%s'" %(attr))
def __setattr__(self, attr, value):
self[attr] = value
def _parse_json(s):
' parse str into JsonDict '
def _obj_hook(pairs):
o = JsonDict()
for k, v in pairs.iteritems():
o[str(k)] = v
return o
return json.loads(s, object_hook = _obj_hook)
(_HTTP_GET, _HTTP_POST, _HTTP_FILE) = range(3)
def _decode_str(v):
'''
    do url-encode v (decoding byte strings to unicode first)
    >>> _decode_str('R&D')
    'R%26D'
'''
if isinstance(v, basestring):
qv = v if isinstance(v, unicode) else v.decode('utf-8')
return urllib.quote(qv)
else:
return None
def _encode_str(v):
'''
    do url-encode v (encoding unicode to utf-8 first)
    >>> _encode_str('R&D')
    'R%26D'
'''
if isinstance(v, basestring):
qv = v.encode('utf-8') if isinstance(v, unicode) else v
return urllib.quote(qv)
else:
return None
def _encode_params(**kw):
'''
    do url-encode parameters
>>> _encode_params(a=1, b='R&D')
'a=1&b=R%26D'
'''
args = []
body = None
base_path = None
for k, v in kw.iteritems():
if k == 'body':
body = v
continue
if k in ['pic']:
continue
if k == 'base_path':
base_path = v
continue
if isinstance(v, basestring):
qv = v.encode('utf-8') if isinstance(v, unicode) else v
args.append('%s=%s' %(k, urllib.quote(qv)))
else:
if v == None:
args.append('%s=' %(k))
else:
qv = str(v)
args.append('%s=%s' %(k, urllib.quote(qv)))
return ('&'.join(args), body, base_path)
def _encode_multipart(**kw):
    ' build a multipart/form-data body with a time-derived boundary '
boundary = '----------%s' % hex(int(time.time()) * 1000)
data = []
for k, v in kw.iteritems():
if hasattr(v, 'read'):
data.append('--%s' % boundary)
filename = getattr(v, 'name', '')
if filename == None or len(filename) == 0:
filename = '/tmp/test.jpg'
content = v.read()
data.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (k, filename))
data.append('Content-Length: %d' % len(content))
#data.append('Content-Type: application/octet-stream')
data.append('Content-Type: image/jpeg')
data.append('Content-Transfer-Encoding: binary\r\n')
data.append(content)
break
data.append('--%s--\r\n' % boundary)
return '\r\n'.join(data), boundary
def _http_call(the_url, method, token, **kw):
'''
send an http request and return a json object if no error occurred.
'''
params = None
boundary = None
body = None
base_path = None
(params, body, base_path) = _encode_params(**kw)
if method == _HTTP_FILE:
the_url = the_url.replace('https://api.', 'http://file.api.')
body, boundary = _encode_multipart(**kw)
if token == None:
http_url = '%s?%s' %(the_url, params)
else:
the_url = the_url + '?access_token=' + token
http_url = '%s&%s' %(the_url, params) if (method == _HTTP_GET or method == _HTTP_FILE) else the_url
http_body = str(body) if (method == _HTTP_POST) else body
req = urllib2.Request(http_url, data = http_body)
if boundary != None:
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
try:
## resp = urllib2.urlopen(req, timeout = 5)
resp = urllib2.urlopen(req, timeout=5)
body = resp.read()
try:
rjson = _parse_json(body)
except Exception, e:
if resp.getcode() != 200:
raise e
filename = None
if resp.headers['Content-Type'] == 'image/jpeg':
filename = 'WX_%d.jpg' %(int(time.time()))
if base_path == None:
base_path = './'
else:
raise e
try:
print '%s/%s' %(base_path, filename)
fd = open('%s/%s' %(base_path, filename), 'wb')
fd.write(body)
except Exception, e:
raise e
fd.close()
return _parse_json('{"path":"%s/%s"}' %(base_path, filename))
if hasattr(rjson, 'errcode') and rjson['errcode'] != 0:
raise APIError(str(rjson['errcode']), rjson['errmsg'])
return rjson
except urllib2.HTTPError, e:
print 'urllib2.HTTPError:%s',e
try:
rjson = _parse_json(e.read())
except:
rjson = None
if hasattr(rjson, 'errcode'):
raise APIError(rjson['errcode'], rjson['errmsg'])
raise e
class filecache:
'''
the information is temporarily saved to the file.
'''
def __init__(self, path, create = False):
self.path = path
self.dict_data = None
fd = None
try:
fd = open(self.path, 'rb')
except Exception, e:
print 'filecache open error:', e
if not create:
return None
else:
fd = open(self.path, 'wb')
fd.close()
fd = open(self.path, 'rb')
data = fd.read()
if len(data) == 0:
data = '{}'
self.dict_data = eval(data)
fd.close()
def get(self, key):
if self.dict_data.has_key(key):
return self.dict_data[key]
return None
def set(self, key, value, time = 0):
if self.dict_data.has_key(key):
self.dict_data[key] = value
else:
self.dict_data.update({key:value})
def delete(self, key, time = 0):
if self.dict_data.has_key(key):
del self.dict_data[key]
def save(self):
fd = open(self.path, 'wb')
fd.write(repr(self.dict_data))
fd.close()
def __str__(self):
data = []
for key in self.dict_data.keys():
data += ['"%s":"%s"' %(str(key), str(self.dict_data[key]))]
return '{%s}' %(', '.join(data))
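# Illustrative sketch (not part of the SDK): a typical filecache round-trip.
# The cache path is hypothetical; create=True makes the file on first use.
def _filecache_example():
    cache = filecache('/tmp/wx_token.cache', create = True)
    cache.set('access_token', 'example-token')
    cache.save()
    return cache.get('access_token')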
class _Executable(object):
def __init__(self, client, method, path):
self._client = client
self._method = method
self._path = path
def __call__(self, **kw):
try:
return _http_call('%s%s' %(self._client.api_url, self._path), \
self._method, self._client.access_token, **kw)
except APIError,e:
if e.error_code == 40001:
print 'APIError and do request_access_token()'
self._client.request_access_token()
return _http_call('%s%s' %(self._client.api_url, self._path), \
self._method, self._client.access_token, **kw)
def __str__(self):
return '_Executable (%s)' %(self._path)
__repr__ = __str__
class _Callable(object):
def __init__(self, client, name):
self._client = client
self._name = name
def __getattr__(self, attr):
if attr == '_get':
return _Executable(self._client, _HTTP_GET, self._name)
if attr == 'post':
return _Executable(self._client, _HTTP_POST, self._name)
if attr == 'file':
return _Executable(self._client, _HTTP_FILE, self._name)
name = '%s/%s' %(self._name, attr)
return _Callable(self._client, name)
def __str__(self):
return '_Callable (%s)' %(self._name)
def mkdir(path):
import os
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
return True
else:
return False
def main():
## mkpath="d:\\qttc\\web\\"
## mkdir(mkpath)
pass
if __name__ == '__main__':
main()
|
|
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
# PYTHON_ARGCOMPLETE_OK (Must be in first 1024 bytes, so if tab completion
# is failing, move this above the license)
import argcomplete
import argparse
import importlib
import logging
import os
import pdb
import sys
import traceback
from datetime import datetime
from dateutil.parser import parse as date_parse
try:
from setproctitle import setproctitle
except ImportError:
def setproctitle(t):
return None
from c7n.commands import schema_completer
from c7n.config import Config
DEFAULT_REGION = 'us-east-1'
log = logging.getLogger('custodian.cli')
def _default_options(p, blacklist=""):
""" Add basic options ot the subparser.
`blacklist` is a list of options to exclude from the default set.
e.g.: ['region', 'log-group']
"""
provider = p.add_argument_group(
"provider", "AWS account information, defaults per the aws cli")
if 'region' not in blacklist:
provider.add_argument(
"-r", "--region", action='append', default=[],
dest='regions', metavar='REGION',
help="AWS Region to target. Can be used multiple times")
provider.add_argument(
"--profile",
help="AWS Account Config File Profile to utilize")
provider.add_argument("--assume", default=None, dest="assume_role",
help="Role to assume")
provider.add_argument("--external-id", default=None, dest="external_id",
help="External Id to provide when assuming a role")
config = p.add_argument_group(
"config", "Policy config file(s) and policy selectors")
# -c is deprecated. Supported for legacy reasons
config.add_argument("-c", "--config", help=argparse.SUPPRESS)
config.add_argument("configs", nargs='*',
help="Policy configuration file(s)")
config.add_argument("-p", "--policies", default=None, dest='policy_filter',
help="Only use named/matched policies")
config.add_argument("-t", "--resource", default=None, dest='resource_type',
help="Only use policies with the given resource type")
output = p.add_argument_group("output", "Output control")
output.add_argument("-v", "--verbose", action="count", help="Verbose logging")
if 'quiet' not in blacklist:
output.add_argument("-q", "--quiet", action="count",
help="Less logging (repeatable, -qqq for no output)")
else:
output.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
output.add_argument("--debug", default=False, help=argparse.SUPPRESS,
action="store_true")
if 'vars' not in blacklist:
# p.add_argument('--vars', default=None,
# help='Vars file to substitute into policy')
p.set_defaults(vars=None)
if 'log-group' not in blacklist:
p.add_argument(
"-l", "--log-group", default=None,
help="Location to send policy logs (Ex: AWS CloudWatch Log Group)")
else:
p.add_argument("--log-group", default=None, help=argparse.SUPPRESS)
if 'output-dir' not in blacklist:
p.add_argument("-s", "--output-dir", required=True,
help="[REQUIRED] Directory or S3 URL For policy output")
if 'cache' not in blacklist:
p.add_argument(
"-f", "--cache", default="~/.cache/cloud-custodian.cache",
help="Cache file (default %(default)s)")
p.add_argument(
"--cache-period", default=15, type=int,
help="Cache validity in minutes (default %(default)i)")
else:
p.add_argument("--cache", default=None, help=argparse.SUPPRESS)
def _report_options(p):
""" Add options specific to the report subcommand. """
_default_options(p, blacklist=['cache', 'log-group', 'quiet'])
p.add_argument(
'--days', type=float, default=1,
help="Number of days of history to consider")
p.add_argument(
'--raw', type=argparse.FileType('wb'),
help="Store raw json of collected records to given file path")
p.add_argument(
'--field', action='append', default=[], type=_key_val_pair,
metavar='HEADER=FIELD',
help='Repeatable. JMESPath of field to include in the output OR '
'for a tag use prefix `tag:`. Special case fields `region` and'
'`policy` are available')
p.add_argument(
'--no-default-fields', action="store_true",
help='Exclude default fields for report.')
p.add_argument(
'--format', default='csv', choices=['csv', 'grid', 'simple', 'json'],
help="Format to output data in (default: %(default)s). "
"Options include simple, grid, csv, json")
def _metrics_options(p):
""" Add options specific to metrics subcommand. """
_default_options(p, blacklist=['log-group', 'output-dir', 'cache', 'quiet'])
p.add_argument(
'--start', type=date_parse,
help='Start date (requires --end, overrides --days)')
p.add_argument(
'--end', type=date_parse, help='End date')
p.add_argument(
'--days', type=int, default=14,
help='Number of days of history to consider (default: %(default)i)')
p.add_argument('--period', type=int, default=60 * 24 * 24)
def _logs_options(p):
""" Add options specific to logs subcommand. """
_default_options(p, blacklist=['cache', 'quiet'])
# default time range is 0 to "now" (to include all log entries)
p.add_argument(
'--start',
default='the beginning', # invalid, will result in 0
help='Start date and/or time',
)
p.add_argument(
'--end',
default=datetime.now().strftime('%c'),
help='End date and/or time',
)
def _schema_tab_completer(prefix, parsed_args, **kwargs):
# If we are printing the summary we discard the resource
if parsed_args.summary:
return []
return schema_completer(prefix)
def _schema_options(p):
""" Add options specific to schema subcommand. """
p.add_argument(
'resource', metavar='selector', nargs='?',
default=None).completer = _schema_tab_completer
p.add_argument(
'--summary', action="store_true",
help="Summarize counts of available resources, actions and filters")
p.add_argument('--json', action="store_true", help=argparse.SUPPRESS)
p.add_argument("-v", "--verbose", action="count", help="Verbose logging")
p.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
p.add_argument("--debug", default=False, help=argparse.SUPPRESS)
def _dryrun_option(p):
p.add_argument(
"-d", "--dryrun", action="store_true",
help="Don't execute actions but filter resources")
def _key_val_pair(value):
"""
Type checker to ensure that --field values are of the format key=val
"""
if '=' not in value:
msg = 'values must be of the form `header=field`'
raise argparse.ArgumentTypeError(msg)
return value
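# Illustrative sketch (not part of the CLI): _key_val_pair only checks the
# `header=field` shape that --field expects; anything without an '=' raises
# an ArgumentTypeError.
def _example_key_val_pair():
    assert _key_val_pair('Name=tag:Name') == 'Name=tag:Name'
    try:
        _key_val_pair('no-equals-sign')
    except argparse.ArgumentTypeError:
        return True
    return False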
def setup_parser():
c7n_desc = "Cloud Custodian - Cloud fleet management"
parser = argparse.ArgumentParser(description=c7n_desc)
# Setting `dest` means we capture which subparser was used.
subs = parser.add_subparsers(
title='commands',
dest='subparser')
run_desc = "\n".join((
"Execute the policies in a config file.",
"",
"Multiple regions can be passed in, as can the symbolic region 'all'. ",
"",
"When running across multiple regions, policies targeting resources in ",
"regions where they do not exist will not be run. The output directory ",
"when passing multiple regions is suffixed with the region. Resources ",
"with global endpoints are run just once and are suffixed with the first ",
"region passed in or us-east-1 if running against 'all' regions.",
""
))
run = subs.add_parser(
"run", description=run_desc,
help="Execute the policies in a config file",
formatter_class=argparse.RawDescriptionHelpFormatter)
run.set_defaults(command="c7n.commands.run")
_default_options(run)
_dryrun_option(run)
run.add_argument(
"--skip-validation",
action="store_true",
help="Skips validation of policies (assumes you've run the validate command seperately).")
metrics_help = ("Emit metrics to provider metrics. Specify 'aws', 'gcp', or 'azure'. "
"For more details on aws metrics options, see: "
"https://cloudcustodian.io/docs/aws/usage.html#metrics")
run.add_argument(
"-m", "--metrics-enabled",
default=None, nargs="?", const="aws",
help=metrics_help)
run.add_argument(
"--trace",
dest="tracer",
help=argparse.SUPPRESS,
default=None, nargs="?", const="default")
schema_desc = ("Browse the available vocabularies (resources, filters, modes, and "
"actions) for policy construction. The selector "
"is specified with RESOURCE[.CATEGORY[.ITEM]] "
"examples: s3, ebs.actions, or ec2.filters.instance-age")
schema = subs.add_parser(
'schema', description=schema_desc,
help="Interactive cli docs for policy authors")
schema.set_defaults(command="c7n.commands.schema_cmd")
_schema_options(schema)
report_desc = ("Report of resources that a policy matched/ran on. "
"The default output format is csv, but other formats "
"are available.")
report = subs.add_parser(
"report", description=report_desc,
help="Tabular report on policy matched resources")
report.set_defaults(command="c7n.commands.report")
_report_options(report)
logs_desc = "Get policy execution logs"
logs = subs.add_parser(
'logs', help=logs_desc, description=logs_desc)
logs.set_defaults(command="c7n.commands.logs")
_logs_options(logs)
metrics_desc = "Retrieve policy execution metrics."
metrics = subs.add_parser(
'metrics', description=metrics_desc, help=metrics_desc)
metrics.set_defaults(command="c7n.commands.metrics_cmd")
_metrics_options(metrics)
version = subs.add_parser(
'version', help="Display installed version of custodian")
version.set_defaults(command='c7n.commands.version_cmd')
version.add_argument('-v', '--verbose', action="count", help="Verbose logging")
version.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
version.add_argument(
"--debug", action="store_true",
help="Print info for bug reports")
validate_desc = (
"Validate config files against the json schema")
validate = subs.add_parser(
'validate', description=validate_desc, help=validate_desc)
validate.set_defaults(command="c7n.commands.validate")
validate.add_argument(
"-c", "--config", help=argparse.SUPPRESS)
validate.add_argument("configs", nargs='*',
help="Policy Configuration File(s)")
validate.add_argument("-v", "--verbose", action="count", help="Verbose Logging")
validate.add_argument("-q", "--quiet", action="count", help="Less logging (repeatable)")
validate.add_argument("--debug", default=False, help=argparse.SUPPRESS)
return parser
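# Illustrative sketch (not part of the CLI): the parser built above can be
# exercised directly; each subcommand's handler is recorded as a dotted path
# in options.command.
def _example_parse_schema_args():
    parser = setup_parser()
    options = parser.parse_args(['schema', '--summary'])
    return options.command, options.summary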
def _setup_logger(options):
level = 3 + (options.verbose or 0) - (options.quiet or 0)
if level <= 0:
# print nothing
log_level = logging.CRITICAL + 1
elif level == 1:
log_level = logging.ERROR
elif level == 2:
log_level = logging.WARNING
elif level == 3:
# default
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(
level=log_level,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
external_log_level = logging.ERROR
if level <= 0:
external_log_level = logging.CRITICAL + 1
elif level >= 5:
external_log_level = logging.INFO
logging.getLogger('botocore').setLevel(external_log_level)
logging.getLogger('urllib3').setLevel(external_log_level)
logging.getLogger('s3transfer').setLevel(external_log_level)
logging.getLogger('urllib3').setLevel(logging.ERROR)
def main():
parser = setup_parser()
argcomplete.autocomplete(parser)
options = parser.parse_args()
if options.subparser is None:
parser.print_help(file=sys.stderr)
return sys.exit(2)
_setup_logger(options)
# Support the deprecated -c option
if getattr(options, 'config', None) is not None:
options.configs.append(options.config)
config = Config.empty(**vars(options))
try:
command = options.command
if not callable(command):
command = getattr(
importlib.import_module(command.rsplit('.', 1)[0]),
command.rsplit('.', 1)[-1])
# Set the process name to something cleaner
process_name = [os.path.basename(sys.argv[0])]
process_name.extend(sys.argv[1:])
setproctitle(' '.join(process_name))
command(config)
except Exception:
if not options.debug:
raise
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import types
import fnmatch
import re
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, itervalues
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + list(LEXERS)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""
    Return whether the supplied file name fn matches the glob pattern.
"""
if glob not in _pattern_cache:
pattern = re.compile(fnmatch.translate(glob))
_pattern_cache[glob] = pattern
else:
pattern = _pattern_cache[glob]
return pattern.match(fn)
def _load_lexers(module_name):
"""
Load a lexer (and all others in the module too).
"""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""
Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
"""
for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""
Lookup a lexer class by name. Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""
Get a lexer by an alias.
"""
# lookup builtin lexers
for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
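# Illustrative sketch (not part of the public API): resolving an alias to a
# lexer instance, assuming the stock 'python' alias is present in LEXERS.
def _example_lookup_python_lexer():
    lexer = get_lexer_by_name('python')
    return lexer.name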
def get_lexer_for_filename(_fn, code=None, **options):
"""
Get a lexer for a filename. If multiple lexers match the filename
pattern, use ``analyze_text()`` to figure out which one is more
appropriate.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = code.decode('latin1')
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus
return cls.priority + bonus
if matches:
matches.sort(key=get_rating)
#print "Possible lexers, after sort:", matches
return matches[-1][0](**options)
raise ClassNotFound('no lexer for filename %r found' % _fn)
def get_lexer_for_mimetype(_mime, **options):
"""
Get a lexer for a mimetype.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses():
"""
Return an iterator over all lexer classes.
"""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
    Look up all lexers that handle the filename as a primary pattern
    (``filenames``) or a secondary pattern (``alias_filenames``). Then run a
    text analysis on those lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = None
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary = lexer
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
# since py3 can no longer sort by class name by default, here is the
# sorting function that works in both
def type_sort(type_):
return (type_[0], type_[1].__name__)
result.sort(key=type_sort)
if not result[-1][0] and primary is not None:
return primary(**options)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""
    Guess a lexer by strong distinctions in the text (e.g., a shebang line).
"""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
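# Usage sketch for guess_lexer() (hedged: the winning lexer depends on the
# analyse_text() heuristics shipped with the installed Pygments version; a
# shebang is usually decisive):
#
#     >>> from pygments.lexers import guess_lexer
#     >>> guess_lexer('#!/usr/bin/env python\nprint("hi")').name
#     'Python'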
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
sys.modules['pygments.lexers'] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
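# Note on the _automodule shim above: lexer classes are loaded lazily the
# first time they are looked up as attributes of pygments.lexers. A hedged
# illustration (any name present in the LEXERS map behaves the same way):
#
#     >>> import pygments.lexers
#     >>> cls = pygments.lexers.PythonLexer   # first access triggers _load_lexers()
#     >>> cls.name
#     'Python'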
|
|
#############################################################################
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
#
# Project Name : IEEE 802.11 Timeline Tool *
#
# Author : Alex Ashley
#
#############################################################################
from util.tasks import add_task, poll_task_queue
#from util.io import flatten_model
from project.models import Project
from ballot.models import Ballot, DenormalizedBallot
from django.db import models
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django.core.urlresolvers import reverse
import datetime, json, logging
class BallotProxy(object):
def __init__(self,data):
self.pk = self.number = long(data['number'])
self.draft = float(data['draft'])
self.result = int(data['result'])
self.closed = datetime.date.fromordinal(data['closed'])
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return u'{"pk":%d,"draft":%f,"result":%d,"closed":"%s"}'%(self.pk,self.draft,self.result,self.closed.isoformat())
def __repr__(self):
return u'BallotProxy(%d,%f,%d,"%s")'%(self.pk,self.draft,self.result,self.closed.isoformat())
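# Hedged example of the dict shape BallotProxy expects (values illustrative;
# 'closed' is a proleptic Gregorian ordinal as produced by date.toordinal()):
#
#     >>> import datetime
#     >>> proxy = BallotProxy({'number': 101, 'draft': '3.0', 'result': 75,
#     ...                      'closed': datetime.date(2011, 5, 1).toordinal()})
#     >>> proxy.draft, proxy.result
#     (3.0, 75)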
class DenormalizedProjectBallots(models.Model):
QUEUE_NAME='timeline-backlog'
project_key = models.AutoField(primary_key=True)
#project_pk = models.CharField(primary_key=True, blank=False, null=False, max_length=255)
#project_pk = models.IntegerField(primary_key=True)
#project = models.ForeignKey(DenormalizedProject, primary_key=True)
project_task_group = models.CharField(max_length=20)
denormalized_initial_wg = models.TextField(blank=True, null=True, editable=False, db_index=False)
denormalized_recirc_wg = models.TextField(blank=True, null=True, editable=False, db_index=False)
denormalized_initial_sb = models.TextField(blank=True, null=True, editable=False, db_index=False)
denormalized_recirc_sb = models.TextField(blank=True, null=True, editable=False, db_index=False)
@classmethod
def denormalize(clz, project_pk=None, project=None, commit=True):
if project is None and project_pk is not None:
project = Project.objects.get(pk=project_pk)
if project is None:
raise ValueError('denormalize with no project')
#logging.info('denormalize timeline %s'%project.name)
try:
dn = DenormalizedProjectBallots.objects.get(pk=project.pk)
except DenormalizedProjectBallots.DoesNotExist:
dn = DenormalizedProjectBallots(pk=project.pk)
dn.project_key = project.key
dn.project_task_group = project.task_group
#if dn.denormalized_initial_wg is None or backlog.update_initial_wg==True:
dn.denormalized_initial_wg = clz._denormalize_ballot(project, Ballot.WGInitial)
#if dn.denormalized_recirc_wg is None or backlog.update_recirc_wg==True:
dn.denormalized_recirc_wg = clz._denormalize_ballot(project, Ballot.WGRecirc)
#if dn.denormalized_initial_sb is None or backlog.update_initial_sb==True:
dn.denormalized_initial_sb = clz._denormalize_ballot(project, Ballot.SBInitial)
#if dn.denormalized_recirc_sb is None or backlog.update_recirc_sb==True:
dn.denormalized_recirc_sb= clz._denormalize_ballot(project, Ballot.SBRecirc)
if commit:
dn.save()
return dn
@classmethod
def request_update(clz,project=None, project_pk=None,ballot_type=None):
if project is not None:
project_pk = project.pk
if project_pk is None:
raise ValueError('request_update with no project')
#payload = flatten_model(project)
#print 'ru',project.task_group,
add_task(url=reverse('timeline.views.backlog_worker'),
name = 'timeline'+str(project_pk),
queue_name=clz.QUEUE_NAME,
params={'project':project_pk},
countdown=2)
#try:
# pbb = ProjectBallotsBacklog.objects.get(project=project)
#except ProjectBallotsBacklog.DoesNotExist:
# pbb = ProjectBallotsBacklog(project=project)
#if ballot_type==Ballot.WGInitial or ballot_type==Ballot.WGInitial.code:
# pbb.update_initial_wg=True
#elif ballot_type==Ballot.WGRecirc or ballot_type==Ballot.WGRecirc.code:
# pbb.update_recirc_wg=True
#elif ballot_type==Ballot.SBInitial or ballot_type==Ballot.SBInitial.code:
# pbb.update_initial_sb=True
#elif ballot_type==Ballot.SBRecirc or ballot_type==Ballot.SBRecirc.code:
# pbb.update_recirc_sb=True
#pbb.save()
#sys.stderr.write('%s,%d '%(project.task_group,ProjectBallotsBacklog.objects.count()))
@classmethod
def backlog_poll(clz):
rv= poll_task_queue(clz.QUEUE_NAME)
#logging.info(str(rv))
return rv
@classmethod
def _denormalize_ballot(clz,project,ballot_type):
wi=[]
for ballot in Ballot.objects.filter(project=project).filter(ballot_type=ballot_type.code).order_by('draft'): #.only('number','draft','vote_for','vote_against','closed')
wi.append({ 'number':ballot.number, 'draft':str(ballot.draft), 'result':ballot.result, 'closed':ballot.closed.toordinal()})
#if project.task_group=='TGmb':
# print 'dn',project.task_group,ballot_type,wi
return json.dumps(wi)
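    # The denormalized columns hold a JSON list with one small dict per
    # ballot, e.g. (illustrative values, key order may differ):
    #
    #     '[{"number": 101, "draft": "3.0", "result": 75, "closed": 734258}]'
    #
    # _get_ballots() below turns this back into BallotProxy objects.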
@property
def wg_ballots(self):
rv = self._get_ballots(Ballot.WGInitial,self.denormalized_initial_wg)
rv += self._get_ballots(Ballot.WGRecirc,self.denormalized_recirc_wg)
return rv
@property
def initial_wg_ballots(self):
return self._get_ballots(Ballot.WGInitial,self.denormalized_initial_wg)
@property
def recirc_wg_ballots(self):
return self._get_ballots(Ballot.WGRecirc,self.denormalized_recirc_wg)
@property
def sb_ballots(self):
return self._get_ballots(Ballot.SBInitial,self.denormalized_initial_sb)+self._get_ballots(Ballot.SBRecirc,self.denormalized_recirc_sb)
@property
def initial_sb_ballots(self):
return self._get_ballots(Ballot.SBInitial,self.denormalized_initial_sb)
@property
def recirc_sb_ballots(self):
return self._get_ballots(Ballot.SBRecirc,self.denormalized_recirc_sb)
def __unicode__(self):
rv = [self.project_task_group, str(self.initial_wg_ballots), str(self.recirc_wg_ballots), str(self.initial_sb_ballots), str(self.recirc_sb_ballots)]
return '\n'.join(rv)
def _get_ballots(self,ballot_type, dfield):
if not dfield:
return []
try:
return [BallotProxy(i) for i in json.loads(dfield)]
except ValueError:
            DenormalizedProjectBallots.request_update(project_pk=self.pk, ballot_type=ballot_type)
return []
#class ProjectBallotsBacklog(models.Model):
# #project_pk = models.CharField(blank=False, null=False, max_length=128)
# project = models.ForeignKey(DenormalizedProject)
# #project_pk = models.IntegerField(primary_key=True)
# update_initial_wg = models.BooleanField(default=False)
# update_recirc_wg = models.BooleanField(default=False)
# update_initial_sb = models.BooleanField(default=False)
# update_recirc_sb = models.BooleanField(default=False)
#def check_project_ballot_backlog(needs_update=False):
# if not needs_update:
# needs_update = ProjectBallotsBacklog.objects.exists()
# if needs_update:
# sys.stderr.write('bw%d '%ProjectBallotsBacklog.objects.count())
# add_task(url=reverse('timeline.views.backlog_worker'), name='timeline-backlog', \
# params={'backlog':str(ProjectBallotsBacklog.objects.values_list('pk',flat=True))})
# return needs_update
@receiver(pre_delete, sender=DenormalizedBallot)
@receiver(post_save, sender=DenormalizedBallot)
def post_ballot_save(sender, instance, **kwargs):
    # instance is a DenormalizedBallot instance
if kwargs.get('raw',False):
#don't create a backlog when loading a fixture in a unit test
return
try:
project = Project.objects.get(pk=instance.project_pk)
DenormalizedProjectBallots.request_update(project=project, ballot_type=instance.ballot_type)
#check_project_ballot_backlog(True)
except Project.DoesNotExist:
pass
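# Hedged flow summary: saving or deleting a DenormalizedBallot enqueues a task
# on the 'timeline-backlog' queue via request_update(), and the backlog worker
# view is then expected to call DenormalizedProjectBallots.denormalize() for
# the affected project, e.g. (illustrative):
#
#     DenormalizedProjectBallots.denormalize(project_pk=42)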
|
|
"""
filename: ArrowObserver.py
description:
   This module implements two classes: ArrowAnnotation (which is applied to any
   annotatable object to describe it as an arrow), and ArrowMarker (which watches
   for strokes that look like arrows and adds the arrow annotation).
Doctest Examples:
>>> c = ArrowMarker()
   example of something that is not an arrow
>>> linepoints = [Point(x,2*x) for x in range(1,20)]
>>> c.onStrokeAdded(Stroke(linepoints))
"""
#-------------------------------------
import math
from Utils import Logger
from Utils import GeomUtils
from Utils import Template
from SketchFramework import SketchGUI
from SketchFramework.Point import Point
from SketchFramework.Stroke import Stroke
from SketchFramework.Board import BoardObserver, BoardSingleton
from SketchFramework.Annotation import Annotation, AnnotatableObject
logger = Logger.getLogger('ArrowObserver', Logger.WARN )
#-------------------------------------
class ArrowAnnotation( Annotation ):
def __init__(self, tip, tail, linearity=0):
Annotation.__init__(self)
self.tip = tip # Point
self.tail = tail # Point
self.linearity = linearity
#-------------------------------------
# FIXME: first go -- only single stroke arrows
class ArrowMarker( BoardObserver ):
def __init__(self):
BoardSingleton().AddBoardObserver( self )
BoardSingleton().RegisterForStroke( self )
#For multistroke arrows, keep track of arrowheads and line endpoints
# and match them up into arrows
self._arrowHeads = []
self._endpoints = []
self.arrowHeadMatcher = Template.TemplateDict(filename = "Utils/arrowheads.templ")
def onStrokeAdded( self, stroke ):
"Watches for Strokes that look like an arrow to Annotate"
ep1 = stroke.Points[0]
ep2 = stroke.Points[-1]
isArrowHead = False
GeomUtils.ellipseAxisRatio(stroke)
#Match single-stroke arrows
tip, tail = _isSingleStrokeArrow(stroke)
if tip is None or tail is None:
revpts = list(stroke.Points)
revpts.reverse()
tip, tail = _isSingleStrokeArrow(Stroke(revpts))
if tip is not None and tail is not None:
isArrowHead = False
anno = ArrowAnnotation( tip, tail )
BoardSingleton().AnnotateStrokes( [stroke], anno)
else:
return
if _isArrowHead(stroke, self.arrowHeadMatcher): #We've matched an arrowhead
head = stroke
isArrowHead = True
strokeNorm = GeomUtils.strokeNormalizeSpacing(stroke, numpoints = 5)
tip = strokeNorm.Points[2] #Middle normalized point is the tip
#Match it to any tails we have
if isArrowHead:
matchedTails = self._matchHeadtoTail(head = stroke, point = tip)
for headpoint, tail in matchedTails:
#Orient the tail correctly
if tail.Points[0] == headpoint:
endpoint = tail.Points[-1]
else:
endpoint = tail.Points[0]
anno = ArrowAnnotation(tip, endpoint)
BoardSingleton().AnnotateStrokes([head, tail],anno)
#Match it like a tail even if we think it's an arrowhead. Oh ambiguity!
matchedHeads = self._matchHeadtoTail(tail = stroke, point = ep1)
for tip, head in matchedHeads:
anno = ArrowAnnotation(tip, ep2)
BoardSingleton().AnnotateStrokes([head, stroke],anno)
matchedHeads = self._matchHeadtoTail(tail = stroke, point = ep2)
for tip, head in matchedHeads:
anno = ArrowAnnotation(tip, ep1)
BoardSingleton().AnnotateStrokes([head, stroke],anno)
#Add this stroke to the pool for future evaluation
self._endpoints.append( (ep1, stroke) )
self._endpoints.append( (ep2, stroke) )
if isArrowHead:
self._arrowHeads.append( (tip, stroke) )
def _matchHeadtoTail(self, head = None, tail = None, point = None):
"""Input head stroke or tail stroke. If head is specified, match it to a tail. If tail is specified, match it to a head.
Parameter 'point' should be the tip if head is specified, the end-point if tail is specified.
Returns a list of tuples: (tip, head_stroke) if tail is specified, (endpoint, tail_stroke) if head is specified."""
retlist = []
if point is None:
return retlist
if head is not None and tail is None: #Head is specified, find the tail
tip = point
for endpoint, stroke in self._endpoints:
if _isPointWithHead(endpoint, head, tip):
retlist.append( (endpoint, stroke) )
elif tail is not None and head is None: #Find the head
endpoint = point
for tip, stroke in self._arrowHeads:
if _isPointWithHead(endpoint, stroke, tip):
retlist.append( (tip, stroke) )
return retlist
def onStrokeRemoved(self, stroke):
"When a stroke is removed, remove arrow annotation if found and clean up local state"
for ep_tuple in list(self._endpoints):
if ep_tuple[1] is stroke:
self._endpoints.remove(ep_tuple)
for head_tuple in list( self._arrowHeads ):
if head_tuple[1] is stroke:
self._arrowHeads.remove(head_tuple)
for anno in stroke.findAnnotations(ArrowAnnotation, True):
BoardSingleton().RemoveAnnotation(anno)
def _isPointWithHead(point, head, tip):
"Returns true if point is close enough and within the cone of the head stroke"
ep1 = head.Points[0]
ep2 = head.Points[-1]
midpoint = Point((ep1.X + ep2.X)/2, (ep1.Y + ep2.Y)/2)
tip_to_endpoint = GeomUtils.pointDistanceSquared(point.X, point.Y, tip.X, tip.Y)
tip_to_backofarrowhead = GeomUtils.pointDistanceSquared(tip.X, tip.Y, midpoint.X, midpoint.Y)
if tip_to_endpoint < tip_to_backofarrowhead:
if GeomUtils.pointInAngleCone(point, ep1, tip, ep2):
return True
return False
#-------------------------------------
class ArrowVisualizer( BoardObserver ):
"Watches for Arrow annotations, draws them"
def __init__(self):
BoardSingleton().AddBoardObserver( self )
BoardSingleton().RegisterForAnnotation( ArrowAnnotation, self )
self.annotation_list = []
def onAnnotationAdded( self, strokes, annotation ):
"Watches for annotations of Arrows and prints out the Underlying Data"
self.annotation_list.append(annotation)
def onAnnotationRemoved(self, annotation):
"Watches for annotations to be removed"
if annotation in self.annotation_list:
self.annotation_list.remove(annotation)
def drawMyself( self ):
for a in self.annotation_list:
SketchGUI.drawCircle( a.tail.X, a.tail.Y, color="#93bfdd", width=2.0, radius=4)
SketchGUI.drawCircle( a.tip.X, a.tip.Y, color="#93bfdd", width=2.0, radius=4)
#-------------------------------------
def _isArrowHead(stroke, *args, **kargs):
"""
curvature_list = []
fp = open("indata.csv", "w")
sNorm = GeomUtils.strokeNormalizeSpacing(stroke, numpoints = 30)
prev_vect = None
prev_pt = None
for point in sNorm.Points:
if prev_vect == None:
if prev_pt is not None:
prev_vect = (point.X - prev_pt.X, point.Y - prev_pt.Y)
prev_pt = point
continue
vector = [point.X - prev_pt.X, point.Y - prev_pt.Y]
if vector == (0.0, 0.0) or prev_vect == (0.0, 0.0):
curvature = 0.0
else:
curvature = GeomUtils.vectorDistance(vector, prev_vect)
for i in range(30 * curvature/int((math.pi / 2)) ):
print " ",
print "* ",
print "%s" % (curvature)
curvature_list.append(curvature)
prev_vect = vector
prev_pt = point
print >> fp, "Segment,Curvature"
for idx, curv in enumerate(curvature_list):
print >> fp, "%s,%s" % (idx,curv)
fp.close()
"""
return _isArrowHead_Template(stroke, args[0])
def _isArrowHead_Template(stroke, matcher):
score_dict = matcher.Score([stroke])
if score_dict['score'] < 0.2:
return True
return False
def _isSingleStrokeArrow(stroke):
"Input: Single stroke for evaluation. Returns a tuple of points (tip, tail) if the stroke is an arrow, (None, None) otherwise"
logger.debug("stroke len %d", stroke.length() )
if len(stroke.Points) < 10:
return (None, None)# too small to be arrow
norm_len = len(stroke.Points)
points = GeomUtils.strokeNormalizeSpacing( stroke, numpoints=norm_len).Points
points.reverse() # start from end
# find the first 90 degree turn in the stroke
orilist = GeomUtils.strokeLineSegOrientations( Stroke(points) )
logger.debug("stroke ori %s", str(orilist) )
prev = None
i = 0
for i,ori in enumerate(orilist):
if prev is None:
prev = ori
continue
if GeomUtils.angleDiff(ori,prev)>90: break # found the first turn at index i
first_corner = i
# now we know the scale of the arrow head if there is one
    # if the first corner is more than 1/5 of the way from the end of the stroke
if first_corner > norm_len/5:
return (None, None) # scale is wrong for an arrowhead
tail = stroke.Points[0] # first of the original points
tip = points[i] # reverse point
    # create a list of the monotonicity of all substrokes from the first corner to some distance after
m_list = [ GeomUtils.strokeMonotonicity(Stroke(points[first_corner:x])) for x in range(first_corner+2,first_corner*3) ]
if len(m_list) == 0:
return (None, None)
m_min = min(m_list)
logger.debug("stroke mon (%f)", m_min )
if m_min>0.65:
return (None, None)# too monotonic after the first corner, need to double back
return (tip, tail)
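# Hedged recap of the heuristic above: walking the reversed, evenly re-spaced
# points, the first ~90 degree turn must occur within roughly the first fifth
# of the stroke (the arrowhead), and the points just after that corner must
# double back on themselves (low monotonicity). A plain straight line such as
#
#     Stroke([Point(x, 2 * x) for x in range(1, 20)])
#
# never produces a sharp turn, so (None, None) is returned (see the doctest at
# the top of this module).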
if __name__ == "__main__":
Logger.setDoctest(logger)
import doctest
doctest.testmod()
|
|
import base64
import cgi
import datetime
from distutils.version import LooseVersion
import errno
import io
import json
import logging
import os
import re
import shutil
import tempfile
import time
import zipfile
from simple_salesforce import Salesforce
from simple_salesforce import SalesforceGeneralError
from salesforce_bulk import SalesforceBulk
import xmltodict
from cumulusci.core.exceptions import ApexTestException
from cumulusci.core.exceptions import SalesforceException
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tasks import BaseTask
from cumulusci.tasks.metadata.package import PackageXmlGenerator
from cumulusci.salesforce_api.exceptions import MetadataApiError
from cumulusci.salesforce_api.metadata import ApiDeploy
from cumulusci.salesforce_api.metadata import ApiListMetadata
from cumulusci.salesforce_api.metadata import ApiRetrieveInstalledPackages
from cumulusci.salesforce_api.metadata import ApiRetrievePackaged
from cumulusci.salesforce_api.metadata import ApiRetrieveUnpackaged
from cumulusci.salesforce_api.package_zip import CreatePackageZipBuilder
from cumulusci.salesforce_api.package_zip import DestructiveChangesZipBuilder
from cumulusci.salesforce_api.package_zip import InstallPackageZipBuilder
from cumulusci.salesforce_api.package_zip import UninstallPackageZipBuilder
from cumulusci.utils import CUMULUSCI_PATH
from cumulusci.utils import findReplace
from cumulusci.utils import package_xml_from_dict
from cumulusci.utils import zip_subfolder
class BaseSalesforceTask(BaseTask):
name = 'BaseSalesforceTask'
salesforce_task = True
def _run_task(self):
raise NotImplementedError(
'Subclasses should provide their own implementation')
def _update_credentials(self):
self.org_config.refresh_oauth_token(self.project_config.keychain.get_connected_app())
class BaseSalesforceMetadataApiTask(BaseSalesforceTask):
api_class = None
name = 'BaseSalesforceMetadataApiTask'
def _get_api(self):
return self.api_class(self)
def _run_task(self):
api = self._get_api()
if api:
return api()
class BaseSalesforceApiTask(BaseSalesforceTask):
name = 'BaseSalesforceApiTask'
api_version = None
def _init_task(self):
self.sf = self._init_api()
def _init_api(self):
if self.api_version:
api_version = self.api_version
else:
api_version = self.project_config.project__package__api_version
return Salesforce(
instance=self.org_config.instance_url.replace('https://', ''),
session_id=self.org_config.access_token,
version=api_version,
)
class BaseSalesforceToolingApiTask(BaseSalesforceApiTask):
name = 'BaseSalesforceToolingApiTask'
def _init_task(self):
self.tooling = self._init_api()
self.tooling.base_url += 'tooling/'
self._init_class()
def _init_class(self):
pass
def _get_tooling_object(self, obj_name):
obj = getattr(self.tooling, obj_name)
obj.base_url = obj.base_url.replace('/sobjects/', '/tooling/sobjects/')
return obj
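# Hedged illustration of _get_tooling_object(): simple_salesforce builds
# sobject URLs under '/sobjects/', so the replace() above repoints the proxy
# at the Tooling API instead, giving URLs shaped roughly like
#
#     .../services/data/vXX.0/tooling/sobjects/PackageUploadRequest/
#
# which is how PackageUpload below creates PackageUploadRequest records.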
class BaseSalesforceBulkApiTask(BaseSalesforceTask):
name = 'BaseSalesforceBulkApiTask'
def _init_task(self):
self.bulk = self._init_api()
def _init_api(self):
return SalesforceBulk(
host=self.org_config.instance_url.replace('https://', ''),
sessionId=self.org_config.access_token,
)
class GetInstalledPackages(BaseSalesforceMetadataApiTask):
api_class = ApiRetrieveInstalledPackages
name = 'GetInstalledPackages'
class BaseRetrieveMetadata(BaseSalesforceMetadataApiTask):
task_options = {
'path': {
'description': 'The path to write the retrieved metadata',
'required': True,
}
}
def _run_task(self):
api = self._get_api()
src_zip = api()
self._extract_zip(src_zip)
self.logger.info('Extracted retrieved metadata into {}'.format(self.options['path']))
def _extract_zip(self, src_zip):
src_zip.extractall(self.options['path'])
class RetrieveUnpackaged(BaseRetrieveMetadata):
api_class = ApiRetrieveUnpackaged
task_options = {
'path': {
'description': 'The path where the retrieved metadata should be written',
'required': True,
},
'package_xml': {
'description': 'The path to a package.xml manifest to use for the retrieve.',
'required': True,
},
'api_version': {
'description': 'Override the default api version for the retrieve. Defaults to project__package__api_version',
},
}
def _init_options(self, kwargs):
super(RetrieveUnpackaged, self)._init_options(kwargs)
if 'api_version' not in self.options:
self.options['api_version'] = self.project_config.project__package__api_version
if 'package_xml' in self.options:
self.options['package_xml_path'] = self.options['package_xml']
with open(self.options['package_xml_path'], 'r') as f:
self.options['package_xml'] = f.read()
def _get_api(self):
return self.api_class(
self,
self.options['package_xml'],
self.options['api_version'],
)
class RetrievePackaged(BaseRetrieveMetadata):
api_class = ApiRetrievePackaged
task_options = {
'path': {
'description': 'The path where the retrieved metadata should be written',
'required': True,
},
'package': {
'description': 'The package name to retrieve. Defaults to project__package__name',
'required': True,
},
'api_version': {
'description': 'Override the default api version for the retrieve. Defaults to project__package__api_version',
'required': True,
},
}
def _init_options(self, kwargs):
super(RetrievePackaged, self)._init_options(kwargs)
if 'package' not in self.options:
self.options['package'] = self.project_config.project__package__name
if 'api_version' not in self.options:
self.options['api_version'] = self.project_config.project__package__api_version
def _get_api(self):
return self.api_class(
self,
self.options['package'],
self.options['api_version'],
)
def _extract_zip(self, src_zip):
src_zip = zip_subfolder(src_zip, self.options.get('package'))
super(RetrievePackaged, self)._extract_zip(src_zip)
class RetrieveReportsAndDashboards(BaseRetrieveMetadata):
api_class = ApiRetrieveUnpackaged
task_options = {
'path': {
'description': 'The path where the retrieved metadata should be written',
'required': True,
},
'report_folders': {
'description': 'A list of the report folders to retrieve reports. Separate by commas for multiple folders.',
},
'dashboard_folders': {
            'description': 'A list of the dashboard folders to retrieve dashboards. Separate by commas for multiple folders.',
},
'api_version': {
'description': 'Override the API version used to list metadata',
}
}
def _init_options(self, kwargs):
super(RetrieveReportsAndDashboards, self)._init_options(kwargs)
if 'api_version' not in self.options:
self.options['api_version'] = (
self.project_config.project__package__api_version
)
def _validate_options(self):
super(RetrieveReportsAndDashboards, self)._validate_options()
        if 'report_folders' not in self.options and 'dashboard_folders' not in self.options:
raise TaskOptionsError('You must provide at least one folder name for either report_folders or dashboard_folders')
def _get_api(self):
metadata = {}
if 'report_folders' in self.options:
for folder in self.options['report_folders']:
api_reports = ApiListMetadata(
self,
'Report',
metadata=metadata,
folder=folder,
as_of_version=self.options['api_version'],
)
metadata = api_reports()
if 'dashboard_folders' in self.options:
for folder in self.options['dashboard_folders']:
api_dashboards = ApiListMetadata(
self,
'Dashboard',
metadata=metadata,
folder=folder,
as_of_version=self.options['api_version'],
)
metadata = api_dashboards()
items = {}
if 'Report' in metadata:
items['Report'] = []
items['Report'].extend(self.options['report_folders'])
for report in metadata['Report']:
items['Report'].append(report['fullName'])
if 'Dashboard' in metadata:
items['Dashboard'] = []
items['Dashboard'].extend(self.options['dashboard_folders'])
for dashboard in metadata['Dashboard']:
items['Dashboard'].append(dashboard['fullName'])
api_version = self.project_config.project__package__api_version
package_xml = package_xml_from_dict(items, api_version)
        self.logger.info(package_xml)
return self.api_class(
self,
package_xml,
api_version,
)
class Deploy(BaseSalesforceMetadataApiTask):
api_class = ApiDeploy
task_options = {
'path': {
'description': 'The path to the metadata source to be deployed',
'required': True,
},
}
def _get_api(self, path=None):
if not path:
path = self.task_config.options__path
# Build the zip file
zip_file = tempfile.TemporaryFile()
zipf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)
pwd = os.getcwd()
os.chdir(path)
for root, dirs, files in os.walk('.'):
for f in files:
self._write_zip_file(zipf, root, f)
zipf.close()
zip_file.seek(0)
package_zip = base64.b64encode(zip_file.read())
os.chdir(pwd)
return self.api_class(self, package_zip, purge_on_delete=False)
def _write_zip_file(self, zipf, root, path):
zipf.write(os.path.join(root, path))
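# Hedged sketch of the payload Deploy._get_api() builds: the metadata
# directory is zipped and base64-encoded, which is the form the Metadata API
# deploy call expects. Roughly:
#
#     import base64, tempfile, zipfile
#     tmp = tempfile.TemporaryFile()
#     zf = zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED)
#     zf.write('package.xml')          # one entry per file under the source path
#     zf.close()
#     tmp.seek(0)
#     payload = base64.b64encode(tmp.read())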
class CreatePackage(Deploy):
task_options = {
'package': {
'description': 'The name of the package to create. Defaults to project__package__name',
'required': True,
},
'api_version': {
'description': 'The api version to use when creating the package. Defaults to project__package__api_version',
'required': True,
},
}
def _init_options(self, kwargs):
super(CreatePackage, self)._init_options(kwargs)
if 'package' not in self.options:
self.options['package'] = self.project_config.project__package__name
if 'api_version' not in self.options:
self.options['api_version'] = self.project_config.project__package__api_version
def _get_api(self, path=None):
package_zip = CreatePackageZipBuilder(self.options['package'], self.options['api_version'])
return self.api_class(self, package_zip(), purge_on_delete=False)
class InstallPackageVersion(Deploy):
task_options = {
'namespace': {
'description': 'The namespace of the package to install. Defaults to project__package__namespace',
'required': True,
},
'version': {
'description': 'The version of the package to install. "latest" and "latest_beta" can be used to trigger lookup via Github Releases on the repository.',
'required': True,
},
'retries': {
'description': 'Number of retries (default=5)',
},
'retry_interval': {
            'description': 'Number of seconds to wait before the next retry (default=5)',
},
'retry_interval_add': {
            'description': 'Number of seconds to add before each retry (default=30)',
},
}
def _init_options(self, kwargs):
super(InstallPackageVersion, self)._init_options(kwargs)
if 'namespace' not in self.options:
self.options['namespace'] = self.project_config.project__package__namespace
if 'retries' not in self.options:
self.options['retries'] = 5
if 'retry_interval' not in self.options:
self.options['retry_interval'] = 5
if 'retry_interval_add' not in self.options:
self.options['retry_interval_add'] = 30
if self.options.get('version') == 'latest':
self.options['version'] = self.project_config.get_latest_version()
self.logger.info('Installing latest release: {}'.format(self.options['version']))
elif self.options.get('version') == 'latest_beta':
self.options['version'] = self.project_config.get_latest_version(beta=True)
self.logger.info('Installing latest beta release: {}'.format(self.options['version']))
def _get_api(self, path=None):
package_zip = InstallPackageZipBuilder(self.options['namespace'], self.options['version'])
return self.api_class(self, package_zip(), purge_on_delete=False)
def _run_task(self):
self._retry()
def _try(self):
api = self._get_api()
api()
def _is_retry_valid(self, e):
if (isinstance(e, MetadataApiError) and
('This package is not yet available' in e.message or
'InstalledPackage version number' in e.message)):
return True
class UninstallPackage(Deploy):
task_options = {
'namespace': {
'description': 'The namespace of the package to uninstall. Defaults to project__package__namespace',
'required': True,
},
'purge_on_delete': {
'description': 'Sets the purgeOnDelete option for the deployment. Defaults to True',
'required': True,
},
}
def _init_options(self, kwargs):
super(UninstallPackage, self)._init_options(kwargs)
if 'namespace' not in self.options:
self.options['namespace'] = self.project_config.project__package__namespace
if 'purge_on_delete' not in self.options:
self.options['purge_on_delete'] = True
if self.options['purge_on_delete'] == 'False':
self.options['purge_on_delete'] = False
def _get_api(self, path=None):
package_zip = UninstallPackageZipBuilder(self.options['namespace'])
return self.api_class(self, package_zip(), purge_on_delete=self.options['purge_on_delete'])
class UpdateDependencies(BaseSalesforceMetadataApiTask):
api_class = ApiDeploy
name = 'UpdateDependencies'
task_options = {
'purge_on_delete': {
'description': 'Sets the purgeOnDelete option for the deployment. Defaults to True',
},
}
def _init_options(self, kwargs):
super(UpdateDependencies, self)._init_options(kwargs)
if 'purge_on_delete' not in self.options:
self.options['purge_on_delete'] = True
if (isinstance(self.options['purge_on_delete'], basestring) and
self.options['purge_on_delete'].lower() == 'false'):
self.options['purge_on_delete'] = False
def _run_task(self):
dependencies = self.project_config.project__dependencies
if not dependencies:
self.logger.info('Project has no dependencies, doing nothing')
return
self.installed = self._get_installed()
self.uninstall_queue = []
self.install_queue = []
self.logger.info('Dependencies:')
self._process_dependencies(dependencies)
# Reverse the uninstall queue
self.uninstall_queue.reverse()
self._uninstall_dependencies()
self._install_dependencies()
def _process_dependencies(self, dependencies):
for dependency in dependencies:
dependency_version = str(dependency['version'])
# Process child dependencies
dependency_uninstalled = False
if 'dependencies' in dependency and dependency['dependencies']:
count_uninstall = len(self.uninstall_queue)
self._process_dependencies(dependency['dependencies'])
if count_uninstall != len(self.uninstall_queue):
dependency_uninstalled = True
if dependency['namespace'] in self.installed:
# Some version is installed, check what to do
installed_version = self.installed[dependency['namespace']]
if dependency_version == installed_version:
self.logger.info(' {}: version {} already installed'.format(
dependency['namespace'],
dependency_version,
))
continue
required_version = LooseVersion(dependency_version)
installed_version = LooseVersion(installed_version)
if 'Beta' in installed_version.vstring:
# Always uninstall Beta versions if required is different
self.uninstall_queue.append(dependency)
self.logger.info(' {}: Uninstall {} to upgrade to {}'.format(
dependency['namespace'],
installed_version,
dependency['version'],
))
elif dependency_uninstalled:
# If a dependency of this one needs to be uninstalled, always uninstall the package
self.uninstall_queue.append(dependency)
self.logger.info(' {}: Uninstall and Reinstall to allow downgrade of dependency'.format(
dependency['namespace'],
))
elif required_version < installed_version:
# Uninstall to downgrade
self.uninstall_queue.append(dependency)
self.logger.info(' {}: Downgrade from {} to {} (requires uninstall/install)'.format(
dependency['namespace'],
installed_version,
dependency['version'],
))
else:
self.logger.info(' {}: Upgrade from {} to {}'.format(
dependency['namespace'],
installed_version,
dependency['version'],
))
self.install_queue.append(dependency)
else:
# Just a regular install
self.logger.info(' {}: Install version {}'.format(
dependency['namespace'],
dependency['version'],
))
self.install_queue.append(dependency)
def _get_installed(self):
self.logger.info('Retrieving list of packages from target org')
api = ApiRetrieveInstalledPackages(self)
return api()
def _uninstall_dependencies(self):
for dependency in self.uninstall_queue:
self._uninstall_dependency(dependency)
def _install_dependencies(self):
for dependency in self.install_queue:
self._install_dependency(dependency)
def _install_dependency(self, dependency):
self.logger.info('Installing {} version {}'.format(
dependency['namespace'],
dependency['version'],
))
package_zip = InstallPackageZipBuilder(dependency['namespace'], dependency['version'])
api = self.api_class(self, package_zip(), purge_on_delete=self.options['purge_on_delete'])
return api()
def _uninstall_dependency(self, dependency):
self.logger.info('Uninstalling {}'.format(dependency['namespace']))
package_zip = UninstallPackageZipBuilder(dependency['namespace'])
api = self.api_class(self, package_zip(), purge_on_delete=self.options['purge_on_delete'])
return api()
class DeployBundles(Deploy):
task_options = {
'path': {
'description': 'The path to the parent directory containing the metadata bundles directories',
'required': True,
}
}
def _run_task(self):
path = self.options['path']
pwd = os.getcwd()
path = os.path.join(pwd, path)
self.logger.info(
'Deploying all metadata bundles in path {}'.format(path))
if not os.path.isdir(path):
self.logger.warn('Path {} not found, skipping'.format(path))
return
for item in os.listdir(path):
item_path = os.path.join(path, item)
if not os.path.isdir(item_path):
continue
self.logger.info('Deploying bundle: {}/{}'.format(self.options['path'], item))
self._deploy_bundle(item_path)
def _deploy_bundle(self, path):
api = self._get_api(path)
return api()
class DeployNamespacedBundles(DeployBundles):
name = 'DeployNamespacedBundles'
task_options = {
'path': {
'description': 'The path to the parent directory containing the metadata bundles directories',
'required': True,
},
'managed': {
'description': 'If True, will insert the actual namespace prefix. Defaults to False or no namespace',
},
'namespace': {
'description': 'The namespace to replace the token with if in managed mode. Defaults to project__package__namespace',
},
'namespace_token': {
'description': 'The string token to replace with the namespace',
'required': True,
},
'filename_token': {
            'description': 'The string token in file names to replace with the namespace',
'required': True,
},
}
def _init_options(self, kwargs):
super(DeployNamespacedBundles, self)._init_options(kwargs)
if 'managed' not in self.options:
self.options['managed'] = False
if 'namespace' not in self.options:
self.options['namespace'] = self.project_config.project__package__namespace
def _write_zip_file(self, zipf, root, path):
if self.options['managed'] in [True, 'True', 'true']:
namespace = self.options['namespace']
if namespace:
namespace = namespace + '__'
else:
namespace = ''
zip_path = path.replace(self.options['filename_token'], namespace)
with open(os.path.join(root, path), 'r') as f:
content = f.read().replace(self.options['namespace_token'], namespace)
if root == '.':
zipf.writestr(zip_path, content)
else:
# strip ./ from the start of root
root = root[2:]
zipf.writestr(os.path.join(root, zip_path), content)
class BaseUninstallMetadata(Deploy):
def _get_api(self, path=None):
destructive_changes = self._get_destructive_changes(path=path)
if not destructive_changes:
return
package_zip = DestructiveChangesZipBuilder(destructive_changes)
api = self.api_class(self, package_zip(), purge_on_delete=self.options['purge_on_delete'])
return api
class UninstallLocal(BaseUninstallMetadata):
def _get_destructive_changes(self, path=None):
if not path:
path = self.options['path']
generator = PackageXmlGenerator(
directory = path,
api_version = self.project_config.project__package__api_version,
delete = True,
)
return generator()
class UninstallPackaged(UninstallLocal):
task_options = {
'package': {
'description': 'The package name to uninstall. All metadata from the package will be retrieved and a custom destructiveChanges.xml package will be constructed and deployed to delete all deleteable metadata from the package. Defaults to project__package__name',
'required': True,
},
'purge_on_delete': {
'description': 'Sets the purgeOnDelete option for the deployment. Defaults to True',
'required': True,
},
}
def _init_options(self, kwargs):
super(UninstallPackaged, self)._init_options(kwargs)
if 'package' not in self.options:
self.options['package'] = self.project_config.project__package__name
if 'purge_on_delete' not in self.options:
self.options['purge_on_delete'] = True
if self.options['purge_on_delete'] == 'False':
self.options['purge_on_delete'] = False
def _retrieve_packaged(self):
retrieve_api = ApiRetrievePackaged(
self,
self.options['package'],
self.project_config.project__package__api_version
)
packaged = retrieve_api()
packaged = zip_subfolder(packaged, self.options['package'])
return packaged
def _get_destructive_changes(self, path=None):
self.logger.info('Retrieving metadata in package {} from target org'.format(self.options['package']))
packaged = self._retrieve_packaged()
tempdir = tempfile.mkdtemp()
packaged.extractall(tempdir)
destructive_changes = super(UninstallPackaged, self)._get_destructive_changes(tempdir)
shutil.rmtree(tempdir)
self.logger.info('Deleting metadata in package {} from target org'.format(self.options['package']))
return destructive_changes
class UninstallPackagedIncremental(UninstallPackaged):
name = 'UninstallPackagedIncremental'
skip_types = ['RecordType','Scontrol']
task_options = {
'path': {
'description': 'The local path to compare to the retrieved packaged metadata from the org. Defaults to src',
'required': True,
},
'package': {
'description': 'The package name to uninstall. All metadata from the package will be retrieved and a custom destructiveChanges.xml package will be constructed and deployed to delete all deleteable metadata from the package. Defaults to project__package__name',
'required': True,
},
'purge_on_delete': {
'description': 'Sets the purgeOnDelete option for the deployment. Defaults to True',
'required': True,
},
}
def _init_options(self, kwargs):
super(UninstallPackagedIncremental, self)._init_options(kwargs)
if 'path' not in self.options:
self.options['path'] = 'src'
if 'purge_on_delete' not in self.options:
self.options['purge_on_delete'] = True
if self.options['purge_on_delete'] == 'False':
self.options['purge_on_delete'] = False
def _get_destructive_changes(self, path=None):
self.logger.info('Retrieving metadata in package {} from target org'.format(self.options['package']))
packaged = self._retrieve_packaged()
tempdir = tempfile.mkdtemp()
packaged.extractall(tempdir)
destructive_changes = self._package_xml_diff(
os.path.join(self.options['path'], 'package.xml'),
os.path.join(tempdir, 'package.xml'),
)
shutil.rmtree(tempdir)
if destructive_changes:
self.logger.info('Deleting metadata in package {} from target org'.format(self.options['package']))
else:
self.logger.info('No metadata found to delete')
return destructive_changes
def _package_xml_diff(self, master, compare):
master_xml = xmltodict.parse(open(master, 'r'))
compare_xml = xmltodict.parse(open(compare, 'r'))
delete = {}
master_items = {}
compare_items = {}
md_types = master_xml['Package'].get('types', [])
if not isinstance(md_types, list):
# needed when only 1 metadata type is found
md_types = [md_types]
for md_type in md_types:
master_items[md_type['name']] = []
if 'members' not in md_type:
continue
if isinstance(md_type['members'], unicode):
master_items[md_type['name']].append(md_type['members'])
else:
for item in md_type['members']:
master_items[md_type['name']].append(item)
for md_type in compare_xml['Package'].get('types',[]):
compare_items[md_type['name']] = []
if 'members' not in md_type:
continue
if isinstance(md_type['members'], unicode):
compare_items[md_type['name']].append(md_type['members'])
else:
for item in md_type['members']:
compare_items[md_type['name']].append(item)
for md_type, members in compare_items.items():
if md_type not in master_items:
delete[md_type] = members
continue
for member in members:
if member not in master_items[md_type]:
if md_type not in delete:
delete[md_type] = []
delete[md_type].append(member)
if delete:
self.logger.info('Deleting metadata:')
for skip_type in self.skip_types:
delete.pop(skip_type, None)
for md_type, members in delete.items():
for member in members:
self.logger.info(' {}: {}'.format(md_type, member))
destructive_changes = self._render_xml_from_items_dict(delete)
return destructive_changes
def _render_xml_from_items_dict(self, items):
return package_xml_from_dict(
items,
api_version = self.project_config.project__package__api_version,
)
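# Hedged worked example of _package_xml_diff(): if the retrieved (org-side)
# package.xml lists ApexClass members ['ClassA', 'ClassB'] but the local
# package.xml lists only ['ClassA'], the diff produces
#
#     {'ApexClass': ['ClassB']}
#
# which _render_xml_from_items_dict() turns into a destructiveChanges.xml
# (skip_types such as RecordType are dropped first).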
class UninstallLocalBundles(UninstallLocal):
def _run_task(self):
path = self.options['path']
pwd = os.getcwd()
path = os.path.join(pwd, path)
self.logger.info('Deleting all metadata from bundles in {} from target org'.format(path))
for item in os.listdir(path):
item_path = os.path.join(path, item)
if not os.path.isdir(item_path):
continue
self.logger.info('Deleting bundle: {}/{}'.format(self.options['path'], item))
self._delete_bundle(item_path)
def _delete_bundle(self, path=None):
api = self._get_api(path)
return api()
class UninstallLocalNamespacedBundles(UninstallLocalBundles):
task_options = {
'path': {
'description': 'The path to a directory containing the metadata bundles (subdirectories) to uninstall',
'required': True,
},
'managed': {
'description': 'If True, will insert the actual namespace prefix. Defaults to False or no namespace',
},
'namespace': {
'description': 'The namespace to replace the token with if in managed mode. Defaults to project__package__namespace',
},
'filename_token': {
            'description': 'The string token in file names to replace with the namespace',
'required': True,
},
'purge_on_delete': {
'description': 'Sets the purgeOnDelete option for the deployment. Defaults to True',
'required': True,
},
}
def _init_options(self, kwargs):
super(UninstallLocalNamespacedBundles, self)._init_options(kwargs)
if 'managed' not in self.options:
self.options['managed'] = False
if 'namespace' not in self.options:
self.options['namespace'] = self.project_config.project__package__namespace
if 'purge_on_delete' not in self.options:
self.options['purge_on_delete'] = True
if self.options['purge_on_delete'] == 'False':
self.options['purge_on_delete'] = False
def _get_destructive_changes(self, path=None):
if not path:
path = self.options['path']
generator = PackageXmlGenerator(
directory = path,
api_version = self.project_config.project__package__api_version,
delete = True,
)
namespace = ''
if self.options['managed'] in [True, 'True', 'true']:
if self.options['namespace']:
namespace = self.options['namespace'] + '__'
destructive_changes = generator()
destructive_changes = destructive_changes.replace(self.options['filename_token'], namespace)
return destructive_changes
class UpdateAdminProfile(Deploy):
name = 'UpdateAdminProfile'
task_options = {
'package_xml': {
'description': 'Override the default package.xml file for retrieving the Admin.profile and all objects and classes that need to be included by providing a path to your custom package.xml',
}
}
def _init_options(self, kwargs):
super(UpdateAdminProfile, self)._init_options(kwargs)
if 'package_xml' not in self.options:
self.options['package_xml'] = os.path.join(CUMULUSCI_PATH, 'cumulusci', 'files', 'admin_profile.xml')
self.options['package_xml_path'] = self.options['package_xml']
with open(self.options['package_xml_path'], 'r') as f:
self.options['package_xml'] = f.read()
def _run_task(self):
self.tempdir = tempfile.mkdtemp()
self._retrieve_unpackaged()
self._process_metadata()
self._deploy_metadata()
shutil.rmtree(self.tempdir)
def _retrieve_unpackaged(self):
self.logger.info('Retrieving metadata using {}'.format(self.options['package_xml_path']))
api_retrieve = ApiRetrieveUnpackaged(
self,
self.options.get('package_xml'),
self.project_config.project__package__api_version,
)
unpackaged = api_retrieve()
unpackaged.extractall(self.tempdir)
def _process_metadata(self):
self.logger.info('Processing retrieved metadata in {}'.format(self.tempdir))
findReplace(
'<editable>false</editable>',
'<editable>true</editable>',
os.path.join(self.tempdir, 'profiles'),
'Admin.profile',
)
findReplace(
'<readable>false</readable>',
'<readable>true</readable>',
os.path.join(self.tempdir, 'profiles'),
'Admin.profile',
)
def _deploy_metadata(self):
self.logger.info('Deploying updated Admin.profile from {}'.format(self.tempdir))
api = self._get_api(path=self.tempdir)
return api()
class PackageUpload(BaseSalesforceToolingApiTask):
name = 'PackageUpload'
api_version = '38.0'
task_options = {
'name': {
'description': 'The name of the package version.',
'required': True,
},
'production': {
'description': 'If True, uploads a production release. Defaults to uploading a beta',
},
'description': {
'description': 'A description of the package and what this version contains.',
},
'password': {
'description': "An optional password for sharing the package privately with anyone who has the password. Don't enter a password if you want to make the package available to anyone on AppExchange and share your package publicly.",
},
'post_install_url': {
'description': 'The fully-qualified URL of the post-installation instructions. Instructions are shown as a link after installation and are available from the package detail view.',
},
'release_notes_url': {
'description': 'The fully-qualified URL of the package release notes. Release notes are shown as a link during the installation process and are available from the package detail view after installation.',
},
'namespace': {
'description': 'The namespace of the package. Defaults to project__package__namespace',
},
}
def _init_options(self, kwargs):
super(PackageUpload, self)._init_options(kwargs)
# Set the namespace option to the value from cumulusci.yml if not already set
        if 'namespace' not in self.options:
self.options['namespace'] = self.project_config.project__package__namespace
def _run_task(self):
sf = self._init_api()
package_res = sf.query("select Id from MetadataPackage where NamespacePrefix='{}'".format(self.options['namespace']))
if package_res['totalSize'] != 1:
message = 'No package found with namespace {}'.format(self.options['namespace'])
self.logger.error(message)
raise SalesforceException(message)
package_id = package_res['records'][0]['Id']
production = self.options.get('production', False) in [True, 'True', 'true']
package_info = {
'VersionName': self.options['name'],
'IsReleaseVersion': production,
'MetadataPackageId': package_id,
}
if 'description' in self.options:
package_info['Description'] = self.options['description']
if 'password' in self.options:
package_info['Password'] = self.options['password']
if 'post_install_url' in self.options:
package_info['PostInstallUrl'] = self.options['post_install_url']
if 'release_notes_url' in self.options:
package_info['ReleaseNotesUrl'] = self.options['release_notes_url']
PackageUploadRequest = self._get_tooling_object('PackageUploadRequest')
upload = PackageUploadRequest.create(package_info)
upload_id = upload['id']
soql_check_upload = "select Status, Errors, MetadataPackageVersionId from PackageUploadRequest where Id = '{}'".format(upload['id'])
upload = self.tooling.query(soql_check_upload)
if upload['totalSize'] != 1:
message = 'Failed to get info for upload with id {}'.format(upload_id)
self.logger.error(message)
raise SalesforceException(message)
upload = upload['records'][0]
while upload['Status'] == 'IN_PROGRESS':
time.sleep(3)
upload = self.tooling.query(soql_check_upload)
if upload['totalSize'] != 1:
message = 'Failed to get info for upload with id {}'.format(upload_id)
self.logger.error(message)
raise SalesforceException(message)
upload = upload['records'][0]
if upload['Status'] == 'ERROR':
self.logger.error('Package upload failed with the following errors')
for error in upload['Errors']['errors']:
self.logger.error(' {}'.format(error['message']))
raise SalesforceException('Package upload failed')
else:
version_id = upload['MetadataPackageVersionId']
version_res = self.tooling.query("select MajorVersion, MinorVersion, PatchVersion, BuildNumber, ReleaseState from MetadataPackageVersion where Id = '{}'".format(version_id))
if version_res['totalSize'] != 1:
message = 'Version {} not found'.format(version_id)
self.logger.error(message)
raise SalesforceException(message)
version = version_res['records'][0]
version_parts = [
str(version['MajorVersion']),
str(version['MinorVersion']),
]
if version['PatchVersion']:
version_parts.append(str(version['PatchVersion']))
self.version_number = '.'.join(version_parts)
if version['ReleaseState'] == 'Beta':
self.version_number += ' (Beta {})'.format(version['BuildNumber'])
self.return_values = {'version_number': self.version_number}
self.logger.info('Uploaded package version {} with Id {}'.format(
self.version_number,
version_id
))
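# Hedged example of the version string assembled above: MajorVersion=1,
# MinorVersion=2, PatchVersion=0, ReleaseState='Beta', BuildNumber=3 yields
#
#     '1.2 (Beta 3)'
#
# (the patch component is appended only when PatchVersion is non-zero).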
class RunApexTests(BaseSalesforceToolingApiTask):
task_options = {
'test_name_match': {
'description': ('Query to find Apex test classes to run ' +
'("%" is wildcard). Defaults to ' +
'project__test__name_match'),
'required': True,
},
'test_name_exclude': {
'description': ('Query to find Apex test classes to exclude ' +
'("%" is wildcard). Defaults to ' +
'project__test__name_exclude'),
},
'namespace': {
'description': ('Salesforce project namespace. Defaults to ' +
'project__package__namespace'),
},
'managed': {
'description': ('If True, search for tests in the namespace ' +
'only. Defaults to False'),
},
'poll_interval': {
'description': ('Seconds to wait between polling for Apex test ' +
'results. Defaults to 3'),
},
'retries': {
'description': 'Number of retries (default=10)',
},
'retry_interval': {
            'description': 'Number of seconds to wait before the next retry (default=5)',
},
'retry_interval_add': {
            'description': 'Number of seconds to add before each retry (default=5)',
},
'junit_output': {
'description': 'File name for JUnit output. Defaults to test_results.xml',
},
}
def _init_options(self, kwargs):
super(RunApexTests, self)._init_options(kwargs)
if 'test_name_match' not in self.options:
self.options['test_name_match'] = self.project_config.project__test__name_match
if 'test_name_exclude' not in self.options:
self.options['test_name_exclude'] = self.project_config.project__test__name_exclude
if self.options['test_name_exclude'] is None:
self.options['test_name_exclude'] = ''
if 'namespace' not in self.options:
self.options['namespace'] = self.project_config.project__package__namespace
if 'managed' not in self.options:
self.options['managed'] = False
else:
if self.options['managed'] in [True, 'True', 'true']:
self.options['managed'] = True
else:
self.options['managed'] = False
if 'retries' not in self.options:
self.options['retries'] = 10
if 'retry_interval' not in self.options:
self.options['retry_interval'] = 5
if 'retry_interval_add' not in self.options:
self.options['retry_interval_add'] = 5
if 'junit_output' not in self.options:
self.options['junit_output'] = 'test_results.xml'
self.counts = {}
def _init_class(self):
self.classes_by_id = {}
self.classes_by_name = {}
self.job_id = None
self.results_by_class_name = {}
self._debug_init_class()
self.result = None
# These are overridden in the debug version
def _debug_init_class(self):
pass
def _debug_get_duration_class(self, class_id):
pass
def _debug_get_duration_method(self, result):
pass
def _debug_get_logs(self):
pass
def _debug_get_results(self, result):
pass
def _debug_create_trace_flag(self):
pass
def _decode_to_unicode(self, content):
if content:
try:
# Try to decode ISO-8859-1 to unicode
return content.decode('ISO-8859-1')
except UnicodeEncodeError:
# Assume content is unicode already
return content
def _get_test_classes(self):
if self.options['managed']:
namespace = self.options.get('namespace')
if not namespace:
raise TaskOptionsError(
'Running tests in managed mode but no namespace available.'
)
namespace = "'{}'".format(namespace)
else:
namespace = 'null'
# Split by commas to allow multiple class name matching options
test_name_match = self.options['test_name_match']
included_tests = []
for pattern in test_name_match.split(','):
if pattern:
included_tests.append("Name LIKE '{}'".format(pattern))
# Add any excludes to the where clause
test_name_exclude = self.options.get('test_name_exclude', '')
excluded_tests = []
for pattern in test_name_exclude.split(','):
if pattern:
excluded_tests.append("(NOT Name LIKE '{}')".format(pattern))
# Get all test classes for namespace
query = ('SELECT Id, Name FROM ApexClass ' +
'WHERE NamespacePrefix = {}'.format(namespace))
if included_tests:
query += ' AND ({})'.format(' OR '.join(included_tests))
if excluded_tests:
query += ' AND {}'.format(' AND '.join(excluded_tests))
# Run the query
self.logger.info('Running query: {}'.format(query))
result = self.tooling.query_all(query)
self.logger.info('Found {} test classes'.format(result['totalSize']))
return result
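    # Hedged example of the SOQL assembled above: unmanaged mode with
    # test_name_match '%_TEST%' and test_name_exclude 'Sample%' yields roughly
    #
    #     SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = null
    #         AND (Name LIKE '%_TEST%') AND (NOT Name LIKE 'Sample%')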
def _get_test_results(self):
result = self.tooling.query_all("SELECT StackTrace, Message, " +
"ApexLogId, AsyncApexJobId, MethodName, Outcome, ApexClassId, " +
"TestTimestamp FROM ApexTestResult " +
"WHERE AsyncApexJobId = '{}'".format(self.job_id))
self.counts = {
'Pass': 0,
'Fail': 0,
'CompileFail': 0,
'Skip': 0,
}
for test_result in result['records']:
class_name = self.classes_by_id[test_result['ApexClassId']]
self.results_by_class_name[class_name][test_result[
'MethodName']] = test_result
self.counts[test_result['Outcome']] += 1
self._debug_get_results(test_result)
self._debug_get_logs()
test_results = []
class_names = self.results_by_class_name.keys()
class_names.sort()
for class_name in class_names:
class_id = self.classes_by_name[class_name]
message = 'Class: {}'.format(class_name)
duration = self._debug_get_duration_class(class_id)
if duration:
message += '({}s)'.format(duration)
self.logger.info(message)
method_names = self.results_by_class_name[class_name].keys()
method_names.sort()
for method_name in method_names:
result = self.results_by_class_name[class_name][method_name]
message = '\t{}: {}'.format(result['Outcome'],
result['MethodName'])
duration = self._debug_get_duration_method(result)
if duration:
message += ' ({}s)'.format(duration)
self.logger.info(message)
test_results.append({
'Children': result.get('children', None),
'ClassName': self._decode_to_unicode(class_name),
'Method': self._decode_to_unicode(result['MethodName']),
'Message': self._decode_to_unicode(result['Message']),
'Outcome': self._decode_to_unicode(result['Outcome']),
'StackTrace': self._decode_to_unicode(
result['StackTrace']),
'Stats': result.get('stats', None),
'TestTimestamp': result.get('TestTimestamp', None),
})
if result['Outcome'] in ['Fail', 'CompileFail']:
self.logger.info('\tMessage: {}'.format(result['Message']))
self.logger.info('\tStackTrace: {}'.format(
result['StackTrace']))
self.logger.info('-' * 80)
self.logger.info('Pass: {} Fail: {} CompileFail: {} Skip: {}'
.format(
self.counts['Pass'],
self.counts['Fail'],
self.counts['CompileFail'],
self.counts['Skip'],
))
self.logger.info('-' * 80)
if self.counts['Fail'] or self.counts['CompileFail']:
self.logger.error('-' * 80)
self.logger.error('Failing Tests')
self.logger.error('-' * 80)
counter = 0
for result in test_results:
if result['Outcome'] not in ['Fail', 'CompileFail']:
continue
counter += 1
self.logger.error('{}: {}.{} - {}'.format(counter,
result['ClassName'], result['Method'], result['Outcome']))
self.logger.error('\tMessage: {}'.format(result['Message']))
self.logger.error('\tStackTrace: {}'.format(
result['StackTrace']))
return test_results
def _run_task(self):
result = self._get_test_classes()
if result['totalSize'] == 0:
return
for test_class in result['records']:
self.classes_by_id[test_class['Id']] = test_class['Name']
self.classes_by_name[test_class['Name']] = test_class['Id']
self.results_by_class_name[test_class['Name']] = {}
self._debug_create_trace_flag()
self.logger.info('Queuing tests for execution...')
ids = self.classes_by_id.keys()
        url = self.tooling.base_url + 'runTestsAsynchronous'
        result = self.tooling._call_salesforce(
            method='POST',
            url=url,
            json={'classids': ','.join(str(id) for id in ids)},
        )
        if result.status_code != 200:
            raise SalesforceGeneralError(url,
                                         'runTestsAsynchronous',
                                         result.status_code,
                                         result.content)
self.job_id = result.json()
self._wait_for_tests()
test_results = self._get_test_results()
self._write_output(test_results)
if self.counts.get('Fail') or self.counts.get('CompileFail'):
total = self.counts.get('Fail') + self.counts.get('CompileFail')
raise ApexTestException(
'{} tests failed and {} tests failed compilation'.format(
self.counts.get('Fail'), self.counts.get('CompileFail')
)
)
def _wait_for_tests(self):
poll_interval = int(self.options.get('poll_interval', 1))
while True:
self._retry()
counts = {
'Aborted': 0,
'Completed': 0,
'Failed': 0,
'Holding': 0,
'Preparing': 0,
'Processing': 0,
'Queued': 0,
}
for test_queue_item in self.result['records']:
counts[test_queue_item['Status']] += 1
self.logger.info('Completed: {} Processing: {} Queued: {}'
.format(
counts['Completed'],
counts['Processing'],
counts['Queued'],
))
if counts['Queued'] == 0 and counts['Processing'] == 0:
self.logger.info('Apex tests completed')
break
time.sleep(poll_interval)
    def _try(self):
        # Called via self._retry() in _wait_for_tests; each attempt refreshes
        # self.result with the current ApexTestQueueItem statuses.
self.result = self.tooling.query_all(
"SELECT Id, Status, ApexClassId FROM ApexTestQueueItem " +
"WHERE ParentJobId = '{}'".format(self.job_id))
def _write_output(self, test_results):
junit_output = self.options['junit_output']
with io.open(junit_output, mode='w', encoding='utf-8') as f:
f.write(u'<testsuite tests="{}">\n'.format(len(test_results)))
for result in test_results:
s = ' <testcase classname="{}" name="{}"'.format(
result['ClassName'], result['Method'])
if ('Stats' in result and result['Stats']
and 'duration' in result['Stats']):
s += ' time="{}"'.format(result['Stats']['duration'])
if result['Outcome'] in ['Fail', 'CompileFail']:
s += '>\n'
s += ' <failure type="{}">{}</failure>\n'.format(
cgi.escape(result['StackTrace']),
cgi.escape(result['Message']),
)
s += ' </testcase>\n'
else:
s += ' />\n'
f.write(unicode(s))
f.write(u'</testsuite>')
run_apex_tests_debug_options = RunApexTests.task_options.copy()
run_apex_tests_debug_options.update({
'debug_log_dir': {
'description': 'Directory to store debug logs. Defaults to temp dir.',
},
'json_output': {
'description': ('The path to the json output file. Defaults to ' +
'test_results.json'),
}
})
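# Editorial sketch (not part of CumulusCI): a minimal, standalone illustration
# of the JUnit XML shape that RunApexTests._write_output above emits, assuming
# result dicts with the same keys and omitting the optional time attribute.
# It uses xml.sax.saxutils.escape in place of cgi.escape; the function name and
# the formatting details are invented for illustration only.
def _example_junit_xml(test_results):
    from xml.sax.saxutils import escape
    lines = ['<testsuite tests="{}">'.format(len(test_results))]
    for result in test_results:
        testcase = '    <testcase classname="{}" name="{}"'.format(
            result['ClassName'], result['Method'])
        if result['Outcome'] in ('Fail', 'CompileFail'):
            testcase += '>\n        <failure type="{}">{}</failure>\n    </testcase>'.format(
                escape(result['StackTrace'] or ''),
                escape(result['Message'] or ''))
        else:
            testcase += ' />'
        lines.append(testcase)
    lines.append('</testsuite>')
    return '\n'.join(lines)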
class RunApexTestsDebug(RunApexTests):
"""Run Apex tests and collect debug info"""
api_version = '38.0'
task_options = run_apex_tests_debug_options
def _init_options(self, kwargs):
super(RunApexTestsDebug, self)._init_options(kwargs)
if 'json_output' not in self.options:
self.options['json_output'] = 'test_results.json'
def _debug_init_class(self):
self.classes_by_log_id = {}
self.logs_by_class_id = {}
self.trace_id = None
def _debug_create_trace_flag(self):
"""Create a TraceFlag for a given user."""
self._delete_debug_levels()
self._delete_trace_flags()
self.logger.info('Creating DebugLevel object')
DebugLevel = self._get_tooling_object('DebugLevel')
result = DebugLevel.create({
'ApexCode': 'Info',
'ApexProfiling': 'Debug',
'Callout': 'Info',
'Database': 'Info',
'DeveloperName': 'CumulusCI',
'MasterLabel': 'CumulusCI',
'System': 'Info',
'Validation': 'Info',
'Visualforce': 'Info',
'Workflow': 'Info',
})
self.debug_level_id = result['id']
self.logger.info('Setting up trace flag to capture debug logs')
# New TraceFlag expires 12 hours from now
expiration_date = (datetime.datetime.utcnow() +
datetime.timedelta(seconds=60*60*12))
TraceFlag = self._get_tooling_object('TraceFlag')
result = TraceFlag.create({
'DebugLevelId': result['id'],
'ExpirationDate': expiration_date.isoformat(),
'LogType': 'USER_DEBUG',
'TracedEntityId': self.org_config.user_id,
})
self.trace_id = result['id']
self.logger.info('Created TraceFlag for user')
def _delete_trace_flags(self):
"""
Delete existing DebugLevel objects.
This will automatically delete associated TraceFlags as well.
"""
self.logger.info('Deleting existing TraceFlag objects')
result = self.tooling.query(
"Select Id from TraceFlag Where TracedEntityId = '{}'".format(
self.org_config.user_id
)
)
if result['totalSize']:
TraceFlag = self._get_tooling_object('TraceFlag')
for record in result['records']:
TraceFlag.delete(str(record['Id']))
def _delete_debug_levels(self):
"""
Delete existing DebugLevel objects.
This will automatically delete associated TraceFlags as well.
"""
self.logger.info('Deleting existing DebugLevel objects')
result = self.tooling.query('Select Id from DebugLevel')
if result['totalSize']:
DebugLevel = self._get_tooling_object('DebugLevel')
for record in result['records']:
DebugLevel.delete(str(record['Id']))
def _debug_get_duration_class(self, class_id):
if class_id in self.logs_by_class_id:
return int(self.logs_by_class_id[class_id][
'DurationMilliseconds']) * .001
def _debug_get_duration_method(self, result):
if result.get('stats') and 'duration' in result['stats']:
return result['stats']['duration']
def _debug_get_logs(self):
log_ids = "('{}')".format(
"','".join(str(id) for id in self.classes_by_log_id.keys()))
result = self.tooling.query_all('SELECT Id, Application, ' +
'DurationMilliseconds, Location, LogLength, LogUserId, ' +
'Operation, Request, StartTime, Status ' +
'from ApexLog where Id in {}'.format(log_ids))
debug_log_dir = self.options.get('debug_log_dir')
if debug_log_dir:
tempdir = None
try:
os.makedirs(debug_log_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
tempdir = tempfile.mkdtemp()
for log in result['records']:
class_id = self.classes_by_log_id[log['Id']]
class_name = self.classes_by_id[class_id]
self.logs_by_class_id[class_id] = log
body_url = '{}sobjects/ApexLog/{}/Body'.format(
self.tooling.base_url, log['Id'])
response = self.tooling.request.get(body_url,
headers=self.tooling.headers)
log_file = class_name + '.log'
if debug_log_dir:
log_file = os.path.join(debug_log_dir, log_file)
else:
log_file = os.path.join(tempdir, log_file)
with io.open(log_file, mode='w', encoding='utf-8') as f:
f.write(self._decode_to_unicode(response.content))
with io.open(log_file, mode='r', encoding='utf-8') as f:
method_stats = self._parse_log(class_name, f)
# Add method stats to results_by_class_name
for method, info in method_stats.items():
if method not in self.results_by_class_name[class_name]:
                    # Ignore entries that aren't from a test method, such as
                    # the @testSetup decorated method
continue
self.results_by_class_name[class_name][method].update(info)
# Delete the DebugLevel
DebugLevel = self._get_tooling_object('DebugLevel')
DebugLevel.delete(str(self.debug_level_id))
# Clean up tempdir logs
if tempdir:
shutil.rmtree(tempdir)
def _debug_get_results(self, result):
if result['ApexLogId']:
self.classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
def _log_time_delta(self, start, end):
"""
Returns microsecond difference between two debug log timestamps in the
format HH:MM:SS.micro.
"""
dummy_date = datetime.date(2001, 1, 1)
dummy_date_next = datetime.date(2001, 1, 2)
# Split out the parts of the start and end string
        start_parts = re.split(r':|\.', start)
start_parts = [int(part) for part in start_parts]
start_parts[3] = start_parts[3] * 1000
t_start = datetime.time(*start_parts)
        end_parts = re.split(r':|\.', end)
end_parts = [int(part) for part in end_parts]
end_parts[3] = end_parts[3] * 1000
t_end = datetime.time(*end_parts)
# Combine with dummy date to do date math
d_start = datetime.datetime.combine(dummy_date, t_start)
# If end was on the next day, attach to next dummy day
if start_parts[0] > end_parts[0]:
d_end = datetime.datetime.combine(dummy_date_next, t_end)
else:
d_end = datetime.datetime.combine(dummy_date, t_end)
delta = d_end - d_start
return delta.total_seconds()
def _parse_log(self, class_name, f):
"""Parse an Apex test log."""
class_name = self._decode_to_unicode(class_name)
methods = {}
for method, stats, children in self._parse_log_by_method(class_name,
f):
methods[method] = {'stats': stats, 'children': children}
return methods
def _parse_log_by_method(self, class_name, f):
"""Parse an Apex test log by method."""
stats = {}
last_stats = {}
in_limits = False
in_cumulative_limits = False
in_testing_limits = False
unit = None
method = None
        children = []
        stack = []  # per-method stack of nested code units
        parent = None
for line in f:
line = self._decode_to_unicode(line).strip()
if '|CODE_UNIT_STARTED|[EXTERNAL]|' in line:
unit, unit_type, unit_info = self._parse_unit_started(
class_name, line)
if unit_type == 'test_method':
method = self._decode_to_unicode(unit)
method_unit_info = unit_info
children = []
stack = []
else:
stack.append({
'unit': unit,
'unit_type': unit_type,
'unit_info': unit_info,
'stats': {},
'children': [],
})
continue
if '|CUMULATIVE_LIMIT_USAGE' in line and 'USAGE_END' not in line:
in_cumulative_limits = True
in_testing_limits = False
continue
if '|TESTING_LIMITS' in line:
in_testing_limits = True
in_cumulative_limits = False
continue
if '|LIMIT_USAGE_FOR_NS|(default)|' in line:
# Parse the start of the limits section
in_limits = True
continue
if in_limits and ':' not in line:
# Parse the end of the limits section
in_limits = False
in_cumulative_limits = False
in_testing_limits = False
continue
if in_limits:
# Parse the limit name, used, and allowed values
limit, value = line.split(': ')
if in_testing_limits:
limit = 'TESTING_LIMITS: {}'.format(limit)
used, allowed = value.split(' out of ')
stats[limit] = {'used': used, 'allowed': allowed}
continue
if '|CODE_UNIT_FINISHED|{}.{}'.format(class_name, method) in line:
# Handle the finish of test methods
end_timestamp = line.split(' ')[0]
stats['duration'] = self._log_time_delta(
method_unit_info['start_timestamp'], end_timestamp)
# Yield the stats for the method
yield method, stats, children
last_stats = stats.copy()
stats = {}
in_cumulative_limits = False
in_limits = False
elif '|CODE_UNIT_FINISHED|' in line:
# Handle all other code units finishing
end_timestamp = line.split(' ')[0]
stats['duration'] = self._log_time_delta(
method_unit_info['start_timestamp'], end_timestamp)
                try:
                    child = stack.pop()
                except IndexError:
                    # Skip if there was no stack. This seems to have started
                    # in Spring 16 where the debug log will contain
                    # CODE_UNIT_FINISHED lines which have no matching
                    # CODE_UNIT_STARTED from earlier in the file.
                    continue
child['stats'] = stats
if not stack:
# Add the child to the main children list
children.append(child)
else:
# Add this child to its parent
stack[-1]['children'].append(child)
stats = {}
in_cumulative_limits = False
in_limits = False
if '* MAXIMUM DEBUG LOG SIZE REACHED *' in line:
# If debug log size limit was reached, fail gracefully
break
def _parse_unit_started(self, class_name, line):
unit = line.split('|')[-1]
unit_type = 'other'
unit_info = {}
if unit.startswith(class_name + '.'):
unit_type = 'test_method'
unit = unit.split('.')[-1]
elif 'trigger event' in unit:
unit_type = 'trigger'
unit, obj, event = re.match(
r'(.*) on (.*) trigger event (.*) for.*', unit).groups()
unit_info = {'event': event, 'object': obj}
# Add the start timestamp to unit_info
unit_info['start_timestamp'] = line.split(' ')[0]
return unit, unit_type, unit_info
def _write_output(self, test_results):
# Write the JUnit test report
super(RunApexTestsDebug, self)._write_output(test_results)
# Write the json file
json_output = self.options['json_output']
with io.open(json_output, mode='w', encoding='utf-8') as f:
f.write(unicode(json.dumps(test_results)))
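# Editorial sketch (standalone, not CumulusCI code): mirrors the timestamp
# arithmetic in RunApexTestsDebug._log_time_delta above for debug log
# timestamps of the form HH:MM:SS.mmm, including rollover past midnight.
# The helper name and the sample values are invented.
def _example_log_time_delta(start, end):
    import datetime
    import re

    def _to_time(value):
        hours, minutes, seconds, millis = [int(p) for p in re.split(r':|\.', value)]
        return datetime.time(hours, minutes, seconds, millis * 1000)

    day = datetime.date(2001, 1, 1)
    start_dt = datetime.datetime.combine(day, _to_time(start))
    end_day = day + datetime.timedelta(days=1) if _to_time(end) < _to_time(start) else day
    end_dt = datetime.datetime.combine(end_day, _to_time(end))
    return (end_dt - start_dt).total_seconds()
# e.g. _example_log_time_delta('23:59:59.900', '00:00:00.100') -> 0.2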
class SOQLQuery(BaseSalesforceBulkApiTask):
name = 'SOQLQuery'
    task_options = {
        'object': {'required': True, 'description': 'The object to query'},
        'query': {'required': True, 'description': 'A valid bulk SOQL query for the object'},
        'result_file': {'required': True, 'description': 'The name of the csv file to write the results to'},
    }
def _run_task(self):
self.logger.info('Creating bulk job for: {object}'.format(**self.options))
job = self.bulk.create_query_job(self.options['object'], contentType='CSV')
self.logger.info('Job id: {0}'.format(job))
self.logger.info('Submitting query: {query}'.format(**self.options))
        batch = self.bulk.query(job, self.options['query'])
        self.logger.info('Batch id: {0}'.format(batch))
        self.bulk.wait_for_batch(job, batch)
self.logger.info('Batch {0} finished'.format(batch))
self.bulk.close_job(job)
self.logger.info('Job {0} closed'.format(job))
        with open(self.options['result_file'], 'w') as result_file:
            for row in self.bulk.get_batch_result_iter(job, batch):
                result_file.write(row + '\n')
self.logger.info('Wrote results to: {result_file}'.format(**self.options))
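# Editorial sketch: the bulk query lifecycle that SOQLQuery._run_task above
# relies on (create job -> submit batch -> wait -> close -> stream CSV rows),
# shown against a stand-in bulk object so the ordering is clear without a
# Salesforce org. The class and function names here are invented.
class _FakeBulkApi(object):
    def create_query_job(self, sobject, contentType='CSV'):
        return 'JOB-1'
    def query(self, job, soql):
        return 'BATCH-1'
    def wait_for_batch(self, job, batch):
        pass
    def close_job(self, job):
        pass
    def get_batch_result_iter(self, job, batch):
        return iter(['Id,Name', '001000000000001,Sample Account'])
def _example_bulk_query(bulk, sobject, soql, result_path):
    job = bulk.create_query_job(sobject, contentType='CSV')
    batch = bulk.query(job, soql)
    bulk.wait_for_batch(job, batch)
    bulk.close_job(job)
    with open(result_path, 'w') as result_file:
        for row in bulk.get_batch_result_iter(job, batch):
            result_file.write(row + '\n')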
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import pytest
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from astropy import units as u
from astropy.wcs import WCS
from astropy.nddata.nddata import NDData
from astropy.nddata.decorators import support_nddata
class CCDData(NDData):
pass
@support_nddata
def wrapped_function_1(data, wcs=None, unit=None):
return data, wcs, unit
def test_pass_numpy():
data_in = np.array([1, 2, 3])
data_out, wcs_out, unit_out = wrapped_function_1(data=data_in)
assert data_out is data_in
assert wcs_out is None
assert unit_out is None
def test_pass_all_separate():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
data_out, wcs_out, unit_out = wrapped_function_1(data=data_in, wcs=wcs_in, unit=unit_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata_and_explicit():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
unit_in_alt = u.mJy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
with pytest.warns(AstropyUserWarning, match="Property unit has been passed explicitly and as "
"an NDData property, using explicitly specified value") as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in, unit=unit_in_alt)
assert len(w) == 1
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in_alt
def test_pass_nddata_ignored():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in, mask=[0, 1, 0])
with pytest.warns(AstropyUserWarning, match="The following attributes were set on the data "
"object, but will be ignored by the function: mask") as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert len(w) == 1
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_incorrect_first_argument():
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_2(something, wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_3(something, data, wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_4(wcs=None, unit=None):
pass
assert exc.value.args[0] == "Can only wrap functions whose first positional argument is `data`"
def test_wrap_function_no_kwargs():
@support_nddata
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
assert wrapped_function_5(nddata_in, [1, 2, 3]) is data_in
def test_wrap_function_repack_valid():
@support_nddata(repack=True, returns=['data'])
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
nddata_out = wrapped_function_5(nddata_in, [1, 2, 3])
assert isinstance(nddata_out, NDData)
assert nddata_out.data is data_in
def test_wrap_function_accepts():
class MyData(NDData):
pass
@support_nddata(accepts=MyData)
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
mydata_in = MyData(data_in)
assert wrapped_function_5(mydata_in, [1, 2, 3]) is data_in
with pytest.raises(TypeError, match="Only NDData sub-classes that inherit "
"from MyData can be used by this function"):
wrapped_function_5(nddata_in, [1, 2, 3])
def test_wrap_preserve_signature_docstring():
@support_nddata
def wrapped_function_6(data, wcs=None, unit=None):
"""
An awesome function
"""
pass
if wrapped_function_6.__doc__ is not None:
assert wrapped_function_6.__doc__.strip() == "An awesome function"
signature = inspect.signature(wrapped_function_6)
assert str(signature) == "(data, wcs=None, unit=None)"
def test_setup_failures1():
# repack but no returns
with pytest.raises(ValueError):
support_nddata(repack=True)
def test_setup_failures2():
# returns but no repack
with pytest.raises(ValueError):
support_nddata(returns=['data'])
def test_setup_failures9():
# keeps but no repack
with pytest.raises(ValueError):
support_nddata(keeps=['unit'])
def test_setup_failures3():
# same attribute in keeps and returns
with pytest.raises(ValueError):
support_nddata(repack=True, keeps=['mask'], returns=['data', 'mask'])
def test_setup_failures4():
# function accepts *args
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures10():
# function accepts **kwargs
with pytest.raises(ValueError):
@support_nddata
def test(data, **kwargs):
pass
def test_setup_failures5():
# function accepts *args (or **kwargs)
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures6():
# First argument is not data
with pytest.raises(ValueError):
@support_nddata
def test(img):
pass
def test_setup_failures7():
# accepts CCDData but was given just an NDData
with pytest.raises(TypeError):
@support_nddata(accepts=CCDData)
def test(data):
pass
test(NDData(np.ones((3, 3))))
def test_setup_failures8():
    # function returns a different number of values than specified. Using
    # NDData here so we don't run into trouble when creating a CCDData
    # without a unit!
with pytest.raises(ValueError):
@support_nddata(repack=True, returns=['data', 'mask'])
def test(data):
return 10
test(NDData(np.ones((3, 3)))) # do NOT use CCDData here.
def test_setup_failures11():
# function accepts no arguments
with pytest.raises(ValueError):
@support_nddata
def test():
pass
def test_setup_numpyarray_default():
# It should be possible (even if it's not advisable to use mutable
# defaults) to have a numpy array as default value.
@support_nddata
def func(data, wcs=np.array([1, 2, 3])):
return wcs
def test_still_accepts_other_input():
@support_nddata(repack=True, returns=['data'])
def test(data):
return data
assert isinstance(test(NDData(np.ones((3, 3)))), NDData)
assert isinstance(test(10), int)
assert isinstance(test([1, 2, 3]), list)
def test_accepting_property_normal():
# Accepts a mask attribute and takes it from the input
@support_nddata
def test(data, mask=None):
return mask
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with pytest.warns(AstropyUserWarning) as w:
assert test(ndd, mask=10) == 10
assert len(w) == 1
def test_parameter_default_identical_to_explicit_passed_argument():
# If the default is identical to the explicitly passed argument this
# should still raise a Warning and use the explicit one.
@support_nddata
def func(data, meta={'a': 1}):
return meta
with pytest.warns(AstropyUserWarning) as w:
assert func(NDData(1, meta={'b': 2}), {'a': 1}) == {'a': 1}
assert len(w) == 1
assert func(NDData(1, meta={'b': 2})) == {'b': 2}
def test_accepting_property_notexist():
# Accepts flags attribute but NDData doesn't have one
@support_nddata
def test(data, flags=10):
return flags
ndd = NDData(np.ones((3, 3)))
test(ndd)
def test_accepting_property_translated():
    # Accepts a "masked" argument; the decorator maps the NDData mask
    # attribute onto it
@support_nddata(mask='masked')
def test(data, masked=None):
return masked
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with pytest.warns(AstropyUserWarning) as w:
assert test(ndd, masked=10) == 10
assert len(w) == 1
def test_accepting_property_meta_empty():
    # meta is always set on NDData (an empty OrderedDict by default), so it is
    # special-cased: an empty meta is ignored rather than passed on
@support_nddata
def test(data, meta=None):
return meta
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._meta = {'a': 10}
assert test(ndd) == {'a': 10}
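# Editorial sketch (not astropy's implementation): a stripped-down stand-in for
# the unpacking behaviour the tests above exercise. If the first argument
# duck-types as an NDData object (has both data and unit attributes), its
# data/wcs/unit are forwarded as keyword arguments, with explicitly passed
# keywords taking precedence. The decorator name is invented and none of
# astropy's validation or warnings are reproduced.
import functools
def _unpack_data_like(func):
    @functools.wraps(func)
    def wrapper(data, **kwargs):
        if hasattr(data, 'data') and hasattr(data, 'unit'):
            for attr in ('wcs', 'unit'):
                value = getattr(data, attr, None)
                if value is not None and attr not in kwargs:
                    kwargs[attr] = value
            data = data.data
        return func(data, **kwargs)
    return wrapper
@_unpack_data_like
def _example_function(data, wcs=None, unit=None):
    return data, wcs, unit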
|
|
import csv
import os
import time
from collections import OrderedDict
from arff import arff
out_dir = ['']
def get_timestamp():
timestr = time.strftime("%Y%m%d-%H%M")
return timestr
def create_output_folders(windowlength):
    newpath = 'output/' + str(windowlength)
if not os.path.exists(newpath):
os.makedirs(newpath)
out_dir[0] = newpath
return newpath
def traverse(d, sep='_', _prefix=''):
assert isinstance(d, dict)
for k, v in d.items():
if isinstance(v, dict):
yield from traverse(v, sep, "{0}{1}{2}".format(_prefix, k, sep))
else:
yield ("{0}{1}".format(_prefix, k), v)
def flatten(d):
return dict(traverse(d))
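# Editorial sketch: a tiny, invented sensor reading showing what traverse() and
# flatten() above produce; nested keys are joined with underscores.
def _flatten_example():
    reading = {'accelerometer': {'x': 0.1, 'y': 0.2}, 'light': 300}
    return flatten(reading)
    # -> {'accelerometer_x': 0.1, 'accelerometer_y': 0.2, 'light': 300}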
def prepare_data(master_data_set):
array = []
for user, data in master_data_set.items():
for reminder in data:
acknowledged = reminder['acknowledged']
unixtime = reminder['unixtime']
sensors = reminder['sensors']
# values to store
reminder_data_dict = {}
reminder_data_dict['acknowledged'] = acknowledged
reminder_data_dict['unixtime'] = unixtime
reminder_data_dict['userid'] = user
# if sensors contains data
if sensors:
flat_sensors = flatten(sensors) # flatten the nested dictionaries
reminder_data_dict.update(flat_sensors) # update the reminder dictionary with new data
array.append(reminder_data_dict) # append as a row in the master array
return array
def write_data_to_disk(data, windowlength):
try:
csv_raw_filepath = write_to_csv(data, windowlength)
except Exception as e:
print(e)
else:
csv_ord_filepath = convert_raw_csv_to_ordered(csv_raw_filepath)
convert_ordered_csv_to_weka(csv_ord_filepath)
def write_to_csv(data, windowlength):
filepath = create_output_folders(windowlength) + '/raw.csv'
with open(filepath, 'w') as csvfile:
ordered_fieldnames = OrderedDict(data[0])
writer = csv.DictWriter(csvfile, fieldnames=ordered_fieldnames)
writer.writeheader()
for entry in data:
writer.writerow(entry)
return filepath
def update_acknowledged_label(number):
if number == '1':
return 'True'
elif number == '0':
return 'False'
else:
return 'Off'
def convert_raw_csv_to_ordered(csv_raw_filepath):
# read in previously generated csv feature list
# get the headers, and order them
filepath = out_dir[0] + '/ordered.csv'
with open(csv_raw_filepath) as csvfile:
reader = csv.DictReader(csvfile)
header = reader.fieldnames
header_sorted = sorted(header)
with open(filepath, 'w') as ordered_file:
fieldnames = header_sorted
writer = csv.DictWriter(ordered_file, fieldnames=fieldnames)
writer.writeheader()
for row in reader:
acknowledged = row['acknowledged']
row['acknowledged'] = update_acknowledged_label(acknowledged)
                if row['acknowledged'] == 'Off':
                    # skip rows that were neither acknowledged nor missed
                    continue
else:
writer.writerow(row)
return filepath
def write_weka_file_for_cohort(data, attributes):
weka_data = {
'description': '',
'relation': 'sensors',
'attributes': attributes,
'data': data,
}
    with open(out_dir[0] + '/cohort.arff', 'w') as f:
        f.write(arff.dumps(weka_data))
def write_weka_file_for_each_user(data, attributes):
all_users = {}
for row in data:
userid = row[len(row) - 1] # User ID is the last element in list
# Get User data from row and add to user dictionary
# try to get existing key, and append to the existing list in value
try:
user_list = all_users[userid]
user_list.append(row)
all_users[userid] = user_list
except KeyError:
# If doesn't exist, create the list
user_list = [row]
all_users[userid] = user_list
for user, userdata in all_users.items():
weka_data = {
'description': 'Data for ' + user,
'relation': 'SensorRecordings',
'attributes': attributes,
'data': userdata,
}
        # Write Weka formatted file for this user
        with open(out_dir[0] + '/' + user + '.arff', 'w') as f:
            f.write(arff.dumps(weka_data))
def convert_ordered_csv_to_weka(csv_ord_filepath):
headers = []
data = []
attributes = []
with open(csv_ord_filepath) as csvfile:
readcsv = csv.reader(csvfile, delimiter=',')
row_count = 0
for row in readcsv:
if row_count == 0:
# Save headers for features
headers = row
row_count += 1
else:
data.append(row)
row_count += 1
# iterate the headings to correctly format the attribute types
for attribute in headers:
if attribute == 'acknowledged':
attributes.append(('class', ['True', 'False']))
elif attribute == 'userid':
attributes.append((attribute, 'STRING'))
elif attribute == 'unixtime':
attributes.append((attribute, 'STRING'))
else:
attributes.append((attribute, 'REAL'))
# Get index of acknowledged data
count_acknowledged = 0
count_missed = 0
acknowledged_index = headers.index("acknowledged")
for row in data:
if row[acknowledged_index] == 'True':
count_acknowledged += 1
else:
count_missed += 1
print('Total Reminders Saved: ' + str(count_acknowledged + count_missed))
print('Acknowledged: ' + str(count_acknowledged) + ' / Missed: ' + str(count_missed))
write_weka_file_for_cohort(data, attributes)
# Write Weka format file for each user
write_weka_file_for_each_user(data, attributes)
# FOR DEBUGGING
# # # Use pickle to import object saved to disk
# master_data_set = pickle.load(open('pickle.p', "rb"))
# results = prepare_data(master_data_set)
# write_data_to_disk(results)
|
|
from __future__ import unicode_literals
from django.contrib import admin
from calaccess_raw import models
from .base import BaseAdmin
class AcronymsCdAdmin(BaseAdmin):
list_display = ("acronym", "stands_for", "effect_dt", "a_desc")
date_hierarchy = 'effect_dt'
search_fields = ("acronym", "a_desc")
class AddressCdAdmin(BaseAdmin):
pass
class BallotMeasuresCdAdmin(BaseAdmin):
list_display = ("measure_name", "election_date", "jurisdiction")
list_filter = ("jurisdiction",)
search_fields = ("measure_name",)
class EfsFilingLogCdAdmin(BaseAdmin):
list_display = (
"id",
"filing_date",
"filingstatus",
"filer_id",
"vendor",
"form_type",
)
class FilersCdAdmin(BaseAdmin):
pass
class FilerAcronymsCdAdmin(BaseAdmin):
pass
class FilerAddressCdAdmin(BaseAdmin):
list_display = (
"filer_id",
"adrid",
"effect_dt",
"add_type"
)
class FilerEthicsClassCdAdmin(BaseAdmin):
pass
class FilerInterestsCdAdmin(BaseAdmin):
pass
@admin.register(models.FilerLinksCd)
class FilerLinksCdAdmin(BaseAdmin):
list_display = (
"filer_id_a",
"filer_id_b",
"link_type",
"active_flg",
"effect_dt",
"termination_dt",
)
list_filter = (
"active_flg",
"link_type"
)
search_fields = ("filer_id_a", "filer_id_b")
class FilerStatusTypesCdAdmin(BaseAdmin):
list_display = (
"status_type",
"status_desc"
)
class FilerToFilerTypeCdAdmin(BaseAdmin):
list_display = (
"filer_id",
"filer_type",
"effect_dt",
"active",
"session_id",
"race",
"district_cd",
"party_cd"
)
list_filter = (
"active",
"filer_type",
"category",
"sub_category",
"category_type",
"party_cd",
"session_id"
)
date_hierarchy = "effect_dt"
search_fields = (
"filer_id",
)
class FilerTypesCdAdmin(BaseAdmin):
list_display = (
"filer_type",
"description",
"grp_type",
"calc_use",
"grace_period",
)
class FilerXrefCdAdmin(BaseAdmin):
pass
class FilingPeriodCdAdmin(BaseAdmin):
list_display = (
"period_id", "start_date", "end_date", "period_desc",
)
search_fields = (
"period_id",
)
class GroupTypesCdAdmin(BaseAdmin):
pass
class HeaderCdAdmin(BaseAdmin):
pass
class HdrCdAdmin(BaseAdmin):
pass
class ImageLinksCdAdmin(BaseAdmin):
pass
class LegislativeSessionsCdAdmin(BaseAdmin):
pass
class LobbyingChgLogCdAdmin(BaseAdmin):
pass
class LobbyistContributions1CdAdmin(BaseAdmin):
pass
class LobbyistContributions2CdAdmin(BaseAdmin):
pass
class LobbyistContributions3CdAdmin(BaseAdmin):
pass
class LobbyistEmployer1CdAdmin(BaseAdmin):
pass
class LobbyistEmployer2CdAdmin(BaseAdmin):
pass
class LobbyistEmployer3CdAdmin(BaseAdmin):
pass
class LobbyistEmployerFirms1CdAdmin(BaseAdmin):
pass
class LobbyistEmployerFirms2CdAdmin(BaseAdmin):
pass
class LobbyistEmpLobbyist1CdAdmin(BaseAdmin):
pass
class LobbyistEmpLobbyist2CdAdmin(BaseAdmin):
pass
class LobbyistFirm1CdAdmin(BaseAdmin):
pass
class LobbyistFirm2CdAdmin(BaseAdmin):
pass
class LobbyistFirm3CdAdmin(BaseAdmin):
pass
class LobbyistFirmEmployer1CdAdmin(BaseAdmin):
pass
class LobbyistFirmEmployer2CdAdmin(BaseAdmin):
pass
class LobbyistFirmLobbyist1CdAdmin(BaseAdmin):
pass
class LobbyistFirmLobbyist2CdAdmin(BaseAdmin):
pass
class LookupCodeAdmin(BaseAdmin):
list_display = (
"code_type",
"code_id",
"code_desc",
)
list_filter = (
"code_type",
)
search_fields = (
"code_type",
"code_id",
"code_desc",
)
class NamesCdAdmin(BaseAdmin):
pass
class ReceivedFilingsCdAdmin(BaseAdmin):
pass
class ReportsCdAdmin(BaseAdmin):
pass
admin.site.register(models.AcronymsCd, AcronymsCdAdmin)
admin.site.register(models.AddressCd, AddressCdAdmin)
admin.site.register(models.BallotMeasuresCd, BallotMeasuresCdAdmin)
admin.site.register(models.EfsFilingLogCd, EfsFilingLogCdAdmin)
admin.site.register(models.FilersCd, FilersCdAdmin)
admin.site.register(models.FilerAcronymsCd, FilerAcronymsCdAdmin)
admin.site.register(models.FilerAddressCd, FilerAddressCdAdmin)
admin.site.register(models.FilerEthicsClassCd, FilerEthicsClassCdAdmin)
admin.site.register(models.FilerInterestsCd, FilerInterestsCdAdmin)
admin.site.register(models.FilerStatusTypesCd, FilerStatusTypesCdAdmin)
admin.site.register(models.FilerToFilerTypeCd, FilerToFilerTypeCdAdmin)
admin.site.register(models.FilerTypesCd, FilerTypesCdAdmin)
admin.site.register(models.FilerXrefCd, FilerXrefCdAdmin)
admin.site.register(models.FilingPeriodCd, FilingPeriodCdAdmin)
admin.site.register(models.GroupTypesCd, GroupTypesCdAdmin)
admin.site.register(models.HeaderCd, HeaderCdAdmin)
admin.site.register(models.HdrCd, HdrCdAdmin)
admin.site.register(models.ImageLinksCd, ImageLinksCdAdmin)
admin.site.register(models.LegislativeSessionsCd, LegislativeSessionsCdAdmin)
admin.site.register(models.LobbyingChgLogCd, LobbyingChgLogCdAdmin)
admin.site.register(
models.LobbyistContributions1Cd,
LobbyistContributions1CdAdmin
)
admin.site.register(
models.LobbyistContributions2Cd,
LobbyistContributions2CdAdmin
)
admin.site.register(
models.LobbyistContributions3Cd,
LobbyistContributions3CdAdmin
)
admin.site.register(models.LobbyistEmployer1Cd, LobbyistEmployer1CdAdmin)
admin.site.register(models.LobbyistEmployer2Cd, LobbyistEmployer2CdAdmin)
admin.site.register(models.LobbyistEmployer3Cd, LobbyistEmployer3CdAdmin)
admin.site.register(
models.LobbyistEmployerFirms1Cd,
LobbyistEmployerFirms1CdAdmin
)
admin.site.register(
models.LobbyistEmployerFirms2Cd,
LobbyistEmployerFirms2CdAdmin
)
admin.site.register(
models.LobbyistEmpLobbyist1Cd,
LobbyistEmpLobbyist1CdAdmin
)
admin.site.register(
models.LobbyistEmpLobbyist2Cd,
LobbyistEmpLobbyist2CdAdmin
)
admin.site.register(models.LobbyistFirm1Cd, LobbyistFirm1CdAdmin)
admin.site.register(models.LobbyistFirm2Cd, LobbyistFirm2CdAdmin)
admin.site.register(models.LobbyistFirm3Cd, LobbyistFirm3CdAdmin)
admin.site.register(
models.LobbyistFirmEmployer1Cd,
LobbyistFirmEmployer1CdAdmin
)
admin.site.register(
models.LobbyistFirmEmployer2Cd,
LobbyistFirmEmployer2CdAdmin
)
admin.site.register(
models.LobbyistFirmLobbyist1Cd,
LobbyistFirmLobbyist1CdAdmin
)
admin.site.register(
models.LobbyistFirmLobbyist2Cd,
LobbyistFirmLobbyist2CdAdmin
)
admin.site.register(models.LookupCode, LookupCodeAdmin)
admin.site.register(models.NamesCd, NamesCdAdmin)
admin.site.register(models.ReceivedFilingsCd, ReceivedFilingsCdAdmin)
admin.site.register(models.ReportsCd, ReportsCdAdmin)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import sys
import tempfile
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
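# Editorial sketch (numpy only, names invented): illustrates how the boolean
# mask above reduces the three-class iris target to a binary problem before the
# logistic regression tests below use it.
def _binary_filter_example():
  target = np.array([0, 1, 2, 1, 0])
  ids = np.where((target == 0) | (target == 1))
  return target[ids]  # -> array([0, 1, 1, 0])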
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertEqual(4, len(classifier.weights_))
self.assertEqual(3, len(classifier.bias_))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
regressor = linear.LinearRegressor(feature_columns=[age, language])
regressor.fit(input_fn=input_fn, steps=100)
loss1 = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
regressor.fit(input_fn=input_fn, steps=200)
loss2 = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
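# y is a noisy linear function of x plus an offset of roughly `bias`, so
# the regressor is expected to recover the generating weights (checked
# below with a loose rtol of 1).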
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
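# The labels are an exact linear function of x, so SDCA should drive the
# training loss close to zero and recover the generating weights.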
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertAllClose(
[w[0] for w in weights], regressor.weights_.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
no_l1_reg_weights = regressor.weights_
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = regressor.weights_
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
All of the instances in this input only have the bias feature, and
1/4 of the labels are positive. This means that the expected weight for
the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearRegressor requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
[[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear._LinearEstimator(feature_columns=cont_features,
head=head_lib._regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear._LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear._LinearEstimator(feature_columns=[age, language],
head=head_lib._regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear._LinearEstimator(
feature_columns=[age, language],
head=head_lib._poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear._LinearEstimator(
head=head_lib._regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
|
|
"""Component to interface with various media players."""
import asyncio
import base64
import collections
from datetime import timedelta
import functools as ft
import hashlib
import logging
from random import SystemRandom
from typing import Optional
from urllib.parse import urlparse
from aiohttp import web
from aiohttp.hdrs import CACHE_CONTROL, CONTENT_TYPE
import async_timeout
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_SHUFFLE_SET,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_PLAYING,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
from .const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
ATTR_SOUND_MODE_LIST,
DOMAIN,
SERVICE_CLEAR_PLAYLIST,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_RND = SystemRandom()
ENTITY_ID_FORMAT = DOMAIN + ".{}"
ENTITY_IMAGE_URL = "/api/media_player_proxy/{0}?token={1}&cache={2}"
CACHE_IMAGES = "images"
CACHE_MAXSIZE = "maxsize"
CACHE_LOCK = "lock"
CACHE_URL = "url"
CACHE_CONTENT = "content"
ENTITY_IMAGE_CACHE = {CACHE_IMAGES: collections.OrderedDict(), CACHE_MAXSIZE: 16}
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASS_TV = "tv"
DEVICE_CLASS_SPEAKER = "speaker"
DEVICE_CLASSES = [DEVICE_CLASS_TV, DEVICE_CLASS_SPEAKER]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = {
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean,
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_SOUND_MODE,
ATTR_MEDIA_SHUFFLE,
]
@bind_hass
def is_on(hass, entity_id=None):
"""
Return true if specified media player entity_id is on.
Check all media players if no entity_id is specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(
not hass.states.is_state(entity_id, STATE_OFF) for entity_id in entity_ids
)
WS_TYPE_MEDIA_PLAYER_THUMBNAIL = "media_player_thumbnail"
SCHEMA_WEBSOCKET_GET_THUMBNAIL = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{"type": WS_TYPE_MEDIA_PLAYER_THUMBNAIL, "entity_id": cv.entity_id}
)
def _rename_keys(**keys):
"""Create validator that renames keys.
Necessary because the service schema names do not match the command parameters.
Async friendly.
"""
def rename(value):
for to_key, from_key in keys.items():
if from_key in value:
value[to_key] = value.pop(from_key)
return value
return rename
async def async_setup(hass, config):
"""Track states and offer events for media_players."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
hass.components.websocket_api.async_register_command(
WS_TYPE_MEDIA_PLAYER_THUMBNAIL,
websocket_handle_thumbnail,
SCHEMA_WEBSOCKET_GET_THUMBNAIL,
)
hass.http.register_view(MediaPlayerImageView(component))
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, {}, "async_turn_on", [SUPPORT_TURN_ON]
)
component.async_register_entity_service(
SERVICE_TURN_OFF, {}, "async_turn_off", [SUPPORT_TURN_OFF]
)
component.async_register_entity_service(
SERVICE_TOGGLE, {}, "async_toggle", [SUPPORT_TURN_OFF | SUPPORT_TURN_ON],
)
component.async_register_entity_service(
SERVICE_VOLUME_UP,
{},
"async_volume_up",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_VOLUME_DOWN,
{},
"async_volume_down",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY_PAUSE,
{},
"async_media_play_pause",
[SUPPORT_PLAY | SUPPORT_PAUSE],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY, {}, "async_media_play", [SUPPORT_PLAY]
)
component.async_register_entity_service(
SERVICE_MEDIA_PAUSE, {}, "async_media_pause", [SUPPORT_PAUSE]
)
component.async_register_entity_service(
SERVICE_MEDIA_STOP, {}, "async_media_stop", [SUPPORT_STOP]
)
component.async_register_entity_service(
SERVICE_MEDIA_NEXT_TRACK, {}, "async_media_next_track", [SUPPORT_NEXT_TRACK],
)
component.async_register_entity_service(
SERVICE_MEDIA_PREVIOUS_TRACK,
{},
"async_media_previous_track",
[SUPPORT_PREVIOUS_TRACK],
)
component.async_register_entity_service(
SERVICE_CLEAR_PLAYLIST, {}, "async_clear_playlist", [SUPPORT_CLEAR_PLAYLIST],
)
component.async_register_entity_service(
SERVICE_VOLUME_SET,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float}
),
_rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL),
),
"async_set_volume_level",
[SUPPORT_VOLUME_SET],
)
component.async_register_entity_service(
SERVICE_VOLUME_MUTE,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean}
),
_rename_keys(mute=ATTR_MEDIA_VOLUME_MUTED),
),
"async_mute_volume",
[SUPPORT_VOLUME_MUTE],
)
component.async_register_entity_service(
SERVICE_MEDIA_SEEK,
vol.All(
cv.make_entity_service_schema(
{
vol.Required(ATTR_MEDIA_SEEK_POSITION): vol.All(
vol.Coerce(float), vol.Range(min=0)
)
}
),
_rename_keys(position=ATTR_MEDIA_SEEK_POSITION),
),
"async_media_seek",
[SUPPORT_SEEK],
)
component.async_register_entity_service(
SERVICE_SELECT_SOURCE,
{vol.Required(ATTR_INPUT_SOURCE): cv.string},
"async_select_source",
[SUPPORT_SELECT_SOURCE],
)
component.async_register_entity_service(
SERVICE_SELECT_SOUND_MODE,
{vol.Required(ATTR_SOUND_MODE): cv.string},
"async_select_sound_mode",
[SUPPORT_SELECT_SOUND_MODE],
)
component.async_register_entity_service(
SERVICE_PLAY_MEDIA,
vol.All(
cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA),
_rename_keys(
media_type=ATTR_MEDIA_CONTENT_TYPE,
media_id=ATTR_MEDIA_CONTENT_ID,
enqueue=ATTR_MEDIA_ENQUEUE,
),
),
"async_play_media",
[SUPPORT_PLAY_MEDIA],
)
component.async_register_entity_service(
SERVICE_SHUFFLE_SET,
{vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean},
"async_set_shuffle",
[SUPPORT_SHUFFLE_SET],
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class MediaPlayerDevice(Entity):
"""ABC for media player devices."""
_access_token: Optional[str] = None
# Implement these for your media player
@property
def state(self):
"""State of the player."""
return None
@property
def access_token(self) -> str:
"""Access token for this media player."""
if self._access_token is None:
self._access_token = hashlib.sha256(
_RND.getrandbits(256).to_bytes(32, "little")
).hexdigest()
return self._access_token
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return None
@property
def media_content_id(self):
"""Content ID of current playing media."""
return None
@property
def media_content_type(self):
"""Content type of current playing media."""
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
return None
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
return None
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return False
@property
def media_image_hash(self):
"""Hash value for media image."""
url = self.media_image_url
if url is not None:
return hashlib.sha256(url.encode("utf-8")).hexdigest()[:16]
return None
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
url = self.media_image_url
if url is None:
return None, None
return await _async_fetch_image(self.hass, url)
@property
def media_title(self):
"""Title of current playing media."""
return None
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return None
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return None
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return None
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return None
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only."""
return None
@property
def media_season(self):
"""Season of current playing media, TV show only."""
return None
@property
def media_episode(self):
"""Episode of current playing media, TV show only."""
return None
@property
def media_channel(self):
"""Channel currently playing."""
return None
@property
def media_playlist(self):
"""Title of Playlist currently playing."""
return None
@property
def app_id(self):
"""ID of the current running app."""
return None
@property
def app_name(self):
"""Name of the current running app."""
return None
@property
def source(self):
"""Name of the current input source."""
return None
@property
def source_list(self):
"""List of available input sources."""
return None
@property
def sound_mode(self):
"""Name of the current sound mode."""
return None
@property
def sound_mode_list(self):
"""List of available sound modes."""
return None
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
return 0
def turn_on(self):
"""Turn the media player on."""
raise NotImplementedError()
async def async_turn_on(self):
"""Turn the media player on."""
await self.hass.async_add_job(self.turn_on)
def turn_off(self):
"""Turn the media player off."""
raise NotImplementedError()
async def async_turn_off(self):
"""Turn the media player off."""
await self.hass.async_add_job(self.turn_off)
def mute_volume(self, mute):
"""Mute the volume."""
raise NotImplementedError()
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self.hass.async_add_job(self.mute_volume, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self.hass.async_add_job(self.set_volume_level, volume)
def media_play(self):
"""Send play command."""
raise NotImplementedError()
async def async_media_play(self):
"""Send play command."""
await self.hass.async_add_job(self.media_play)
def media_pause(self):
"""Send pause command."""
raise NotImplementedError()
async def async_media_pause(self):
"""Send pause command."""
await self.hass.async_add_job(self.media_pause)
def media_stop(self):
"""Send stop command."""
raise NotImplementedError()
async def async_media_stop(self):
"""Send stop command."""
await self.hass.async_add_job(self.media_stop)
def media_previous_track(self):
"""Send previous track command."""
raise NotImplementedError()
async def async_media_previous_track(self):
"""Send previous track command."""
await self.hass.async_add_job(self.media_previous_track)
def media_next_track(self):
"""Send next track command."""
raise NotImplementedError()
async def async_media_next_track(self):
"""Send next track command."""
await self.hass.async_add_job(self.media_next_track)
def media_seek(self, position):
"""Send seek command."""
raise NotImplementedError()
async def async_media_seek(self, position):
"""Send seek command."""
await self.hass.async_add_job(self.media_seek, position)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
raise NotImplementedError()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
await self.hass.async_add_job(
ft.partial(self.play_media, media_type, media_id, **kwargs)
)
def select_source(self, source):
"""Select input source."""
raise NotImplementedError()
async def async_select_source(self, source):
"""Select input source."""
await self.hass.async_add_job(self.select_source, source)
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
raise NotImplementedError()
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
await self.hass.async_add_job(self.select_sound_mode, sound_mode)
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
async def async_clear_playlist(self):
"""Clear players playlist."""
await self.hass.async_add_job(self.clear_playlist)
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
raise NotImplementedError()
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self.hass.async_add_job(self.set_shuffle, shuffle)
# No need to overwrite these.
@property
def support_play(self):
"""Boolean if play is supported."""
return bool(self.supported_features & SUPPORT_PLAY)
@property
def support_pause(self):
"""Boolean if pause is supported."""
return bool(self.supported_features & SUPPORT_PAUSE)
@property
def support_stop(self):
"""Boolean if stop is supported."""
return bool(self.supported_features & SUPPORT_STOP)
@property
def support_seek(self):
"""Boolean if seek is supported."""
return bool(self.supported_features & SUPPORT_SEEK)
@property
def support_volume_set(self):
"""Boolean if setting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_SET)
@property
def support_volume_mute(self):
"""Boolean if muting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_MUTE)
@property
def support_previous_track(self):
"""Boolean if previous track command supported."""
return bool(self.supported_features & SUPPORT_PREVIOUS_TRACK)
@property
def support_next_track(self):
"""Boolean if next track command supported."""
return bool(self.supported_features & SUPPORT_NEXT_TRACK)
@property
def support_play_media(self):
"""Boolean if play media command supported."""
return bool(self.supported_features & SUPPORT_PLAY_MEDIA)
@property
def support_select_source(self):
"""Boolean if select source command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOURCE)
@property
def support_select_sound_mode(self):
"""Boolean if select sound mode command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOUND_MODE)
@property
def support_clear_playlist(self):
"""Boolean if clear playlist command supported."""
return bool(self.supported_features & SUPPORT_CLEAR_PLAYLIST)
@property
def support_shuffle_set(self):
"""Boolean if shuffle is supported."""
return bool(self.supported_features & SUPPORT_SHUFFLE_SET)
async def async_toggle(self):
"""Toggle the power on the media player."""
if hasattr(self, "toggle"):
# pylint: disable=no-member
await self.hass.async_add_job(self.toggle)
return
if self.state in [STATE_OFF, STATE_IDLE]:
await self.async_turn_on()
else:
await self.async_turn_off()
async def async_volume_up(self):
"""Turn volume up for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_up"):
# pylint: disable=no-member
await self.hass.async_add_job(self.volume_up)
return
if self.volume_level < 1 and self.supported_features & SUPPORT_VOLUME_SET:
await self.async_set_volume_level(min(1, self.volume_level + 0.1))
async def async_volume_down(self):
"""Turn volume down for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_down"):
# pylint: disable=no-member
await self.hass.async_add_job(self.volume_down)
return
if self.volume_level > 0 and self.supported_features & SUPPORT_VOLUME_SET:
await self.async_set_volume_level(max(0, self.volume_level - 0.1))
async def async_media_play_pause(self):
"""Play or pause the media player."""
if hasattr(self, "media_play_pause"):
# pylint: disable=no-member
await self.hass.async_add_job(self.media_play_pause)
return
if self.state == STATE_PLAYING:
await self.async_media_pause()
else:
await self.async_media_play()
@property
def entity_picture(self):
"""Return image of the media playing."""
if self.state == STATE_OFF:
return None
if self.media_image_remotely_accessible:
return self.media_image_url
image_hash = self.media_image_hash
if image_hash is None:
return None
return ENTITY_IMAGE_URL.format(self.entity_id, self.access_token, image_hash)
@property
def capability_attributes(self):
"""Return capability attributes."""
supported_features = self.supported_features or 0
data = {}
if supported_features & SUPPORT_SELECT_SOURCE:
source_list = self.source_list
if source_list:
data[ATTR_INPUT_SOURCE_LIST] = source_list
if supported_features & SUPPORT_SELECT_SOUND_MODE:
sound_mode_list = self.sound_mode_list
if sound_mode_list:
data[ATTR_SOUND_MODE_LIST] = sound_mode_list
return data
@property
def state_attributes(self):
"""Return the state attributes."""
if self.state == STATE_OFF:
return None
state_attr = {
attr: getattr(self, attr)
for attr in ATTR_TO_PROPERTY
if getattr(self, attr) is not None
}
return state_attr
async def _async_fetch_image(hass, url):
"""Fetch image.
Images are cached in memory (the images are typically 10-100kB in size).
"""
cache_images = ENTITY_IMAGE_CACHE[CACHE_IMAGES]
cache_maxsize = ENTITY_IMAGE_CACHE[CACHE_MAXSIZE]
if urlparse(url).hostname is None:
url = hass.config.api.base_url + url
if url not in cache_images:
cache_images[url] = {CACHE_LOCK: asyncio.Lock()}
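# A per-URL lock ensures concurrent requests for the same image trigger
# only one fetch; later waiters return the content cached below.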
async with cache_images[url][CACHE_LOCK]:
if CACHE_CONTENT in cache_images[url]:
return cache_images[url][CACHE_CONTENT]
content, content_type = (None, None)
websession = async_get_clientsession(hass)
try:
with async_timeout.timeout(10):
response = await websession.get(url)
if response.status == 200:
content = await response.read()
content_type = response.headers.get(CONTENT_TYPE)
if content_type:
content_type = content_type.split(";")[0]
cache_images[url][CACHE_CONTENT] = content, content_type
except asyncio.TimeoutError:
pass
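# Evict the oldest entries first: the OrderedDict acts as a simple FIFO
# cache bounded by CACHE_MAXSIZE entries.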
while len(cache_images) > cache_maxsize:
cache_images.popitem(last=False)
return content, content_type
class MediaPlayerImageView(HomeAssistantView):
"""Media player view to serve an image."""
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
def __init__(self, component):
"""Initialize a media player view."""
self.component = component
async def get(self, request, entity_id):
"""Start a get request."""
player = self.component.get_entity(entity_id)
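# Unknown entities return 404 only to authenticated callers; everyone else
# gets a generic 401 so entity ids are not revealed to anonymous requests.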
if player is None:
status = 404 if request[KEY_AUTHENTICATED] else 401
return web.Response(status=status)
authenticated = (
request[KEY_AUTHENTICATED]
or request.query.get("token") == player.access_token
)
if not authenticated:
return web.Response(status=401)
if player.media_image_remotely_accessible:
url = player.media_image_url
if url is not None:
return web.Response(status=302, headers={"location": url})
return web.Response(status=500)
data, content_type = await player.async_get_media_image()
if data is None:
return web.Response(status=500)
headers = {CACHE_CONTROL: "max-age=3600"}
return web.Response(body=data, content_type=content_type, headers=headers)
@websocket_api.async_response
async def websocket_handle_thumbnail(hass, connection, msg):
"""Handle get media player cover command.
Async friendly.
"""
component = hass.data[DOMAIN]
player = component.get_entity(msg["entity_id"])
if player is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "entity_not_found", "Entity not found"
)
)
return
data, content_type = await player.async_get_media_image()
if data is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "thumbnail_fetch_failed", "Failed to fetch thumbnail"
)
)
return
await connection.send_big_result(
msg["id"],
{
"content_type": content_type,
"content": base64.b64encode(data).decode("utf-8"),
},
)
|
|
#!/usr/bin/env python
"""AFF4 object representing grr users."""
import crypt
import random
import string
import time
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.proto import flows_pb2
from grr.proto import jobs_pb2
class GlobalNotification(rdfvalue.RDFProtoStruct):
"""Global notification shown to all the users of GRR."""
protobuf = jobs_pb2.GlobalNotification
def __init__(self, *args, **kwargs):
super(GlobalNotification, self).__init__(*args, **kwargs)
if not self.duration:
self.duration = rdfvalue.Duration("2w")
if not self.show_from:
self.show_from = rdfvalue.RDFDatetime().Now()
@property
def hash(self):
"""Having hash property makes things easier in Django templates."""
return hash(self)
@property
def type_name(self):
return self.Type.reverse_enum[self.type]
class GlobalNotificationSet(rdfvalue.RDFProtoStruct):
"""A set of global notifications: one notification per notification's type."""
protobuf = jobs_pb2.GlobalNotificationSet
def AddNotification(self, new_notification):
"""Adds new notification to the set.
There can be only one notification of a particular type (info, warning,
error) in the set. Notifications are guaranteed to be stored in the
order of their priority.
Args:
new_notification: New notification to add.
"""
current_list = [notification for notification in self.notifications
if notification.type != new_notification.type]
current_list.append(new_notification)
current_list = sorted(current_list, key=lambda x: x.type)
self.notifications = current_list
def __iter__(self):
for notification in self.notifications:
yield notification
def __contains__(self, notification):
return notification in self.notifications
class GlobalNotificationStorage(aff4.AFF4Object):
"""Object that stores GRR's GlobalNotifications."""
DEFAULT_PATH = rdfvalue.RDFURN("aff4:/config/global_notifications")
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Schema for GlobalNotificationsManager."""
NOTIFICATIONS = aff4.Attribute(
"aff4:global_notification_storage/notifications", GlobalNotificationSet,
"List of currently active notifications", versioned=False)
def AddNotification(self, new_notification):
"""Adds new notification to the set."""
current_set = self.GetNotifications()
current_set.AddNotification(new_notification)
self.Set(self.Schema.NOTIFICATIONS, current_set)
def GetNotifications(self):
return self.Get(self.Schema.NOTIFICATIONS, default=GlobalNotificationSet())
class CryptedPassword(rdfvalue.RDFString):
"""Encoded crypted password."""
def _CalculateHash(self, password, salt=None):
    # Note: As of Python 3.3 there is a function to do this, but we do our
# own for backwards compatibility.
valid_salt_chars = string.ascii_letters + string.digits + "./"
if salt is None:
salt = "".join(random.choice(valid_salt_chars) for i in range(2))
return crypt.crypt(password, salt)
def SetPassword(self, password, salt=None):
self._value = self._CalculateHash(password, salt=salt)
return self
def CheckPassword(self, password):
salt = self._value[:2]
return self._CalculateHash(password, salt=salt) == self._value
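# Illustrative sketch (not part of GRR): how the two-character salt round-trips.
# crypt.crypt() embeds the salt in the first two characters of its result, which is
# why CheckPassword can recover it from self._value[:2]. Uses the Unix-only stdlib
# 'crypt' module already imported above; the password literal is hypothetical.
def _demo_crypt_salt_roundtrip():
  valid_salt_chars = string.ascii_letters + string.digits + "./"
  salt = "".join(random.choice(valid_salt_chars) for _ in range(2))
  stored = crypt.crypt("example password", salt)
  # Re-hashing with the salt taken from the stored value reproduces the same hash.
  return crypt.crypt("example password", stored[:2]) == stored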
class GUISettings(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.GUISettings
class GRRUser(aff4.AFF4Object):
"""An AFF4 object modeling a GRR User."""
# URN of the index for labels for users.
labels_index_urn = rdfvalue.RDFURN("aff4:/index/labels/users")
SYSTEM_USERS = set(["GRRWorker", "GRREnroller", "GRRCron", "test"])
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Schema for GRRUser."""
PENDING_NOTIFICATIONS = aff4.Attribute(
"aff4:notification/pending", rdfvalue.NotificationList,
"The notifications pending for the user.", default="",
versioned=False)
SHOWN_NOTIFICATIONS = aff4.Attribute(
"aff4:notifications/shown", rdfvalue.NotificationList,
"Notifications already shown to the user.", default="",
versioned=False)
SHOWN_GLOBAL_NOTIFICATIONS = aff4.Attribute(
"aff4:global_notification/timestamp_list", GlobalNotificationSet,
"Global notifications shown to this user.",
default=GlobalNotificationSet(), versioned=False)
GUI_SETTINGS = aff4.Attribute(
"aff4:gui/settings", rdfvalue.GUISettings,
"GUI Settings", default="")
PASSWORD = aff4.Attribute(
"aff4:user/password", CryptedPassword,
"Encrypted Password for the user")
def Notify(self, message_type, subject, msg, source):
"""Send a notification to the user in the UI.
Args:
message_type: One of aff4_grr.Notification.notification_types e.g.
"ViewObject", "HostInformation", "GrantAccess".
subject: The subject to use, normally a URN.
msg: The message to display.
source: The class doing the notification.
Raises:
TypeError: On invalid message_type.
"""
pending = self.Get(self.Schema.PENDING_NOTIFICATIONS)
if pending is None:
pending = self.Schema.PENDING_NOTIFICATIONS()
if message_type not in rdfvalue.Notification.notification_types:
raise TypeError("Invalid notification type %s" % message_type)
pending.Append(type=message_type, subject=subject, message=msg,
source=source, timestamp=long(time.time() * 1e6))
    # Limit the notifications to 50, expiring older notifications.
while len(pending) > 50:
pending.Pop(0)
self.Set(self.Schema.PENDING_NOTIFICATIONS, pending)
def ShowNotifications(self, reset=True):
"""A generator of current notifications."""
shown_notifications = self.Schema.SHOWN_NOTIFICATIONS()
# Pending notifications first
pending = self.Get(self.Schema.PENDING_NOTIFICATIONS)
for notification in pending:
shown_notifications.Append(notification)
notifications = self.Get(self.Schema.SHOWN_NOTIFICATIONS)
for notification in notifications:
shown_notifications.Append(notification)
# Shall we reset the pending notification state?
if reset:
self.Set(shown_notifications)
self.Set(self.Schema.PENDING_NOTIFICATIONS())
self.Flush()
return shown_notifications
def Describe(self):
"""Return a description of this user."""
result = ["\nUsername: %s" % self.urn.Basename()]
fd = aff4.FACTORY.Open(self.urn.Add("labels"), token=self.token)
labels = [str(x) for x in fd.Get(fd.Schema.LABEL, [])]
result.append("Labels: %s" % ",".join(labels))
if self.Get(self.Schema.PASSWORD) is None:
result.append("Password: not set")
else:
result.append("Password: set")
return "\n".join(result)
def SetPassword(self, password):
self.Set(self.Schema.PASSWORD().SetPassword(password))
def CheckPassword(self, password):
password_obj = self.Get(self.Schema.PASSWORD)
return password_obj and password_obj.CheckPassword(password)
def GetPendingGlobalNotifications(self):
storage = aff4.FACTORY.Create(GlobalNotificationStorage.DEFAULT_PATH,
aff4_type="GlobalNotificationStorage",
mode="r", token=self.token)
current_notifications = storage.GetNotifications()
shown_notifications = self.Get(self.Schema.SHOWN_GLOBAL_NOTIFICATIONS,
default=GlobalNotificationSet())
result = []
for notification in current_notifications:
if notification in shown_notifications:
continue
current_time = rdfvalue.RDFDatetime().Now()
if (notification.show_from + notification.duration >= current_time and
current_time >= notification.show_from):
result.append(notification)
return result
def MarkGlobalNotificationAsShown(self, notification):
shown_notifications = self.Get(self.Schema.SHOWN_GLOBAL_NOTIFICATIONS)
shown_notifications.AddNotification(notification)
self.Set(self.Schema.SHOWN_GLOBAL_NOTIFICATIONS, shown_notifications)
|
|
#!/usr/bin/env python3
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
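# Example invocation (binary names are illustrative); READELF and OBJDUMP override the
# default tool paths via the environment:
#   READELF=/usr/bin/readelf OBJDUMP=/usr/bin/objdump ./security-check.py ./myprog ./mylib.so
# For each failing binary the script prints one line in the form
#   <file>: failed <check> [<check> ...]
# and exits with status 1.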
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.splitlines():
line = line.split()
if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.splitlines():
if line.startswith('Program Headers:'):
in_headers = True
if line == '':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find('Type')
ofs_offset = line.find('Offset')
ofs_flags = line.find('Flg')
ofs_align = line.find('Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse readelf -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == 'GNU_STACK':
have_gnu_stack = True
if 'W' in flags and 'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
        # However, the dynamic linker needs to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also https://marc.info/?l=binutils&m=1498883354122353
if typ == 'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2:]):
have_bindnow = True
return have_gnu_relro and have_bindnow
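# For reference, the dynamic-section scan above accepts either of these token patterns
# from `readelf -d` (surrounding columns are omitted here; only the parenthesised tag
# and the BIND_NOW flag matter to the parser):
#   ... (BIND_NOW) ...
#   ... (FLAGS) BIND_NOW ...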
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.splitlines():
if '__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
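# Worked example (the DllCharacteristics value 0x0160 is hypothetical): a 64-bit
# ('i386:x86-64') binary with HIGH_ENTROPY_VA | DYNAMIC_BASE | NX_COMPAT set has
# 0x0020 | 0x0040 | 0x0100 == 0x0160, so all three bitmask checks above pass.
def _demo_dll_characteristics_checks(bits=0x0160):
    return ((bits & IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE) == IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE and
            (bits & IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA) == IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA and
            (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT)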
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval)
|
|
from __future__ import print_function
import numpy
import numpy.random
import scipy.stats
import scipy.signal
import scipy.interpolate
import scipy
AF = None
# implement logging in this module
class GenericDataArray(dict):
# base class for single sample volume data (e.g. Vectrino, Vector, current meters)
# sample rate is a scalar
    # shape is a 3-tuple: the first entry is the number of cells,
    # the second entry is the number of samples,
    # the final entry is the number of channels/beams/components
def __init__( self,
sampleRate = 1,
shape = ( 1, 1, 1 ) ):
#super( GenericDataArray, self ).__init__()
dict.__init__( self )
self.sampleRate = sampleRate
self[ 'data' ] = numpy.empty( shape )
if len( shape ) == 1:
self.numberOfSamples = shape[ 0 ]
else:
self.numberOfSamples = shape[ 1 ]
def calculateStatistics( self, onAxis = 1, useScreenedData = False ):
self.mean = scipy.stats.nanmean( self[ 'data' ], onAxis )
self.median = scipy.stats.nanmedian( self[ 'data' ], onAxis )
self.var = scipy.stats.nanstd( self[ 'data' ], onAxis )**2
def calculateHistograms( self, bins = None ):
self.histograms = Histogram( self[ 'data' ], bins )
def adaptiveOutlierRemoval( self, thresholdFactor = 3.5 ):
if "mean" not in self:
self.calculateStatistics()
self[ 'goodIndices' ] = numpy.isfinite( self[ 'data' ] )
for currentCell in range( 0, self[ 'data' ].shape[ 0 ], 1 ):
for currentChannel in range( 0, self[ 'data' ].shape[ 2 ], 1 ):
converge = False
                midpointWorking = self.median[ currentCell, currentChannel ]
                # initialization for the first pass through the loop
sortedIndices = self[ 'data' ][ currentCell, :, currentChannel ].argsort().flatten()
numberOfGoodSamples = numpy.sum( self[ 'goodIndices' ][ currentCell, :, currentChannel ] )
while converge is False and numberOfGoodSamples > 0.5 * self[ 'data' ].shape[ 1 ]:
# estimate the standard deviation based on student's t distribution
probabilityLow = scipy.stats.t.cdf( -1, numberOfGoodSamples )
probabilityHi = scipy.stats.t.cdf( 1, numberOfGoodSamples )
probabilityLowIndex = int( numpy.floor( probabilityLow * numberOfGoodSamples ) )
probabilityHiIndex = int( numpy.ceil( probabilityHi * numberOfGoodSamples ) )
if ( numpy.isfinite( probabilityLowIndex ) and numpy.isfinite( probabilityHiIndex ) ):
belowMedianSTDEstimate = midpointWorking - \
self[ 'data' ][ currentCell, sortedIndices[ probabilityLowIndex ], currentChannel ]
aboveMedianSTDEstimate = self[ 'data' ][ currentCell, sortedIndices[ probabilityHiIndex ], currentChannel ] - \
midpointWorking
lowerLimit = midpointWorking - thresholdFactor * numpy.abs( aboveMedianSTDEstimate )
upperLimit = midpointWorking + thresholdFactor * numpy.abs( belowMedianSTDEstimate )
outlierIndices = numpy.logical_or( self[ 'data' ][ currentCell,
self[ 'goodIndices' ][ currentCell, :, currentChannel ],
currentChannel ] <= lowerLimit,
self[ 'data' ][ currentCell,
self[ 'goodIndices' ][ currentCell, :, currentChannel ],
currentChannel ] >= upperLimit ).flatten()
self[ 'goodIndices' ][ currentCell, outlierIndices, currentChannel ] = False
formerNumberOfGoodSamples = numberOfGoodSamples
numberOfGoodSamples = numpy.sum( self[ 'goodIndices' ][ currentCell, :, currentChannel ] )
numberOfPointsRemoved = int( formerNumberOfGoodSamples - numberOfGoodSamples )
#print "Removed %i points" , numberOfPointsRemoved
                    if numberOfPointsRemoved == 0:
converge = True
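    # For intuition: scipy.stats.t.cdf(-1, N) tends to about 0.16 for large N, so the sorted
    # sample at that fraction of the ranks lies roughly one standard deviation below the
    # median for near-normal data; the symmetric point above the median is used the same
    # way, giving a robust sigma estimate that outliers cannot inflate.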
def calculateTemporalSpectrum( self, numberOfWindows = 1 ):
if "mean" not in self:
self.calculateStatistics()
self[ 'spectrum' ] = {}
ensemble = numpy.arange( 0, self[ 'data' ].shape[ 1 ], 1 )
        windowLength = self[ 'data' ].shape[ 1 ] // numberOfWindows
if self[ 'data' ].ndim == 3:
numberOfChannels = self[ 'data' ].shape[ 2 ]
self[ 'spectrum' ][ 'psd' ] = numpy.empty( ( self[ 'data' ].shape[ 0 ],
windowLength,
numberOfChannels ) )
self[ 'spectrum' ][ 'psdCheck' ] = numpy.empty( ( self[ 'data' ].shape[ 0 ],
numberOfChannels ) )
else:
numberOfChannels = 1
self[ 'spectrum' ][ 'psd' ] = numpy.empty( ( self[ 'data' ].shape[ 0 ],
windowLength ) )
self[ 'spectrum' ][ 'psdCheck' ] = numpy.empty( ( self[ 'data' ].shape[ 0 ], ) )
        df = self.sampleRate / float( windowLength )
self[ 'spectrum' ][ 'f' ] = numpy.linspace( 0, self.sampleRate, windowLength, endpoint = False )
for currentCell in range( 0, self[ 'data' ].shape[ 0 ], 1 ):
for currentChannel in range( 0, numberOfChannels, 1 ):
T = self[ 'data' ][ currentCell, :, currentChannel ]
if 'goodIndices' not in self:
self[ 'goodIndices' ] = numpy.isfinite( self[ 'data' ] )
if numpy.sum( self[ 'goodIndices' ][ currentCell, :, currentChannel ] ) != len( T ):
interpolateT = scipy.interpolate.interp1d( ensemble[ self[ 'goodIndices' ][ currentCell, :, currentChannel ] ],
T[ self[ 'goodIndices' ][ currentCell, :, currentChannel ] ],
kind = 'linear',
copy = False,
bounds_error = False,
fill_value = T[ self[ 'goodIndices' ][ currentCell, :, currentChannel ] ].mean() )
T = interpolateT( ensemble )
startIndexInWindow = 0
endIndexInWindow = windowLength
window = 0
Stt = numpy.zeros( ( numberOfWindows, windowLength ) )
for window in range( 0, numberOfWindows, 1 ):
subsetOfT = T[ startIndexInWindow:endIndexInWindow ]
fftOfT = numpy.fft.fft( subsetOfT )
windowStt = fftOfT * fftOfT.conjugate()
Stt[ window, : ] = windowStt.real
startIndexInWindow = endIndexInWindow
endIndexInWindow = startIndexInWindow + windowLength
Stt = numpy.mean( Stt, axis = 0 )
# Normalize so that the integral equals the rms fluctuation squared (variance)
self[ 'spectrum' ][ 'psd' ][ currentCell, :, currentChannel ] = Stt / ( self.sampleRate * windowLength )
self[ 'spectrum' ][ 'psdCheck' ][ currentCell, currentChannel ] = ( numpy.sum( self[ 'spectrum' ][ 'psd' ][ currentCell, :, currentChannel ] ) * \
df ) / T.var()
self[ 'spectrum' ][ 'nyquistFrequency' ] = self.sampleRate / 2
self[ 'spectrum' ][ 'nyquistIndex' ] = windowLength / 2
class Histogram(dict):
def __init__(self, dataArray, bins = None ):
dict.__init__( self )
self.binEdges = []
self.binCenters = []
self.densityInBin = []
        if bins == 'correlation':
            bins = numpy.linspace( 0, 100, 101 )
        elif bins == 'vectrinoSNR':
            bins = numpy.linspace( 0, 35, 35 )
        elif bins == 'vectorSNR':
            bins = numpy.linspace( 0, 45, 45 )
        elif bins == 'vProSNR':
            bins = numpy.linspace( 1, 60, 60 )
        elif bins == 'amplitude':
            bins = numpy.linspace( 0, 255, 256 )
for cellNumber in range( 0, dataArray.shape[ 0 ], 1 ):
self.binEdges.append( [] )
self.binCenters.append( [] )
self.densityInBin.append( [] )
for channelNumber in range( 0, dataArray.shape[ -1 ], 1 ):
                if bins is None:
binEdges, binCenters = self.optimalHistogramBins( dataArray[ cellNumber, :, channelNumber ] )
densityInBin, otherBinEdges = numpy.histogram(
dataArray[ cellNumber, :, channelNumber ],
binEdges,
density = True )
elif isinstance( bins, ( int, numpy.ndarray ) ): # number of bins or binEdges specified
densityInBin, binEdges = numpy.histogram(
dataArray[ cellNumber, :, channelNumber ],
bins,
density = True )
binWidth = ( binEdges[ 1 ] - binEdges[ 0 ] ) / 2.
binCenters = numpy.linspace( binEdges[ 0 ] + binWidth,
binEdges[ -1 ] - binWidth,
densityInBin.shape[ 0 ] )
# if cellNumber == 0 and channelNumber == 0:
# self[ 'binEdges' ] = numpy.empty( ( dataArray.shape[ 0 ],
# binEdges.shape[ 0 ],
# dataArray.shape[ -1 ] ) )
# self[ 'binCenters' ] = numpy.empty( ( dataArray.shape[ 0 ],
# binCenters.shape[ 0 ],
# dataArray.shape[ -1 ] ) )
# self[ 'densityInBin' ] = numpy.empty( ( dataArray.shape[ 0 ],
# densityInBin.shape[ 0 ],
# dataArray.shape[ -1 ] ) )
# self[ 'binEdges' ][ cellNumber, :, channelNumber ] = binEdges
# self[ 'binCenters' ][ cellNumber, :, channelNumber ] = binCenters
# self[ 'densityInBin' ][ cellNumber, :, channelNumber ] = densityInBin
self.binEdges[ cellNumber ].append( binEdges )
self.binCenters[ cellNumber ].append( binCenters )
self.densityInBin[ cellNumber ].append( densityInBin )
def optimalHistogramBins( self, data ):
################################################################################
# optimal histogram bin width as shown by
# http://www.fmrib.ox.ac.uk/analysis/techrep/tr00mj2/tr00mj2/node24.html
# Summary reference is:
# Izenman, 1991
# Izenman, A. J. 1991.
# Recent developments in nonparametric density estimation.
# Journal of the American Statistical Association, 86(413):205-224.
################################################################################
data = data.flatten()
n = max(data.shape) - sum( numpy.isnan( data ) )
        # need to estimate the IQR; fall back to numpy if the AF helper module is unavailable
        if AF is not None:
            interQuartileRange = AF.iqr( data )
        else:
            q75, q25 = numpy.nanpercentile( data, [ 75, 25 ] )
            interQuartileRange = q75 - q25
binwidth = 2.0 * interQuartileRange * n ** (-1.0 / 3.0 )
# have one bin centered at the median and extend to either end of the data as
# appropriate
medianValue = numpy.median( data )
dataMinimumValue = min( data )
bins = int( ( medianValue - dataMinimumValue - binwidth / 2.0 ) / binwidth )
binCenters = medianValue - numpy.arange( bins ) * binwidth
dataMaximumValue = max( data )
        bins = int( ( dataMaximumValue - medianValue - binwidth / 2.0 ) / binwidth )
binCenters2ndHalf = medianValue + numpy.arange( 1, bins + 1 ) * binwidth
binCenters = numpy.append( binCenters, binCenters2ndHalf )
binCenters.sort( )
binEdges = binCenters - binwidth / 2
# add one last bin edge so we get the right values to plot against binCenters
binEdges = numpy.append( binEdges, binEdges[-1] + binwidth/2 )
return binEdges, binCenters
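# Worked example of the Izenman (1991) binwidth rule used above (inputs are hypothetical):
# with n = 1000 valid samples and an inter-quartile range of 4.0, n ** (1/3) is 10, so
# binwidth = 2.0 * IQR * n ** (-1/3) is approximately 2 * 4 / 10 = 0.8.
def _demoIzenmanBinwidth( n = 1000, interQuartileRange = 4.0 ):
    return 2.0 * interQuartileRange * n ** ( -1.0 / 3.0 )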
class VelocityDataArray(GenericDataArray):
def __init__(self, sampleRate = 1, shape = (0, 0, 0), coordinateSystem = None ):
dict.__init__( self )
self.sampleRate = sampleRate
self[ 'data' ] = numpy.nan * numpy.zeros( shape )
self.numberOfSamples = shape[ 1 ]
self.dataIsInCoordinateSystem = coordinateSystem
self.calculateStatistics()
def calculateScreenedStatistics( self ):
self.screenedMean = {}
self.screenedMedian = {}
self.screenedStandardDeviation = {}
if hasattr( self, "goodIndices" ):
for component in self.componentNames:
self.screenedMean[ component ] = numpy.mean( self.data[ component ][ self.goodIndices[ component ] ] )
self.screenedMedian[ component ] = numpy.median( self.data[ component ][ self.goodIndices[ component ] ] )
self.screenedStandardDeviation[ component ] = numpy.std( self.data[ component ][ self.goodIndices[ component ] ] )
elif hasattr( self, "aorIndices" ):
for component in self.componentNames:
self.screenedMean[ component ] = numpy.mean( self.data[ component ][ self.aorIndices[ component ] ] )
self.screenedMedian[ component ] = numpy.median( self.data[ component ][ self.aorIndices[ component ] ] )
self.screenedStandardDeviation[ component ] = numpy.std( self.data[ component ][ self.aorIndices[ component ] ] )
else:
print("Velocity data has not been screened yet.")
|
|
#!/usr/bin/env python
import json
import math
import re
class PManager(object):
def __init__(self, data):
if isinstance(data, (str, unicode)):
self.data = json.loads(data)
else:
self.data = data
self.factor = 1
self.unit = "MiB"
self._pre = []
self._kick = []
self._post = []
self.raid_count = 0
self._pcount = {}
self._pend = {}
self._rcount = 0
self._pvcount = 0
def _pseparator(self, devname):
pseparator = ''
if devname.find('cciss') > 0:
pseparator = 'p'
return pseparator
def pcount(self, disk_id, increment=0):
self._pcount[disk_id] = self._pcount.get(disk_id, 0) + increment
return self._pcount.get(disk_id, 0)
def psize(self, disk_id, increment=0):
self._pend[disk_id] = self._pend.get(disk_id, 0) + increment
return self._pend.get(disk_id, 0)
def rcount(self, increment=0):
self._rcount += increment
return self._rcount
def pvcount(self, increment=0):
self._pvcount += increment
return self._pvcount
def pre(self, command=None):
if command:
return self._pre.append(command)
return self._pre
def kick(self, command=None):
if command:
return self._kick.append(command)
return self._kick
def post(self, command=None):
if command:
return self._post.append(command)
return self._post
def iterdisks(self):
for item in self.data:
if item["type"] == "disk" and item["size"] > 0:
yield item
def get_partition_count(self, name):
count = 0
for disk in self.iterdisks():
count += len([v for v in disk["volumes"]
if v.get('name') == name and v['size'] > 0])
return count
def num_ceph_journals(self):
return self.get_partition_count('cephjournal')
def num_ceph_osds(self):
return self.get_partition_count('ceph')
def _gettabfstype(self, vol):
if vol.get("file_system"):
return vol["file_system"]
elif vol["mount"] == "/":
return "ext4"
elif vol["mount"] == "/boot":
return "ext3"
elif vol["mount"] == "swap":
return "swap"
return "xfs"
def _getfstype(self, vol):
fstype = self._gettabfstype(vol)
if fstype == "swap":
return ""
return "--fstype=%s" % fstype
def _getlabel(self, label):
if not label:
return ""
# XFS will refuse to format a partition if the
# disk label is > 12 characters.
return " -L {0} ".format(label[:12])
def _parttype(self, n):
return "primary"
def _getsize(self, vol):
"""Anaconda has hard coded limitation in 16TB
for ext3/4 and xfs filesystems (the only filesystems
we are supposed to use). Besides there is no stable
64-bit ext4 implementation at the moment, so the
limitation in 16TB for ext4 is not only
anaconda limitation."""
"""Root partition can not be located on xfs file system
therefore we check if root filesystem is larger
than 16TB and set it size into 16TB if it is larger.
It is necessary to note that to format 16TB
volume on ext4 it is needed about 1G memory."""
if vol["size"] > 16777216 and vol["mount"] == "/":
return 16777216
return vol["size"]
def erase_lvm_metadata(self):
self.pre("for v in $(vgs | awk '{print $1}'); do "
"vgreduce -f --removemissing $v; vgremove -f $v; done")
self.pre("for p in $(pvs | grep '\/dev' | awk '{print $1}'); do "
"pvremove -ff -y $p ; done")
def erase_raid_metadata(self):
for disk in self.iterdisks():
self.pre("mdadm --zero-superblock --force /dev/{0}*"
"".format(disk['id']))
def clean(self, disk):
self.pre("hdparm -z /dev/{0}".format(disk["id"]))
self.pre("test -e /dev/{0} && dd if=/dev/zero "
"of=/dev/{0} bs=1M count=10".format(disk["id"]))
self.pre("sleep 5")
self.pre("hdparm -z /dev/{0}".format(disk["id"]))
def gpt(self, disk):
self.pre("parted -s /dev/{0} mklabel gpt".format(disk["id"]))
def bootable(self, disk):
"""Create and mark Bios Boot partition to which grub will
embed its code later, useable for legacy boot.
May be way smaller, but be aware that the parted may
shrink 1M partition to zero at some disks and versions."""
self.pre("parted -a none -s /dev/{0} "
"unit {3} mkpart primary {1} {2}".format(
disk["id"],
self.psize(disk["id"]),
self.psize(disk["id"], 24 * self.factor),
self.unit
)
)
self.pre("parted -s /dev/{0} set {1} bios_grub on".format(
disk["id"],
self.pcount(disk["id"], 1)
)
)
"""Create partition for the EFI boot, minimum
size is 100M, recommended is 200M, with fat32 and
future mountpoint in the /boot/efi. There is also
'/usr/sbin/parted -s /dev/sda set 2 boot on'
which is strictly needed for EFI boot."""
self.pre("parted -a none -s /dev/{0} "
"unit {3} mkpart primary fat32 {1} {2}".format(
disk["id"],
self.psize(disk["id"]),
self.psize(disk["id"], 200 * self.factor),
self.unit
)
)
self.pre("parted -s /dev/{0} set {1} boot on".format(
disk["id"],
self.pcount(disk["id"], 1)
)
)
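    # For illustration (disk id "sda" is hypothetical): with factor=1, unit="MiB" and no
    # partitions queued yet for the disk, the calls above queue roughly:
    #   parted -a none -s /dev/sda unit MiB mkpart primary 0 24
    #   parted -s /dev/sda set 1 bios_grub on
    #   parted -a none -s /dev/sda unit MiB mkpart primary fat32 24 224
    #   parted -s /dev/sda set 2 boot on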
def boot(self):
self.plains(volume_filter=lambda x: x["mount"] == "/boot")
self.raids(volume_filter=lambda x: x["mount"] == "/boot")
def notboot(self):
self.plains(volume_filter=lambda x: x["mount"] != "/boot")
self.raids(volume_filter=lambda x: x["mount"] != "/boot")
def plains(self, volume_filter=None):
if not volume_filter:
volume_filter = lambda x: True
ceph_osds = self.num_ceph_osds()
journals_left = ceph_osds
ceph_journals = self.num_ceph_journals()
for disk in self.iterdisks():
for part in filter(lambda p: p["type"] == "partition" and
volume_filter(p), disk["volumes"]):
if part["size"] <= 0:
continue
if part.get('name') == 'cephjournal':
# We need to allocate a journal partition for each ceph OSD
# Determine the number of journal partitions we need on each device
ratio = math.ceil(float(ceph_osds) / ceph_journals)
# No more than 10GB will be allocated to a single journal partition
size = part["size"] / ratio
if size > 10240:
size = 10240
# This will attempt to evenly spread partitions across
# multiple devices e.g. 5 osds with 2 journal devices will
# create 3 partitions on the first device and 2 on the
# second
if ratio < journals_left:
end = ratio
else:
end = journals_left
for i in range(0, end):
journals_left -= 1
pcount = self.pcount(disk["id"], 1)
self.pre("parted -a none -s /dev/{0} "
"unit {4} mkpart {1} {2} {3}".format(
disk["id"],
self._parttype(pcount),
self.psize(disk["id"]),
self.psize(disk["id"], size * self.factor),
self.unit))
self.post("chroot /mnt/sysimage sgdisk "
"--typecode={0}:{1} /dev/{2}".format(
pcount, part["partition_guid"],disk["id"]))
continue
pcount = self.pcount(disk["id"], 1)
self.pre("parted -a none -s /dev/{0} "
"unit {4} mkpart {1} {2} {3}".format(
disk["id"],
self._parttype(pcount),
self.psize(disk["id"]),
self.psize(disk["id"], part["size"] * self.factor),
self.unit))
fstype = self._getfstype(part)
size = self._getsize(part)
tabmount = part["mount"] if part["mount"] != "swap" else "none"
tabfstype = self._gettabfstype(part)
if part.get("partition_guid"):
self.post("chroot /mnt/sysimage sgdisk "
"--typecode={0}:{1} /dev/{2}".format(
pcount, part["partition_guid"],disk["id"]))
if size > 0 and size <= 16777216 and part["mount"] != "none":
self.kick("partition {0} "
"--onpart=$(readlink -f /dev/{2})"
"{3}{4}".format(part["mount"], size,
disk["id"],
self._pseparator(disk["id"]),
pcount))
else:
if part["mount"] != "swap" and tabfstype != "none":
disk_label = self._getlabel(part.get('disk_label'))
self.post("mkfs.{0} -f $(readlink -f /dev/{1})"
"{2}{3} {4}".format(tabfstype, disk["id"],
self._pseparator(disk["id"]),
pcount, disk_label))
if part["mount"] != "none":
self.post("mkdir -p /mnt/sysimage{0}".format(
part["mount"]))
if tabfstype != "none":
self.post("echo 'UUID=$(blkid -s UUID -o value "
"$(readlink -f /dev/{0}){1}{2}) "
"{3} {4} defaults 0 0'"
" >> /mnt/sysimage/etc/fstab".format(
disk["id"], self._pseparator(disk["id"]),
pcount, tabmount, tabfstype))
def raids(self, volume_filter=None):
if not volume_filter:
volume_filter = lambda x: True
raids = {}
raid_info = {}
phys = {}
for disk in self.iterdisks():
for raid in filter(lambda p: p["type"] == "raid" and
volume_filter(p), disk["volumes"]):
if raid["size"] <= 0:
continue
raid_info[raid["mount"]] = raid
pcount = self.pcount(disk["id"], 1)
if not phys.get(raid["mount"]):
phys[raid["mount"]] = []
phys[raid["mount"]].append("$(readlink -f /dev/{0}){1}{2}".
format(disk["id"], self._pseparator(disk["id"]), pcount))
rname = "raid.{0:03d}".format(self.rcount(1))
begin_size = self.psize(disk["id"])
end_size = self.psize(disk["id"], raid["size"] * self.factor)
self.pre("parted -a none -s /dev/{0} "
"unit {4} mkpart {1} {2} {3}".format(
disk["id"], self._parttype(pcount),
begin_size, end_size, self.unit))
self.kick("partition {0} "
"--onpart=$(readlink -f /dev/{2}){3}{4}"
"".format(rname, raid["size"], disk["id"],
self._pseparator(disk["id"]), pcount))
if not raids.get(raid["mount"]):
raids[raid["mount"]] = []
raids[raid["mount"]].append(rname)
for (num, (mount, rnames)) in enumerate(raids.iteritems()):
raid = raid_info[mount]
fstype = self._gettabfstype(raid)
label = raid.get('disk_label')
# Anaconda won't label a RAID array. It also can't create
# a single-drive RAID1 array, but mdadm can.
if label or len(rnames) == 1:
if len(rnames) == 1:
phys[mount].append('missing')
self.post("mdadm --create /dev/md{0} --run --level=1 "
"--raid-devices={1} {2}".format(self.raid_count,
len(phys[mount]), ' '.join(phys[mount])))
self.post("mkfs.{0} -f {1} /dev/md{2}".format(
fstype, self._getlabel(label), self.raid_count))
self.post("mdadm --detail --scan | grep '\/dev\/md{0}'"
">> /mnt/sysimage/etc/mdadm.conf".format(
self.raid_count))
self.post("mkdir -p /mnt/sysimage{0}".format(mount))
self.post("echo \\\"UUID=\$(blkid -s UUID -o value "
"/dev/md{0}) "
"{1} {2} defaults 0 0\\\""
" >> /mnt/sysimage/etc/fstab".format(
self.raid_count, mount, fstype))
else:
self.kick("raid {0} --device md{1} --fstype {3} "
"--level=RAID1 {2}".format(mount, self.raid_count,
" ".join(rnames), fstype))
self.raid_count += 1
def pvs(self):
pvs = {}
for disk in self.iterdisks():
for pv in [p for p in disk["volumes"] if p["type"] == "pv"]:
if pv["size"] <= 0:
continue
pcount = self.pcount(disk["id"], 1)
pvname = "pv.{0:03d}".format(self.pvcount(1))
begin_size = self.psize(disk["id"])
end_size = self.psize(disk["id"], pv["size"] * self.factor)
self.pre("parted -a none -s /dev/{0} "
"unit {4} mkpart {1} {2} {3}".format(
disk["id"], self._parttype(pcount),
begin_size, end_size, self.unit))
self.kick("partition {0} "
"--onpart=$(readlink -f /dev/{2}){3}{4}"
"".format(pvname, pv["size"], disk["id"],
self._pseparator(disk["id"]), pcount))
if not pvs.get(pv["vg"]):
pvs[pv["vg"]] = []
pvs[pv["vg"]].append(pvname)
for vg, pvnames in pvs.iteritems():
self.kick("volgroup {0} {1}".format(vg, " ".join(pvnames)))
def lvs(self):
for vg in [g for g in self.data if g["type"] == "vg"]:
for lv in vg["volumes"]:
if lv["size"] <= 0:
continue
fstype = self._getfstype(lv)
size = self._getsize(lv)
tabmount = lv["mount"] if lv["mount"] != "swap" else "none"
tabfstype = self._gettabfstype(lv)
if size > 0 and size <= 16777216:
self.kick("logvol {0} --vgname={1} --size={2} "
"--name={3} {4}".format(
lv["mount"], vg["id"], size,
lv["name"], fstype))
else:
self.post("lvcreate --size {0} --name {1} {2}".format(
size, lv["name"], vg["id"]))
if lv["mount"] != "swap" and tabfstype != "none":
self.post("mkfs.{0} /dev/mapper/{1}-{2}".format(
tabfstype, vg["id"], lv["name"]))
self.post("mkdir -p /mnt/sysimage{0}"
"".format(lv["mount"]))
if tabfstype != "none":
"""
The name of the device. An LVM device is
expressed as the volume group name and the logical
volume name separated by a hyphen. A hyphen in
the original name is translated to two hyphens.
"""
self.post("echo '/dev/mapper/{0}-{1} {2} {3} "
"defaults 0 0'"
" >> /mnt/sysimage/etc/fstab".format(
vg["id"].replace("-", "--"),
lv["name"].replace("-", "--"),
tabmount, tabfstype))
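    # Example of the hyphen translation described above (names are hypothetical):
    # a vg id "image-store" with an lv name "glance-cache" is written to fstab as
    # /dev/mapper/image--store-glance--cache.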
def bootloader(self):
devs = []
for disk in self.iterdisks():
devs.append("$(basename `readlink -f /dev/{0}`)"
"".format(disk["id"]))
if devs:
self.kick("bootloader --location=mbr --driveorder={0} "
"--append=' console=ttyS0,9600 console=tty0 "
"biosdevname=0 "
"crashkernel=none'".format(",".join(devs)))
for dev in devs:
self.post("echo -n > /tmp/grub.script")
self.post("echo \\\"device (hd0) /dev/{0}\\\" >> "
"/tmp/grub.script".format(dev))
"""
            This means that we set the drive geometry manually to
            avoid grub register overlapping. We set it so that grub
            thinks the disk size is equal to 1G.
130 cylinders * (16065 * 512 = 8225280 bytes) = 1G
"""
self.post("echo \\\"geometry (hd0) 130 255 63\\\" >> "
"/tmp/grub.script")
self.post("echo \\\"root (hd0,2)\\\" >> /tmp/grub.script")
self.post("echo \\\"install /grub/stage1 (hd0) /grub/stage2 p "
"/grub/grub.conf\\\" >> /tmp/grub.script")
self.post("echo quit >> /tmp/grub.script")
self.post("cat /tmp/grub.script | chroot /mnt/sysimage "
"/sbin/grub --no-floppy --batch")
self.post("sed -i 's/hiddenmenu/hiddenmenu\\nserial\\ "
"--unit=1\\ --speed=19200\\nterminal\\ "
"--timeout=8\\ console\\ serial/g' /etc/grub.cfg")
def expose(self,
kickfile="/tmp/partition.ks",
postfile="/tmp/post_partition.ks"
):
result = ""
for pre in self.pre():
result += "{0}\n".format(pre)
result += "echo > {0}\n".format(kickfile)
for kick in self.kick():
result += "echo \"{0}\" >> {1}\n".format(kick, kickfile)
result += "echo \"%post --nochroot\" > {0}\n".format(postfile)
result += "echo \"set -x -v\" >> {0}\n".format(postfile)
result += ("echo \"exec 1>/mnt/sysimage/root/post-partition.log "
"2>&1\" >> {0}\n".format(postfile))
for post in self.post():
result += "echo \"{0}\" >> {1}\n".format(post, postfile)
result += "echo \"%end\" >> {0}\n".format(postfile)
return result
def eval(self):
for disk in self.iterdisks():
self.clean(disk)
self.gpt(disk)
self.bootable(disk)
self.boot()
self.notboot()
self.pvs()
self.lvs()
self.bootloader()
self.pre("sleep 10")
for disk in self.iterdisks():
self.pre("hdparm -z /dev/{0}".format(disk["id"]))
self.erase_lvm_metadata()
self.erase_raid_metadata()
class PreseedPManager(object):
def __init__(self, data):
if isinstance(data, (str, unicode)):
self.data = json.loads(data)
else:
self.data = data
self.validate()
self.factor = 1
self.unit = "MiB"
self.disks = sorted(["/dev/" + d["id"] for d in self.iterdisks()])
self.os_disk = "/dev/" + self.os_disks()[0]["id"]
self._pcount = {}
self._pend = {}
self._recipe = []
self._late = []
self._early = []
def os_disks(self):
return [d for d in self.iterdisks() if
filter(lambda x: x.get("vg") == "os" and
x.get("size") > 0, d["volumes"])]
def validate(self):
# os volume group can not be distributed over more than one disk.
# it is because we use plain partition for / and swap on ubuntu.
if len(self.os_disks()) > 1:
raise Exception("OS volume group must be located on one disk")
def iterdisks(self):
for item in self.data:
if item["type"] == "disk" and item["size"] > 0:
yield item
def recipe(self, command=None):
if command:
return self._recipe.append(command)
return self._recipe
def late(self, command=None, in_target=False):
if command:
return self._late.append((command, in_target))
return self._late
def early(self, command=None):
if command:
return self._early.append(command)
return self._early
def _pseparator(self, devname):
pseparator = ''
if devname.find('cciss') > 0:
pseparator = 'p'
return pseparator
def _getlabel(self, label):
if not label:
return ""
# XFS will refuse to format a partition if the
# disk label is > 12 characters.
return " -L {0} ".format(label[:12])
def _parttype(self, n):
return "primary"
def pcount(self, disk_id, increment=0):
self._pcount[disk_id] = self._pcount.get(disk_id, 0) + increment
return self._pcount.get(disk_id, 0)
def psize(self, disk_id, increment=0):
self._pend[disk_id] = self._pend.get(disk_id, 0) + increment
return self._pend.get(disk_id, 0)
def get_partition_count(self, name):
count = 0
for disk in self.iterdisks():
count += len([v for v in disk["volumes"]
if v.get('name') == name and v['size'] > 0])
return count
def num_ceph_journals(self):
return self.get_partition_count('cephjournal')
def num_ceph_osds(self):
return self.get_partition_count('ceph')
def erase_partition_table(self):
for disk in self.iterdisks():
self.early("test -e $(readlink -f /dev/{0}) && "
"dd if=/dev/zero of=$(readlink -f /dev/{0}) "
"bs=1M count=10".format(disk["id"]))
self.early("sleep 3")
self.early("hdparm -z $(readlink -f /dev/{0})".format(disk["id"]))
def log_lvm(self, line, early=True):
func = self.early
if not early:
func = self.late
func("echo \"=== {0} ===\" | logger".format(line))
func("for v in $(vgs -a --noheadings 2>/dev/null | "
"sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
"echo \"vg=$v\" | logger; done")
func("for p in $(pvs --noheadings 2>/dev/null | "
"sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
"echo \"pv=$p\" | logger; done")
def erase_lvm_metadata(self, early=True):
func = self.early
if not early:
func = self.late
func("for v in $(vgs -a --noheadings 2>/dev/null | "
"sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
"vgreduce --force --removemissing $v; "
"vgremove --force $v; done")
func("for p in $(pvs --noheadings 2>/dev/null | "
"sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
"pvremove -ff -y $p; done")
def boot(self):
self.recipe("24 24 24 ext3 "
"$gptonly{ } "
"$bios_boot{ } "
"method{ biosgrub } .")
self.psize(self.os_disk, 24 * self.factor)
self.pcount(self.os_disk, 1)
self.late("parted -s $(readlink -f {0}) set {1} bios_grub on".format(
self.os_disk,
self.pcount(self.os_disk)
)
)
self.recipe("200 200 200 ext3 $primary{ } "
"$gptonly{ } "
"$bootable{ } method{ format } format{ } use_filesystem{ } "
"filesystem{ ext3 } mountpoint{ /boot } .")
self.pcount(self.os_disk, 1)
self.psize(self.os_disk, 200 * self.factor)
def os(self):
for vg in [v for v in self.data
if v["type"] == "vg" and v["id"] == "os"]:
for vol in vg["volumes"]:
if vol["mount"] == "swap":
swap_size = vol["size"]
elif vol["mount"] == "/":
root_size = vol["size"]
self.recipe("{0} {0} {0} ext4 "
"$gptonly{{ }} "
"method{{ format }} format{{ }} use_filesystem{{ }} "
"filesystem{{ ext4 }} mountpoint{{ / }} ."
"".format(root_size))
self.pcount(self.os_disk, 1)
self.psize(self.os_disk, root_size * self.factor)
self.recipe("{0} {0} {0} linux-swap "
"$gptonly{{ }} "
"method{{ swap }} format{{ }} .".format(swap_size))
self.pcount(self.os_disk, 1)
self.psize(self.os_disk, swap_size * self.factor)
"""
        We need this line because debian-installer assigns all of the
        remaining disk space to the last partition. So, to be able to
        allocate custom partitions during the late stage, we create a
        fake swap partition here which we then destroy.
"""
self.recipe("1 1 -1 ext3 $gptonly{ } method{ keep } .")
self.late("parted $(readlink -f {0}) rm 5".format(self.os_disk))
self.late("sleep 3")
self.late("hdparm -z $(readlink -f {0})".format(self.os_disk))
def partitions(self):
ceph_osds = self.num_ceph_osds()
journals_left = ceph_osds
ceph_journals = self.num_ceph_journals()
for disk in self.iterdisks():
for part in filter(lambda p: p["type"] == "partition" and
p["mount"] != "/boot", disk["volumes"]):
if part["size"] <= 0:
continue
if self.pcount("/dev/%s" % disk["id"]) == 0:
self.late("parted -s $(readlink -f /dev/{0}) mklabel gpt"
"".format(disk["id"]))
self.late("parted -a none -s $(readlink -f /dev/{0}) "
"unit {3} mkpart primary {1} {2}".format(
disk["id"],
self.psize("/dev/%s" % disk["id"]),
self.psize("/dev/%s" % disk["id"],
24 * self.factor),
self.unit
)
)
self.late("parted -s $(readlink -f /dev/{0}) set {1} "
"bios_grub on".format(
disk["id"],
self.pcount("/dev/%s" % disk["id"], 1)
)
)
if part.get('name') == 'cephjournal':
# We need to allocate a journal partition for each ceph OSD
# Determine the number of journal partitions we need on each device
ratio = math.ceil(float(ceph_osds) / ceph_journals)
# No more than 10GB will be allocated to a single journal partition
size = part["size"] / ratio
if size > 10240:
size = 10240
# This will attempt to evenly spread partitions across
# multiple devices e.g. 5 osds with 2 journal devices will
# create 3 partitions on the first device and 2 on the
# second
if ratio < journals_left:
end = ratio
else:
end = journals_left
for i in range(0, end):
journals_left -= 1
pcount = self.pcount('/dev/%s' % disk["id"], 1)
self.late("parted -a none -s /dev/{0} "
"unit {4} mkpart {1} {2} {3}".format(
disk["id"],
self._parttype(pcount),
self.psize('/dev/%s' % disk["id"]),
self.psize('/dev/%s' % disk["id"], size * self.factor),
self.unit))
self.late("sgdisk --typecode={0}:{1} /dev/{2}"
"".format(pcount, part["partition_guid"],
disk["id"]), True)
continue
pcount = self.pcount("/dev/%s" % disk["id"], 1)
tabmount = part["mount"] if part["mount"] != "swap" else "none"
self.late("parted -a none -s $(readlink -f /dev/{0}) "
"unit {4} mkpart {1} {2} {3}".format(
disk["id"],
self._parttype(pcount),
self.psize("/dev/%s" % disk["id"]),
self.psize("/dev/%s" % disk["id"],
part["size"] * self.factor),
self.unit))
self.late("sleep 3")
self.late("hdparm -z $(readlink -f /dev/{0})"
"".format(disk["id"]))
if part.get("partition_guid"):
self.late("sgdisk --typecode={0}:{1} /dev/{2}"
"".format(pcount, part["partition_guid"],
disk["id"]), True)
if not part.get("file_system", "xfs") in ("swap", None, "none"):
disk_label = self._getlabel(part.get("disk_label"))
self.late("mkfs.{0} -f $(readlink -f /dev/{1}){2}{3} {4}"
"".format(part.get("file_system", "xfs"),
disk["id"],
self._pseparator(disk["id"]),
pcount, disk_label))
if not part["mount"] in (None, "none", "swap"):
self.late("mkdir -p /target{0}".format(part["mount"]))
if not part["mount"] in (None, "none"):
self.late("echo 'UUID=$(blkid -s UUID -o value "
"$(readlink -f /dev/{0}){1}{2}) "
"{3} {4} {5} 0 0'"
" >> /target/etc/fstab"
"".format(
disk["id"], self._pseparator(disk["id"]),
pcount, tabmount,
part.get("file_system", "xfs"),
("defaults" if part["mount"] != "swap"
else "sw" )))
def lv(self):
self.log_lvm("before creating lvm", False)
devices_dict = {}
pvlist = []
for disk in self.iterdisks():
for pv in [p for p in disk["volumes"]
if p["type"] == "pv" and p["vg"] != "os"]:
if pv["size"] <= 0:
continue
if self.pcount("/dev/%s" % disk["id"]) == 0:
self.late("parted -s $(readlink -f /dev/{0}) mklabel gpt"
"".format(disk["id"]))
self.late("parted -a none -s $(readlink -f /dev/{0}) "
"unit {3} mkpart primary {1} {2}".format(
disk["id"],
self.psize("/dev/%s" % disk["id"]),
self.psize("/dev/%s" % disk["id"],
24 * self.factor),
self.unit
)
)
self.late("parted -s $(readlink -f /dev/{0}) set {1} "
"bios_grub on".format(
disk["id"],
self.pcount("/dev/%s" % disk["id"], 1)))
pcount = self.pcount("/dev/%s" % disk["id"], 1)
begin_size = self.psize("/dev/%s" % disk["id"])
end_size = self.psize("/dev/%s" % disk["id"],
pv["size"] * self.factor)
self.late("parted -a none -s $(readlink -f /dev/{0}) "
"unit {4} mkpart {1} {2} {3}".format(
disk["id"],
self._parttype(pcount),
begin_size,
end_size,
self.unit))
self.late("sleep 3")
self.late("hdparm -z $(readlink -f /dev/{0})"
"".format(disk["id"]))
pvlist.append("pvcreate -ff -y $(readlink -f /dev/{0}){1}{2}"
"".format(disk["id"],
self._pseparator(disk["id"]),
pcount))
if not devices_dict.get(pv["vg"]):
devices_dict[pv["vg"]] = []
devices_dict[pv["vg"]].append(
"$(readlink -f /dev/{0}){1}{2}"
"".format(disk["id"], self._pseparator(disk["id"]), pcount)
)
self.log_lvm("before additional cleaning", False)
self.erase_lvm_metadata(False)
self.log_lvm("before pvcreate", False)
for pvcommand in pvlist:
self.late(pvcommand)
self.log_lvm("before vgcreate", False)
for vg, devs in devices_dict.iteritems():
self.late("vgcreate -s 32m {0} {1}".format(vg, " ".join(devs)))
self.log_lvm("after vgcreate", False)
for vg in [v for v in self.data
if v["type"] == "vg" and v["id"] != "os"]:
for lv in vg["volumes"]:
if lv["size"] <= 0:
continue
self.late("lvcreate -L {0}m -n {1} {2}".format(
lv["size"], lv["name"], vg["id"]))
self.late("sleep 5")
self.late("lvscan")
tabmount = lv["mount"] if lv["mount"] != "swap" else "none"
if ((not lv.get("file_system", "xfs") in ("swap", None, "none"))
and (not lv["mount"] in ("swap", "/"))):
self.late("mkfs.{0} /dev/mapper/{1}-{2}".format(
lv.get("file_system", "xfs"),
vg["id"].replace("-", "--"),
lv["name"].replace("-", "--")))
if not lv["mount"] in (None, "none", "swap", "/"):
self.late("mkdir -p /target{0}".format(lv["mount"]))
if not lv["mount"] in (None, "none", "swap", "/"):
self.late("echo '/dev/mapper/{0}-{1} "
"{2} {3} {4} 0 0' >> /target/etc/fstab"
"".format(
vg["id"].replace("-", "--"),
lv["name"].replace("-", "--"),
tabmount,
lv.get("file_system", "xfs"),
("defaults" if lv["mount"] != "swap"
else "sw" )))
def eval(self):
self.log_lvm("before early lvm cleaning")
self.erase_lvm_metadata()
self.log_lvm("after early lvm cleaning")
self.erase_partition_table()
self.boot()
self.os()
self.lv()
self.partitions()
self.late("apt-get install -y grub-pc", True)
self.late("sed -i "
"-e 's/.*GRUB_TERMINAL.*/GRUB_TERMINAL=console/g' "
"-e 's/.*GRUB_GFXMODE.*/#GRUB_GFXMODE=640x480/g' "
"-e 's/.*GRUB_CMDLINE_LINUX.*/"
"GRUB_CMDLINE_LINUX=\"console=tty0 "
"console=ttyS0,9600\"/g' /etc/default/grub", True)
self.late("umount /target/proc")
self.late("mount -o bind /proc /target/proc")
self.late("umount /target/sys")
self.late("mount -o bind /sys /target/sys")
self.late("grub-mkconfig", True)
self.late("grub-mkdevicemap", True)
for disk in self.iterdisks():
self.late("grub-install $(readlink -f /dev/{0})"
"".format(disk["id"]), True)
self.late("update-grub", True)
def expose_recipe(self):
return " \\\n".join(self.recipe())
def expose_late(self, gzip=False):
result = ""
for line, in_target in self.late():
line_to_append = "{0}{1}".format(
("in-target " if in_target else ""), line)
result += line_to_append + ";\\\n"
result += ("echo '{0}' | logger;\\\n"
"".format(re.sub("'", "'\"'\"'", line_to_append)))
return result.rstrip()
def expose_early(self):
result = ""
for line in self.early():
line_to_append = "{0}".format(line)
result += line_to_append + ";\\\n"
result += ("echo '{0}' | logger;\\\n"
"".format(re.sub("'", "'\"'\"'", line_to_append)))
return result.rstrip()
def expose_disks(self):
return "$(readlink -f {0})".format(self.os_disk)
def pm(data):
pmanager = PManager(data)
pmanager.eval()
return pmanager.expose()
example = """
[
{
"name": "sda",
"free_space": 101772,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"size": 12352,
"type": "pv",
"lvm_meta_size": 64,
"vg": "os"
},
{
"size": 89548,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:06.0-scsi-0:0:0:0",
"size": 102400
},
{
"name": "sdb",
"free_space": 101772,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 101836,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:06.0-scsi-0:0:1:0",
"size": 102400
},
{
"min_size": 12288,
"type": "vg",
"id": "os",
"volumes": [
{
"mount": "/",
"type": "lv",
"name": "root",
"size": 10240
},
{
"mount": "swap",
"type": "lv",
"name": "swap",
"size": 2048
}
],
"label": "Base System"
},
{
"min_size": 5120,
"type": "vg",
"id": "image",
"volumes": [
{
"mount": "/var/lib/glance",
"type": "lv",
"name": "glance",
"size": 191256
}
],
"label": "Image Storage"
}
]
"""
# pmanager = PreseedPManager(example)
# pmanager.eval()
# print pmanager.expose_late()
|