#
# Copyright John Reid 2006
#
import numpy, numpy.random
from _maths import *
def reverse_complement( s ):
result = numpy.zeros_like( s )
for i in xrange( len( s ) ):
result[ len(s) - i - 1 ] = 3 - s[i]
return result
class GappedPssm( object ):
def __init__(
self,
phi,
varphi,
K,
alpha = [ 1.0, 1.0 ]
):
"""Generate a pssm from a base distribution
K: PSSM length
phi: dirichlet prior for theta
varphi: dirichlet prior for pi
alpha: prior on gamma parameter
"""
self.theta = numpy.empty( (K+1,4), numpy.float64 )
for j in xrange( K + 1 ):
if 0 == j: self.theta[j,:] = sample_from_dirichlet( varphi )
else: self.theta[j,:] = sample_from_dirichlet( phi )
self.h = numpy.random.randint(0, K - 1)
self.gamma = numpy.random.beta( alpha[0], alpha[1] )
self.K = K
def __str__( self ):
return 'Theta:\n%s\nh:%d\ngamma:%f' % (
str( self.theta ),
self.h,
self.gamma
)
def sample_from( self ):
has_gap = bool( numpy.random.random() > self.gamma ) # does it have a gap?
result = numpy.zeros( self.K + 1, dtype = numpy.int32 )
# fill in the pssm bases
for i in xrange( self.K ):
if has_gap and i > self.h: idx = i + 1
else: idx = i
result[ idx ] = sample_from_discrete( self.theta[i+1] )
# fill in the gap base
if has_gap: gap_base = self.h + 1
else: gap_base = self.K
result[ gap_base ] = sample_from_discrete( self.theta[0] )
return result, has_gap
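# Example usage (a hedged sketch, not part of the original module): build a
# gapped PSSM with uniform Dirichlet priors and draw a few sites from it. The
# prior values and K below are arbitrary illustrative choices; it assumes
# sample_from_dirichlet and sample_from_discrete from _maths behave as used above.
def _example_gapped_pssm():
    pssm = GappedPssm( phi = [1.0, 1.0, 1.0, 1.0], varphi = [10.0, 10.0, 10.0, 10.0], K = 8 )
    print pssm
    for i in xrange( 3 ):
        site, has_gap = pssm.sample_from()
        print seq_to_str( site ), has_gap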
def generate_synthetic_sequence( pi, L ):
"""Generate one sequence
pi: sequence distribution
L: length
"""
return numpy.array(
[
sample_from_discrete( pi )
for i in xrange( L )
],
dtype = numpy.int32
)
def base_to_str( base ):
"""Converts 0,1,2,3 to A,C,G,T"""
if 0 == base: return 'A'
if 1 == base: return 'C'
if 2 == base: return 'G'
if 3 == base: return 'T'
raise RuntimeError( 'Bad base: %d' % base )
def seq_to_str( seq ):
"""Our sequences are held as arrays, this converts to A,C,G,T strings"""
return ''.join( [ base_to_str(s) for s in seq ] )
def place_binding_site_in_sequence( seq, pssm ):
"""Replaces part of a sequence with a binding site from the pssm
seq: sequence
pssm: pssm
    returns (s, g, r) where s is the position the site starts at, g is whether
    there is a gap and r is whether the site was reverse complemented
"""
sample_seq, has_gap = pssm.sample_from()
rev_comp = bool( numpy.random.random() > 0.5 ) # is it reverse complemented?
if rev_comp: sample_seq = reverse_complement( sample_seq )
s = numpy.random.randint( 0, len( seq ) - pssm.K ) # where in sequence?
# replace sequence
seq[s:s+pssm.K+1] = sample_seq
return s, has_gap, rev_comp
class ModelSample( object ):
def __init__(
self,
phi,
varphi,
K,
N,
av_length,
alpha = [ 1.0, 1.0 ],
verbose = False
):
"""Generate some synthetic sequences
N: number of sequences
K: length of PSSM
av_length: expected length
phi: prior for pssm
varphi: prior for background
alpha: prior for gamma
"""
if verbose:
print 'Creating PSSM of length %d' % K
self.pssm = GappedPssm( K = K, phi = phi, varphi = varphi, alpha = alpha )
if verbose:
print 'Creating %d sequences of average length %d' % (N, av_length)
length = 0
while length < K + 1:
length = numpy.random.poisson( av_length )
self.seqs = [
generate_synthetic_sequence(
self.pssm.theta[0,:],
length
)
for n in xrange( N )
]
self.locations = []
self.has_gap = []
self.rev_comp = []
self.ungapped_sites = []
for n in xrange( N ):
s, g, rev_comp = place_binding_site_in_sequence( self.seqs[n], self.pssm )
self.locations.append( s )
self.has_gap.append( g )
self.rev_comp.append( rev_comp )
# Calculate the pssm that would be generated if the sites were known
gapped_site = self.seqs[n][s:s+K+1]
if rev_comp: gapped_site = reverse_complement( gapped_site )
if g:
ungapped_site = numpy.concatenate(
(
gapped_site[:1+self.pssm.h],
gapped_site[2+self.pssm.h:]
)
)
else:
ungapped_site = gapped_site[:-1]
# print g, self.pssm.h, gapped_site, ungapped_site
assert len( ungapped_site ) == self.pssm.K
self.ungapped_sites.append( ungapped_site )
def dist_of_sites( self ):
"""The distribution of bases at the actual sites in the sequences
This will not be the same as the actual pssm
"""
return dist_from_seqs( self.ungapped_sites )
def __str__( self ):
return (
'Gapped Pssm Model Sample:\n'
'Pssm: %s\n'
'# sequences: %d\n'
'Lengths: %s\n'
'Starts: %s\n'
'Has gap: %s'
) % (
str( self.pssm ),
len( self.seqs ),
','.join( [ str(len(s)) for s in self.seqs ] ),
','.join( [ str(l) for l in self.locations ] ),
','.join( [ str(g) for g in self.has_gap ] ),
)
def dist_from_seqs( seqs ):
    """Empirical base distribution (K x 4) over a list of equal-length sequences"""
    if not len( seqs ): return numpy.array((), dtype = numpy.float64)
K = len( seqs[0] )
result = numpy.zeros( (K,4), dtype = numpy.float64 )
for i in xrange( K ):
for s in seqs:
assert s[i] < 4
result[i,s[i]] += 1.0
return result / len( seqs )
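# Example usage (a hedged sketch, not part of the original module): generate a
# small synthetic data set and inspect the empirical distribution at the
# planted sites. The priors and sizes below are arbitrary illustrative choices.
def _example_model_sample():
    phi = [1.0, 1.0, 1.0, 1.0]         # prior for the PSSM columns
    varphi = [10.0, 10.0, 10.0, 10.0]  # prior for the background
    sample = ModelSample( phi = phi, varphi = varphi, K = 8, N = 20, av_length = 100, verbose = True )
    print sample
    print 'Distribution at the planted sites:'
    print sample.dist_of_sites()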
from common_fixtures import * # NOQA
def test_host_deactivate(super_client, new_context):
host = new_context.host
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
host = super_client.wait_success(host.deactivate())
assert host.state == 'inactive'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
def test_host_deactivate_two_hosts(super_client, new_context):
host = new_context.host
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
# Create another host using the same agent
other_host = super_client.create_host(hostname='test',
agentId=agent.id)
other_host = super_client.wait_success(other_host)
assert other_host.state == 'active'
assert other_host.agentId == agent.id
host = super_client.wait_success(host.deactivate())
assert host.state == 'inactive'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
def test_host_activate(super_client, new_context):
host = new_context.host
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
host = super_client.wait_success(host.deactivate())
assert host.state == 'inactive'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
host = super_client.wait_success(host.activate())
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
def test_host_purge(super_client, new_context):
account_id = new_context.project.id
image_uuid = 'sim:{}'.format(random_num())
host = new_context.host
phy_host = super_client.reload(host).physicalHost()
agent = super_client.reload(host).agent()
assert host.state == 'active'
agent = super_client.wait_success(agent)
assert agent.state == 'active'
c1 = super_client.create_container(accountId=account_id,
imageUuid=image_uuid,
requestedHostId=host.id)
c1 = super_client.wait_success(c1)
assert c1.state == 'running'
c2 = super_client.create_container(accountId=account_id,
imageUuid=image_uuid,
requestedHostId=host.id)
c2 = super_client.wait_success(c2)
assert c2.state == 'running'
host = super_client.wait_success(host.deactivate())
host = super_client.wait_success(super_client.delete(host))
assert host.removed is not None
agent = super_client.by_id_agent(host.agentId)
agent = super_client.wait_success(agent)
wait_for(lambda: super_client.reload(agent).state == 'removed')
phy_host = super_client.wait_success(phy_host)
assert phy_host.removed is not None
c1 = super_client.wait_success(c1)
assert c1.removed is not None
c2 = super_client.wait_success(c2)
assert c2.removed is not None
def test_host_container_actions_inactive(new_context):
host = new_context.host
client = new_context.client
c = new_context.create_container()
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
c = client.wait_success(c.stop())
assert c.state == 'stopped'
c = client.wait_success(c.start())
assert c.state == 'running'
def test_host_create_container_inactive(new_context):
client = new_context.client
host = new_context.host
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
c = new_context.create_container_no_success()
assert c.transitioning == 'error'
def test_host_create_container_requested_inactive(new_context):
client = new_context.client
host = new_context.host
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
c = new_context.create_container(requestedHostId=host.id)
c = client.wait_success(c)
assert c.state == 'running'
def test_host_agent_state(super_client, new_context):
agent = super_client.reload(new_context.host).agent()
agent = super_client.wait_success(agent)
assert agent.state == 'active'
host = new_context.client.reload(new_context.host)
assert host.state == 'active'
agent = super_client.wait_success(agent.deactivate())
host = super_client.reload(host)
assert agent.state == 'inactive'
assert host.state == 'inactive'
assert agent.state == host.agentState
agent = super_client.wait_success(agent.activate())
host = new_context.client.reload(new_context.host)
assert host.state == 'active'
assert agent.state == 'active'
assert agent.state == host.agentState
def test_host_remove(super_client, new_context):
client = new_context.client
container = new_context.create_container()
host = super_client.reload(new_context.host)
pool = find_one(host.storagePools)
agent = host.agent()
agent_account = agent.account()
phy_host = new_context.client.wait_success(host.physicalHost())
key = find_one(super_client.list_register, key=agent.data.registrationKey)
instances = host.instances()
assert len(instances) == 1
assert container.state == 'running'
assert host.state == 'active'
assert pool.state == 'active'
assert agent.state == 'active'
assert agent_account.state == 'active'
assert phy_host.state == 'active'
assert key.state == 'active'
assert key.secretKey is not None
host = client.wait_success(host.deactivate())
assert host.state == 'inactive'
host = client.wait_success(client.delete(host))
assert host.removed is not None
agent = super_client.wait_success(agent)
wait_for(lambda: super_client.reload(agent).removed is not None)
pool = super_client.wait_success(pool)
assert pool.removed is not None
phy_host = super_client.wait_success(phy_host)
assert phy_host.removed is not None
key = super_client.wait_success(key)
assert key.removed is not None
agent_account = super_client.wait_success(agent_account)
assert agent_account.removed is not None
container = super_client.wait_success(container)
assert container.removed is not None
for c in instances:
c = super_client.wait_success(c)
assert c.removed is not None
def test_host_dockersocket(context, client):
host = client.reload(context.host)
dockersocket = host.dockersocket()
assert dockersocket.token.index('.') > 0
assert '/v1/dockersocket/' in dockersocket.url
def test_host_dockersocket_inactive(context, client):
host = client.wait_success(context.host.deactivate())
dockersocket = host.dockersocket()
assert dockersocket.token.index('.') > 0
assert '/v1/dockersocket/' in dockersocket.url
"""
Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic
interface.
"""
from collections import OrderedDict
from itertools import izip_longest
import numpy as np
from ._caffe import Net, SGDSolver
import caffe.io
# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.
@property
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
return OrderedDict([(bl.name, bl) for bl in self._blobs])
@property
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
return OrderedDict([(lr.name, lr.blobs) for lr in self.layers
if len(lr.blobs) > 0])
def _Net_forward(self, blobs=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Take
blobs: list of blobs to return in addition to output blobs.
kwargs: Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
Give
outs: {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in kwargs.iteritems():
if blob.shape[0] != self.blobs[in_].num:
raise Exception('Input is not batch sized')
if blob.ndim != 4:
raise Exception('{} blob is not 4-d'.format(in_))
self.blobs[in_].data[...] = blob
self._forward()
# Unpack blobs to extract
outs = {out: self.blobs[out].data for out in set(self.outputs + blobs)}
return outs
def _Net_backward(self, diffs=None, **kwargs):
"""
Backward pass: prepare diffs and run the net backward.
Take
diffs: list of diffs to return in addition to bottom diffs.
kwargs: Keys are output blob names and values are diff ndarrays.
If None, top diffs are taken from forward loss.
Give
outs: {blob name: diff ndarray} dict.
"""
if diffs is None:
diffs = []
if kwargs:
if set(kwargs.keys()) != set(self.outputs):
raise Exception('Top diff arguments do not match net outputs.')
# Set top diffs according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for top, diff in kwargs.iteritems():
if diff.shape[0] != self.blobs[top].num:
raise Exception('Diff is not batch sized')
if diff.ndim != 4:
raise Exception('{} diff is not 4-d'.format(top))
self.blobs[top].diff[...] = diff
self._backward()
# Unpack diffs to extract
outs = {out: self.blobs[out].diff for out in set(self.inputs + diffs)}
return outs
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Take
blobs: list of blobs to extract as in forward()
kwargs: Keys are input blob names and values are blob ndarrays.
Refer to forward().
Give
all_outs: {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in outs.iteritems():
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Take
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Give
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in batch_blobs.iteritems():
all_outs[out].extend(out_blobs)
for diff, out_diffs in batch_diffs.iteritems():
all_diffs[diff].extend(out_diffs)
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
def _Net_set_mean(self, input_, mean_f, mode='elementwise'):
"""
Set the mean to subtract for data centering.
Take
input_: which input to assign this mean.
mean_f: path to mean .npy with ndarray (input dimensional or broadcastable)
mode: elementwise = use the whole mean (and check dimensions)
channel = channel constant (e.g. mean pixel instead of mean image)
"""
if not hasattr(self, 'mean'):
self.mean = {}
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
in_shape = self.blobs[input_].data.shape
mean = np.load(mean_f)
if mode == 'elementwise':
if mean.shape != in_shape[1:]:
# Resize mean (which requires H x W x K input in range [0,1]).
m_min, m_max = mean.min(), mean.max()
normal_mean = (mean - m_min) / (m_max - m_min)
mean = caffe.io.resize_image(normal_mean.transpose((1,2,0)),
in_shape[2:]).transpose((2,0,1)) * (m_max - m_min) + m_min
self.mean[input_] = mean
elif mode == 'channel':
self.mean[input_] = mean.mean(1).mean(1).reshape((in_shape[1], 1, 1))
else:
raise Exception('Mode not in {}'.format(['elementwise', 'channel']))
def _Net_set_input_scale(self, input_, scale):
"""
Set the input feature scaling factor s.t. input blob = input * scale.
Take
input_: which input to assign this scale factor
scale: scale coefficient
"""
if not hasattr(self, 'input_scale'):
self.input_scale = {}
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
self.input_scale[input_] = scale
def _Net_set_channel_swap(self, input_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Take
input_: which input to assign this channel order
order: the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
if not hasattr(self, 'channel_swap'):
self.channel_swap = {}
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
self.channel_swap[input_] = order
def _Net_preprocess(self, input_name, input_):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- scale feature
- reorder channels (for instance color to BGR)
- subtract mean
- transpose dimensions to K x H x W
Take
input_name: name of input blob to preprocess for
input_: (H' x W' x K) ndarray
Give
caffe_inputs: (K x H x W) ndarray
"""
caffe_in = input_.astype(np.float32)
input_scale = self.input_scale.get(input_name)
channel_order = self.channel_swap.get(input_name)
mean = self.mean.get(input_name)
in_size = self.blobs[input_name].data.shape[2:]
if caffe_in.shape[:2] != in_size:
caffe_in = caffe.io.resize_image(caffe_in, in_size)
if input_scale:
caffe_in *= input_scale
if channel_order:
caffe_in = caffe_in[:, :, channel_order]
caffe_in = caffe_in.transpose((2, 0, 1))
if mean is not None:
caffe_in -= mean
return caffe_in
def _Net_deprocess(self, input_name, input_):
"""
Invert Caffe formatting; see Net.preprocess().
"""
decaf_in = input_.copy().squeeze()
input_scale = self.input_scale.get(input_name)
channel_order = self.channel_swap.get(input_name)
mean = self.mean.get(input_name)
if mean is not None:
decaf_in += mean
decaf_in = decaf_in.transpose((1,2,0))
if channel_order:
channel_order_inverse = [channel_order.index(i)
for i in range(decaf_in.shape[2])]
decaf_in = decaf_in[:, :, channel_order_inverse]
if input_scale:
decaf_in /= input_scale
return decaf_in
def _Net_set_input_arrays(self, data, labels):
"""
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
if labels.ndim == 1:
labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
np.newaxis])
return self._set_input_arrays(data, labels)
def _Net_batch(self, blobs):
"""
Batch blob lists according to net's batch size.
Take
    blobs: Keys are blob names and values are lists of blobs (of any length).
Naturally, all the lists should have the same length.
Give (yield)
batch: {blob name: list of blobs} dict for a single batch.
"""
num = len(blobs.itervalues().next())
batch_size = self.blobs.itervalues().next().num
remainder = num % batch_size
num_batches = num / batch_size
# Yield full batches.
for b in range(num_batches):
i = b * batch_size
yield {name: blobs[name][i:i + batch_size] for name in blobs}
# Yield last padded batch, if any.
if remainder > 0:
padded_batch = {}
for name in blobs:
padding = np.zeros((batch_size - remainder,)
+ blobs[name].shape[1:])
padded_batch[name] = np.concatenate([blobs[name][-remainder:],
padding])
yield padded_batch
# Attach methods to Net.
Net.blobs = _Net_blobs
Net.params = _Net_params
Net.forward = _Net_forward
Net.backward = _Net_backward
Net.forward_all = _Net_forward_all
Net.forward_backward_all = _Net_forward_backward_all
Net.set_mean = _Net_set_mean
Net.set_input_scale = _Net_set_input_scale
Net.set_channel_swap = _Net_set_channel_swap
Net.preprocess = _Net_preprocess
Net.deprocess = _Net_deprocess
Net.set_input_arrays = _Net_set_input_arrays
Net._batch = _Net_batch
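# Example usage (a hedged sketch, not part of the original module): driving the
# patched Net interface end to end. The file names, the 'data' input blob name
# and a deploy batch size of 1 are assumptions made only for illustration.
def _example_forward(model_def='deploy.prototxt', weights='model.caffemodel',
                     mean_file='mean.npy', image_file='example.jpg'):
    net = Net(model_def, weights)
    net.set_mean('data', mean_file, mode='channel')  # subtract the mean pixel
    net.set_input_scale('data', 255)                 # images are loaded in [0, 1]
    net.set_channel_swap('data', (2, 1, 0))          # RGB -> BGR
    im = caffe.io.load_image(image_file)
    caffe_in = net.preprocess('data', im)
    # forward() expects 4-d, batch-sized input blobs.
    return net.forward(data=np.asarray([caffe_in]))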
# Copyright (c) 2013 Mirantis Inc.
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for executing commands on nodes via SSH.
The main access point is method get_remote(instance), it returns
InstanceInteropHelper object which does the actual work. See the
class for the list of available methods.
It is a context manager, so it can be used with the 'with' statement
like this:
with get_remote(instance) as r:
r.execute_command(...)
Note that the module offloads the ssh calls to a child process.
It was implemented that way because we found no way to run paramiko
and eventlet together. The private high-level module methods are
implementations which are run in a separate process.
"""
import copy
import os
import shlex
import sys
import threading
import time
from eventlet.green import subprocess as e_subprocess
from eventlet import semaphore
from eventlet import timeout as e_timeout
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
import paramiko
import requests
from requests import adapters
import six
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.service import trusts
from sahara.utils import crypto
from sahara.utils import network as net_utils
from sahara.utils.openstack import neutron
from sahara.utils import procutils
from sahara.utils import remote
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
ssh_config_options = [
cfg.IntOpt(
'ssh_timeout_common', default=300, min=1,
help="Overrides timeout for common ssh operations, in seconds"),
cfg.IntOpt(
'ssh_timeout_interactive', default=1800, min=1,
help="Overrides timeout for interactive ssh operations, in seconds"),
cfg.IntOpt(
'ssh_timeout_files', default=600, min=1,
help="Overrides timeout for ssh operations with files, in seconds"),
]
CONF.register_opts(ssh_config_options)
_ssh = None
_proxy_ssh = None
_sessions = {}
INFRA = None
SSH_TIMEOUTS_MAPPING = {
'_execute_command': 'ssh_timeout_common',
'_execute_command_interactive': 'ssh_timeout_interactive'
}
_global_remote_semaphore = None
def _get_access_ip(instance):
if CONF.proxy_command and CONF.proxy_command_use_internal_ip:
return instance.internal_ip
return instance.management_ip
def _default_timeout(func):
timeout = SSH_TIMEOUTS_MAPPING.get(func.__name__, 'ssh_timeout_files')
return getattr(CONF, timeout, CONF.ssh_timeout_common)
def _get_ssh_timeout(func, timeout):
return timeout if timeout else _default_timeout(func)
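# Quick illustration (a sketch assuming the default option values registered
# above) of how timeouts resolve for the private helpers defined below:
# _execute_command maps to ssh_timeout_common, the interactive variant to
# ssh_timeout_interactive, everything else falls back to ssh_timeout_files,
# and an explicit timeout always wins.
def _example_timeout_resolution():
    assert _get_ssh_timeout(_execute_command, None) == CONF.ssh_timeout_common
    assert _get_ssh_timeout(_execute_command_interactive,
                            None) == CONF.ssh_timeout_interactive
    assert _get_ssh_timeout(_write_file_to, None) == CONF.ssh_timeout_files
    assert _get_ssh_timeout(_execute_command, 42) == 42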
def _connect(host, username, private_key, proxy_command=None,
gateway_host=None, gateway_image_username=None):
global _ssh
global _proxy_ssh
LOG.debug('Creating SSH connection')
if isinstance(private_key, six.string_types):
private_key = crypto.to_paramiko_private_key(private_key)
_ssh = paramiko.SSHClient()
_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
proxy = None
if proxy_command:
LOG.debug('Creating proxy using command: {command}'.format(
command=proxy_command))
proxy = paramiko.ProxyCommand(proxy_command)
if gateway_host:
_proxy_ssh = paramiko.SSHClient()
_proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
LOG.debug('Connecting to proxy gateway at: {gateway}'.format(
gateway=gateway_host))
_proxy_ssh.connect(gateway_host, username=gateway_image_username,
pkey=private_key, sock=proxy)
proxy = _proxy_ssh.get_transport().open_session()
proxy.exec_command("nc {0} 22".format(host))
_ssh.connect(host, username=username, pkey=private_key, sock=proxy)
def _cleanup():
global _ssh
global _proxy_ssh
_ssh.close()
if _proxy_ssh:
_proxy_ssh.close()
def _read_paramiko_stream(recv_func):
result = b''
buf = recv_func(1024)
while buf != b'':
result += buf
buf = recv_func(1024)
return result
def _escape_quotes(command):
command = command.replace('\\', '\\\\')
command = command.replace('"', '\\"')
command = command.replace('`', '\\`')
return command
def _execute_command(cmd, run_as_root=False, get_stderr=False,
raise_when_error=True):
global _ssh
chan = _ssh.get_transport().open_session()
if run_as_root:
chan.exec_command('sudo bash -c "%s"' % _escape_quotes(cmd))
else:
chan.exec_command(cmd)
# TODO(dmitryme): that could hang if stderr buffer overflows
    stdout = _read_paramiko_stream(chan.recv)
    stderr = _read_paramiko_stream(chan.recv_stderr)
if type(stdout) == bytes:
stdout = stdout.decode('utf-8')
if type(stderr) == bytes:
stderr = stderr.decode('utf-8')
ret_code = chan.recv_exit_status()
if ret_code and raise_when_error:
raise ex.RemoteCommandException(cmd=cmd, ret_code=ret_code,
stdout=stdout, stderr=stderr)
if get_stderr:
return ret_code, stdout, stderr
else:
return ret_code, stdout
def _execute_command_interactive(cmd, run_as_root=False):
global _ssh
chan = _ssh.get_transport().open_session()
if run_as_root:
chan.exec_command('sudo bash -c "%s"' % _escape_quotes(cmd))
else:
chan.exec_command(cmd)
_proxy_shell(chan)
_ssh.close()
def _proxy_shell(chan):
def readall():
while True:
d = sys.stdin.read(1)
if not d or chan.exit_status_ready():
break
chan.send(d)
reader = threading.Thread(target=readall)
reader.start()
while True:
data = chan.recv(256)
if not data or chan.exit_status_ready():
break
sys.stdout.write(data)
sys.stdout.flush()
def _get_http_client(host, port, proxy_command=None, gateway_host=None,
gateway_username=None, gateway_private_key=None):
global _sessions
_http_session = _sessions.get((host, port), None)
LOG.debug('Cached HTTP session for {host}:{port} is {session}'.format(
host=host, port=port, session=_http_session))
if not _http_session:
if gateway_host:
_http_session = _get_proxy_gateway_http_session(
gateway_host, gateway_username,
gateway_private_key, host, port, proxy_command)
LOG.debug('Created ssh proxied HTTP session for {host}:{port}'
.format(host=host, port=port))
elif proxy_command:
# can return a new session here because it actually uses
# the same adapter (and same connection pools) for a given
# host and port tuple
_http_session = _get_proxied_http_session(
proxy_command, host, port=port)
LOG.debug('Created proxied HTTP session for {host}:{port}'
.format(host=host, port=port))
else:
# need to cache the sessions that are not proxied through
# HTTPRemoteWrapper so that a new session with a new HTTPAdapter
# and associated pools is not recreated for each HTTP invocation
_http_session = requests.Session()
LOG.debug('Created standard HTTP session for {host}:{port}'
.format(host=host, port=port))
adapter = requests.adapters.HTTPAdapter()
for prefix in ['http://', 'https://']:
_http_session.mount(prefix + '%s:%s' % (host, port),
adapter)
LOG.debug('Caching session {session} for {host}:{port}'
.format(session=_http_session, host=host, port=port))
_sessions[(host, port)] = _http_session
return _http_session
def _write_fl(sftp, remote_file, data):
try:
write_data = paramiko.py3compat.StringIO(data)
except TypeError:
write_data = paramiko.py3compat.BytesIO(data)
sftp.putfo(write_data, remote_file)
def _append_fl(sftp, remote_file, data):
fl = sftp.file(remote_file, 'a')
fl.write(data)
fl.close()
def _write_file(sftp, remote_file, data, run_as_root):
if run_as_root:
temp_file = 'temp-file-%s' % uuidutils.generate_uuid()
_write_fl(sftp, temp_file, data)
_execute_command(
'mv %s %s' % (temp_file, remote_file), run_as_root=True)
else:
_write_fl(sftp, remote_file, data)
def _append_file(sftp, remote_file, data, run_as_root):
if run_as_root:
temp_file = 'temp-file-%s' % uuidutils.generate_uuid()
_write_fl(sftp, temp_file, data)
_execute_command(
'cat %s >> %s' % (temp_file, remote_file), run_as_root=True)
_execute_command('rm -f %s' % temp_file)
else:
_append_fl(sftp, remote_file, data)
def _prepend_file(sftp, remote_file, data, run_as_root):
if run_as_root:
temp_file = 'temp-file-%s' % uuidutils.generate_uuid()
temp_remote_file = 'temp-remote-file-%s' % uuidutils.generate_uuid()
_write_fl(sftp, temp_file, data)
_execute_command(
'cat %s > %s' % (remote_file, temp_remote_file))
_execute_command(
'cat %s %s > %s' % (
temp_file, temp_remote_file, remote_file), run_as_root=True)
_execute_command('rm -f %s %s' % (temp_file, temp_remote_file))
def _write_file_to(remote_file, data, run_as_root=False):
global _ssh
_write_file(_ssh.open_sftp(), remote_file, data, run_as_root)
def _write_files_to(files, run_as_root=False):
global _ssh
sftp = _ssh.open_sftp()
for fl, data in six.iteritems(files):
_write_file(sftp, fl, data, run_as_root)
def _append_to_file(remote_file, data, run_as_root=False):
global _ssh
_append_file(_ssh.open_sftp(), remote_file, data, run_as_root)
def _append_to_files(files, run_as_root=False):
global _ssh
sftp = _ssh.open_sftp()
for fl, data in six.iteritems(files):
_append_file(sftp, fl, data, run_as_root)
def _prepend_to_file(remote_file, data, run_as_root=False):
global _ssh
_prepend_file(_ssh.open_sftp(), remote_file, data, run_as_root)
def _prepend_to_files(files, run_as_root=False):
global _ssh
sftp = _ssh.open_sftp()
for fl, data in six.iteritems(files):
_prepend_file(sftp, fl, data, run_as_root)
def _read_file(sftp, remote_file):
fl = sftp.file(remote_file, 'r')
data = fl.read()
fl.close()
try:
return data.decode('utf-8')
except Exception:
return data
def _read_file_from(remote_file, run_as_root=False):
global _ssh
fl = remote_file
if run_as_root:
fl = 'temp-file-%s' % (uuidutils.generate_uuid())
_execute_command('cp %s %s' % (remote_file, fl), run_as_root=True)
try:
return _read_file(_ssh.open_sftp(), fl)
except IOError:
LOG.error("Can't read file {filename}".format(filename=remote_file))
raise
finally:
if run_as_root:
_execute_command(
'rm %s' % fl, run_as_root=True, raise_when_error=False)
def _get_python_to_execute():
try:
_execute_command('python3 --version')
except Exception:
_execute_command('python2 --version')
return 'python2'
return 'python3'
def _get_os_distrib():
python_version = _get_python_to_execute()
return _execute_command(
('printf "import platform\nprint(platform.linux_distribution('
'full_distribution_name=0)[0])" | {}'.format(python_version)),
run_as_root=False)[1].lower().strip()
def _get_os_version():
python_version = _get_python_to_execute()
return _execute_command(
('printf "import platform\nprint(platform.linux_distribution()[1])"'
' | {}'.format(python_version)), run_as_root=False)[1].strip()
def _install_packages(packages):
distrib = _get_os_distrib()
if distrib == 'ubuntu':
cmd = 'RUNLEVEL=1 apt-get install -y %(pkgs)s'
elif distrib == 'fedora':
fversion = _get_os_version()
if fversion >= 22:
cmd = 'dnf install -y %(pkgs)s'
else:
cmd = 'yum install -y %(pkgs)s'
elif distrib in ('redhat', 'centos'):
cmd = 'yum install -y %(pkgs)s'
else:
raise ex.NotImplementedException(
_('Package Installation'),
_('%(fmt)s is not implemented for OS %(distrib)s') % {
'fmt': '%s', 'distrib': distrib})
cmd = cmd % {'pkgs': ' '.join(packages)}
_execute_command(cmd, run_as_root=True)
def _update_repository():
distrib = _get_os_distrib()
if distrib == 'ubuntu':
cmd = 'apt-get update'
elif distrib == 'fedora':
fversion = _get_os_version()
if fversion >= 22:
cmd = 'dnf clean all'
else:
cmd = 'yum clean all'
elif distrib in ('redhat', 'centos'):
cmd = 'yum clean all'
else:
raise ex.NotImplementedException(
_('Repository Update'),
_('%(fmt)s is not implemented for OS %(distrib)s') % {
'fmt': '%s', 'distrib': distrib})
_execute_command(cmd, run_as_root=True)
def _replace_remote_string(remote_file, old_str, new_str):
old_str = old_str.replace("\'", "\''")
new_str = new_str.replace("\'", "\''")
cmd = "sudo sed -i 's,%s,%s,g' %s" % (old_str, new_str, remote_file)
_execute_command(cmd)
def _replace_remote_line(remote_file, old_line_with_start_string, new_line):
search_string = old_line_with_start_string.replace("\'", "\''")
cmd = ("sudo sed -i 's/^%s.*/%s/' %s" % (search_string,
new_line, remote_file))
_execute_command(cmd)
def _execute_on_vm_interactive(cmd, matcher):
global _ssh
buf = ''
channel = _ssh.invoke_shell()
LOG.debug('Channel is {channel}'.format(channel=channel))
try:
LOG.debug('Sending cmd {command}'.format(command=cmd))
channel.send(cmd + '\n')
while not matcher.is_eof(buf):
buf += channel.recv(4096)
response = matcher.get_response(buf)
if response is not None:
channel.send(response + '\n')
buf = ''
finally:
LOG.debug('Closing channel')
channel.close()
def _acquire_remote_semaphore():
context.current().remote_semaphore.acquire()
_global_remote_semaphore.acquire()
def _release_remote_semaphore():
_global_remote_semaphore.release()
context.current().remote_semaphore.release()
def _get_proxied_http_session(proxy_command, host, port=None):
session = requests.Session()
adapter = ProxiedHTTPAdapter(
_simple_exec_func(shlex.split(proxy_command)), host, port)
session.mount('http://{0}:{1}'.format(host, adapter.port), adapter)
return session
def _get_proxy_gateway_http_session(gateway_host, gateway_username,
gateway_private_key, host, port=None,
proxy_command=None):
session = requests.Session()
adapter = ProxiedHTTPAdapter(
_proxy_gateway_func(gateway_host, gateway_username,
gateway_private_key, host,
port, proxy_command),
host, port)
session.mount('http://{0}:{1}'.format(host, port), adapter)
return session
def _simple_exec_func(cmd):
def func():
return e_subprocess.Popen(cmd,
stdin=e_subprocess.PIPE,
stdout=e_subprocess.PIPE,
stderr=e_subprocess.PIPE)
return func
def _proxy_gateway_func(gateway_host, gateway_username,
gateway_private_key, host,
port, proxy_command):
def func():
proc = procutils.start_subprocess()
try:
conn_params = (gateway_host, gateway_username, gateway_private_key,
proxy_command, None, None)
procutils.run_in_subprocess(proc, _connect, conn_params)
cmd = "nc {host} {port}".format(host=host, port=port)
procutils.run_in_subprocess(
proc, _execute_command_interactive, (cmd,), interactive=True)
return proc
except Exception:
with excutils.save_and_reraise_exception():
procutils.shutdown_subprocess(proc, _cleanup)
return func
class ProxiedHTTPAdapter(adapters.HTTPAdapter):
def __init__(self, create_process_func, host, port):
super(ProxiedHTTPAdapter, self).__init__()
LOG.debug('HTTP adapter created for {host}:{port}'.format(host=host,
port=port))
self.create_process_func = create_process_func
self.port = port
self.host = host
def get_connection(self, url, proxies=None):
pool_conn = (
super(ProxiedHTTPAdapter, self).get_connection(url, proxies))
if hasattr(pool_conn, '_get_conn'):
http_conn = pool_conn._get_conn()
if http_conn.sock is None:
if hasattr(http_conn, 'connect'):
sock = self._connect()
LOG.debug('HTTP connection {connection} getting new '
'netcat socket {socket}'.format(
connection=http_conn, socket=sock))
http_conn.sock = sock
else:
if hasattr(http_conn.sock, 'is_netcat_socket'):
LOG.debug('Pooled http connection has existing '
'netcat socket. resetting pipe')
http_conn.sock.reset()
pool_conn._put_conn(http_conn)
return pool_conn
def close(self):
LOG.debug('Closing HTTP adapter for {host}:{port}'
.format(host=self.host, port=self.port))
super(ProxiedHTTPAdapter, self).close()
def _connect(self):
LOG.debug('Returning netcat socket for {host}:{port}'
.format(host=self.host, port=self.port))
rootwrap_command = CONF.rootwrap_command if CONF.use_rootwrap else ''
return NetcatSocket(self.create_process_func, rootwrap_command)
class NetcatSocket(object):
def _create_process(self):
self.process = self.create_process_func()
def __init__(self, create_process_func, rootwrap_command=None):
self.create_process_func = create_process_func
self.rootwrap_command = rootwrap_command
self._create_process()
def send(self, content):
try:
self.process.stdin.write(content)
self.process.stdin.flush()
except IOError as e:
raise ex.SystemError(e)
return len(content)
def sendall(self, content):
return self.send(content)
def makefile(self, mode, *arg):
if mode.startswith('r'):
return self.process.stdout
if mode.startswith('w'):
return self.process.stdin
raise ex.IncorrectStateError(_("Unknown file mode %s") % mode)
def recv(self, size):
try:
return os.read(self.process.stdout.fileno(), size)
except IOError as e:
raise ex.SystemError(e)
def _terminate(self):
if self.rootwrap_command:
os.system('{0} kill {1}'.format(self.rootwrap_command, # nosec
self.process.pid))
else:
self.process.terminate()
def close(self):
LOG.debug('Socket close called')
self._terminate()
def settimeout(self, timeout):
pass
def fileno(self):
return self.process.stdin.fileno()
def is_netcat_socket(self):
return True
def reset(self):
self._terminate()
self._create_process()
class InstanceInteropHelper(remote.Remote):
def __init__(self, instance):
self.instance = instance
def __enter__(self):
_acquire_remote_semaphore()
try:
self.bulk = BulkInstanceInteropHelper(self.instance)
return self.bulk
except Exception:
with excutils.save_and_reraise_exception():
_release_remote_semaphore()
def __exit__(self, *exc_info):
try:
self.bulk.close()
finally:
_release_remote_semaphore()
def get_neutron_info(self, instance=None):
if not instance:
instance = self.instance
neutron_info = dict()
neutron_info['network'] = instance.cluster.neutron_management_network
ctx = context.current()
neutron_info['token'] = context.get_auth_token()
neutron_info['tenant'] = ctx.tenant_name
neutron_info['host'] = _get_access_ip(instance)
log_info = copy.deepcopy(neutron_info)
del log_info['token']
LOG.debug('Returning neutron info: {info}'.format(info=log_info))
return neutron_info
def _build_proxy_command(self, command, instance=None, port=None,
info=None, rootwrap_command=None):
# Accepted keywords in the proxy command template:
# {host}, {port}, {tenant_id}, {network_id}, {router_id}
keywords = {}
if not info:
info = self.get_neutron_info(instance)
keywords['tenant_id'] = context.current().tenant_id
keywords['network_id'] = info['network']
# Query Neutron only if needed
if '{router_id}' in command:
auth = trusts.get_os_admin_auth_plugin(instance.cluster)
client = neutron.NeutronClient(info['network'], info['token'],
info['tenant'], auth=auth)
keywords['router_id'] = client.get_router()
keywords['host'] = _get_access_ip(instance)
keywords['port'] = port
try:
command = command.format(**keywords)
except KeyError as e:
LOG.error('Invalid keyword in proxy_command: {result}'.format(
result=e))
# Do not give more details to the end-user
raise ex.SystemError('Misconfiguration')
if rootwrap_command:
command = '{0} {1}'.format(rootwrap_command, command)
return command
def _get_conn_params(self):
host_ng = self.instance.node_group
cluster = host_ng.cluster
access_instance = self.instance
proxy_gateway_node = cluster.get_proxy_gateway_node()
gateway_host = None
gateway_image_username = None
if proxy_gateway_node and not host_ng.is_proxy_gateway:
            # In other words, we are going to connect through the proxy
            # gateway instead of the node we are actually trying to reach.
            # The node group that supplies the proxy gateway must have
            # floating IPs, but when a proxy is used the other nodes are not
            # required to have one, so this instance is assumed not to have a
            # floating IP and is reached through the proxy.
access_instance = proxy_gateway_node
gateway_host = proxy_gateway_node.management_ip
ng = proxy_gateway_node.node_group
gateway_image_username = ng.image_username
proxy_command = None
if CONF.proxy_command:
# Build a session through a user-defined socket
proxy_command = CONF.proxy_command
        # This effectively tests whether access_instance.management_ip is a
        # floating IP: in the Neutron case we check the node group of the
        # access_instance for floating IP assignment.
elif CONF.use_namespaces and not net_utils.has_floating_ip(
access_instance):
# Build a session through a netcat socket in the Neutron namespace
proxy_command = (
'ip netns exec qrouter-{router_id} nc {host} {port}')
# proxy_command is currently a template, turn it into a real command
# i.e. dereference {host}, {port}, etc.
if proxy_command:
rootwrap = CONF.rootwrap_command if CONF.use_rootwrap else ''
proxy_command = self._build_proxy_command(
proxy_command, instance=access_instance, port=22,
info=None, rootwrap_command=rootwrap)
host_ip = _get_access_ip(self.instance)
return (host_ip,
host_ng.image_username,
cluster.management_private_key,
proxy_command,
gateway_host,
gateway_image_username)
def _run(self, func, *args, **kwargs):
proc = procutils.start_subprocess()
try:
procutils.run_in_subprocess(proc, _connect,
self._get_conn_params())
return procutils.run_in_subprocess(proc, func, args, kwargs)
except Exception:
with excutils.save_and_reraise_exception():
procutils.shutdown_subprocess(proc, _cleanup)
finally:
procutils.shutdown_subprocess(proc, _cleanup)
def _run_with_log(self, func, timeout, description, *args, **kwargs):
start_time = time.time()
try:
with e_timeout.Timeout(timeout,
ex.TimeoutException(timeout,
op_name=description)):
return self._run(func, *args, **kwargs)
finally:
self._log_command('"%s" took %.1f seconds to complete' % (
description, time.time() - start_time))
def _run_s(self, func, timeout, description, *args, **kwargs):
timeout = _get_ssh_timeout(func, timeout)
_acquire_remote_semaphore()
try:
return self._run_with_log(func, timeout,
description, *args, **kwargs)
finally:
_release_remote_semaphore()
def get_http_client(self, port, info=None):
self._log_command('Retrieving HTTP session for {0}:{1}'.format(
_get_access_ip(self.instance), port))
host_ng = self.instance.node_group
cluster = host_ng.cluster
access_instance = self.instance
access_port = port
proxy_gateway_node = cluster.get_proxy_gateway_node()
gateway_host = None
gateway_username = None
gateway_private_key = None
if proxy_gateway_node and not host_ng.is_proxy_gateway:
access_instance = proxy_gateway_node
access_port = 22
gateway_host = proxy_gateway_node.management_ip
gateway_username = proxy_gateway_node.node_group.image_username
gateway_private_key = cluster.management_private_key
proxy_command = None
if CONF.proxy_command:
# Build a session through a user-defined socket
proxy_command = CONF.proxy_command
        # This tests whether access_instance has a floating IP (no Nova
        # implications): in the Neutron case we check the node group of the
        # access_instance for floating IP assignment.
elif (CONF.use_namespaces and not net_utils.has_floating_ip(
access_instance)):
# need neutron info
if not info:
info = self.get_neutron_info(access_instance)
# Build a session through a netcat socket in the Neutron namespace
proxy_command = (
'ip netns exec qrouter-{router_id} nc {host} {port}')
# proxy_command is currently a template, turn it into a real command
# i.e. dereference {host}, {port}, etc.
if proxy_command:
rootwrap = CONF.rootwrap_command if CONF.use_rootwrap else ''
proxy_command = self._build_proxy_command(
proxy_command, instance=access_instance, port=access_port,
info=info, rootwrap_command=rootwrap)
return _get_http_client(_get_access_ip(self.instance), port,
proxy_command, gateway_host,
gateway_username,
gateway_private_key)
def close_http_session(self, port):
global _sessions
host = _get_access_ip(self.instance)
self._log_command(_("Closing HTTP session for %(host)s:%(port)s") % {
'host': host, 'port': port})
session = _sessions.get((host, port), None)
if session is None:
raise ex.NotFoundException(
{'host': host, 'port': port},
_('Session for %(host)s:%(port)s not cached'))
session.close()
del _sessions[(host, port)]
def execute_command(self, cmd, run_as_root=False, get_stderr=False,
raise_when_error=True, timeout=None):
description = _('Executing "%s"') % cmd
self._log_command(description)
return self._run_s(_execute_command, timeout, description,
cmd, run_as_root, get_stderr, raise_when_error)
def write_file_to(self, remote_file, data, run_as_root=False,
timeout=None):
description = _('Writing file "%s"') % remote_file
self._log_command(description)
self._run_s(_write_file_to, timeout, description,
remote_file, data, run_as_root)
def write_files_to(self, files, run_as_root=False, timeout=None):
description = _('Writing files "%s"') % list(files)
self._log_command(description)
self._run_s(_write_files_to, timeout, description, files, run_as_root)
def append_to_file(self, r_file, data, run_as_root=False, timeout=None):
description = _('Appending to file "%s"') % r_file
self._log_command(description)
self._run_s(_append_to_file, timeout, description,
r_file, data, run_as_root)
def append_to_files(self, files, run_as_root=False, timeout=None):
description = _('Appending to files "%s"') % list(files)
self._log_command(description)
self._run_s(_append_to_files, timeout, description, files, run_as_root)
def prepend_to_file(self, r_file, data, run_as_root=False, timeout=None):
description = _('Prepending to file "%s"') % r_file
self._log_command(description)
self._run_s(_prepend_to_file, timeout, description,
r_file, data, run_as_root)
def read_file_from(self, remote_file, run_as_root=False, timeout=None):
description = _('Reading file "%s"') % remote_file
self._log_command(description)
return self._run_s(_read_file_from, timeout, description,
remote_file, run_as_root)
def get_python_version(self, timeout=None):
return self._run_s(
_get_python_to_execute, timeout, "get_python_version")
def get_os_distrib(self, timeout=None):
return self._run_s(_get_os_distrib, timeout, "get_os_distrib")
def get_os_version(self, timeout=None):
return self._run_s(_get_os_version, timeout, "get_os_version")
def install_packages(self, packages, timeout=None):
description = _('Installing packages "%s"') % list(packages)
self._log_command(description)
self._run_s(_install_packages, timeout, description, packages)
def update_repository(self, timeout=None):
description = _('Updating repository')
self._log_command(description)
self._run_s(_update_repository, timeout, description)
def replace_remote_string(self, remote_file, old_str, new_str,
timeout=None):
description = _('In file "%(file)s" replacing string '
'"%(old_string)s" with "%(new_string)s"') % {
"file": remote_file,
"old_string": old_str, "new_string": new_str}
self._log_command(description)
self._run_s(_replace_remote_string, timeout, description,
remote_file, old_str, new_str)
def replace_remote_line(self, remote_file,
old_line_with_start_string,
new_line, timeout=None):
description = _('In file "%(file)s" replacing line'
' beginning with string '
'"%(old_line_with_start_string)s"'
' with "%(new_line)s"') % {
"file": remote_file,
"old_line_with_start_string": old_line_with_start_string,
"new_line": new_line}
self._log_command(description)
self._run_s(_replace_remote_line, timeout, description,
remote_file, old_line_with_start_string, new_line)
def execute_on_vm_interactive(self, cmd, matcher, timeout=None):
"""Runs given command and responds to prompts.
'cmd' is a command to execute.
'matcher' is an object which provides responses on command's
prompts. It should have two methods implemented:
* get_response(buf) - returns response on prompt if it is
found in 'buf' string, which is a part of command output.
If no prompt is found, the method should return None.
* is_eof(buf) - returns True if current 'buf' indicates that
the command is finished. False should be returned
otherwise.
"""
description = _('Executing interactively "%s"') % cmd
self._log_command(description)
self._run_s(_execute_on_vm_interactive, timeout,
description, cmd, matcher)
    def _log_command(self, description):
        with context.set_current_instance_id(self.instance.instance_id):
            LOG.debug(description)
class BulkInstanceInteropHelper(InstanceInteropHelper):
def __init__(self, instance):
super(BulkInstanceInteropHelper, self).__init__(instance)
self.proc = procutils.start_subprocess()
try:
procutils.run_in_subprocess(self.proc, _connect,
self._get_conn_params())
except Exception:
with excutils.save_and_reraise_exception():
procutils.shutdown_subprocess(self.proc, _cleanup)
def close(self):
procutils.shutdown_subprocess(self.proc, _cleanup)
def _run(self, func, *args, **kwargs):
return procutils.run_in_subprocess(self.proc, func, args, kwargs)
def _run_s(self, func, timeout, description, *args, **kwargs):
timeout = _get_ssh_timeout(func, timeout)
return self._run_with_log(func, timeout, description, *args, **kwargs)
class SshRemoteDriver(remote.RemoteDriver):
def get_type_and_version(self):
return "ssh.1.0"
def setup_remote(self, engine):
global _global_remote_semaphore
global INFRA
_global_remote_semaphore = semaphore.Semaphore(
CONF.global_remote_threshold)
INFRA = engine
def get_remote(self, instance):
return InstanceInteropHelper(instance)
def get_userdata_template(self):
# SSH does not need any instance customization
return ""
"""
S. Van Hoey
2016-06-06
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from skimage.feature import canny
from skimage.segmentation import clear_border
from skimage.morphology import dilation, rectangle
from skimage.measure import regionprops
import cv2 as cv
from utils import (calculate_convexity,
calculate_circularity_reciprocal)
CHANNEL_CODE = {'blue': 0, 'green': 1, 'red': 2}
DEFAULT_FILTERS = {'circularity_reciprocal': {'min': 0.2, 'max': 1.6},
'convexity': {'min': 0.92}}
class NotAllowedChannel(Exception):
"""
Exception placeholder for easier debugging.
"""
pass
class Logger(object):
"""
Log the sequence of log statements performed
"""
def __init__(self):
self.log = []
def add_log(self, message):
"""add a log statement to the sequence"""
self.log.append(message)
def get_last_log(self):
return self.log[-1]
def print_log_sequence(self):
print("Steps undertaken since from raw image:")
print("\n".join(self.log))
print("\n")
def clear_log(self):
"""clear all the logs"""
self.log = []
def batchbubblekicker(data_path, channel, pipeline, *args):
"""
    Given a folder of processable image files and a channel to use, a pipeline
    class (a sequence of processing steps as implemented in pipelines.py) is
    applied to each individual image.
:param data_path: folder containing images to process
:param channel: green | red | blue
:param pipeline: class from pipelines.py to use as processing sequence
:param args: arguments required by the pipeline
    :return: dictionary mapping each file name to its output binary image
"""
results = {}
for imgfile in os.listdir(data_path):
current_bubbler = pipeline(os.path.join(data_path, imgfile),
channel=channel)
results[imgfile] = current_bubbler.run(*args)
return results
class BubbleKicker(object):
def __init__(self, filename, channel='red'):
"""
This class contains a set of functions that can be applied to a
bubble image in order to derive a binary bubble-image and calculate the
statistics/distribution
:param filename: image file name
:param channel: green | red | blue
"""
self.raw_file = self._read_image(filename)
self.logs = Logger()
self._channel_control(channel)
self._channel = channel
self.raw_image = self.raw_file[:, :, CHANNEL_CODE[self._channel]]
self.current_image = self.raw_image.copy()
@staticmethod
def _read_image(filename):
"""read the image from a file and store
an RGB-image MxNx3
"""
image = cv.imread(filename)
return image
def reset_to_raw(self):
"""make the current image again the raw image"""
self.current_image = self.raw_image.copy()
self.logs.clear_log()
def switch_channel(self, channel):
"""change the color channel"""
self._channel_control(channel)
self._channel = channel
self.raw_image = self.raw_file[:, :, CHANNEL_CODE[self._channel]]
self.current_image = self.raw_image.copy()
self.logs.clear_log()
print("Currently using channel {}".format(self._channel))
def what_channel(self):
"""check the current working channel (R, G or B?)"""
print(self._channel)
@staticmethod
def _channel_control(channel):
"""check if channel is either red, green, blue"""
if channel not in ['red', 'green', 'blue']:
raise NotAllowedChannel('Not a valid channel for '
'RGB color scheme!')
def edge_detect_canny_opencv(self, threshold=[0.01, 0.5]):
"""perform the edge detection algorithm of Canny on the image using
the openCV package. Thresholds are respectively min and max threshodls for building
the gaussian."""
image = cv.Canny(self.current_image,
threshold[0],
threshold[1])
self.current_image = image
self.logs.add_log('edge-detect with thresholds {} -> {} '
'- opencv'.format(threshold[0], threshold[1]))
return image
def edge_detect_canny_skimage(self, sigma=3, threshold=[0.01, 0.5]):
"""perform the edge detection algorithm of Canny on the image using scikit package"""
image = canny(self.current_image,
sigma=sigma,
low_threshold=threshold[0],
high_threshold=threshold[1])
self.current_image = image
# append function to logs
self.logs.add_log('edge-detect with '
'thresholds {} -> {} and sigma {} '
'- skimage'.format(threshold[0],
threshold[1],
sigma))
return image
def adaptive_threshold_opencv(self, blocksize=91, cvalue=18):
"""
        binarize the image with adaptive Gaussian thresholding, for which the
        user can specify the width of the window of action (blocksize) and a
        constant (cvalue) subtracted from the weighted mean. This function
        uses the openCV package.
        Parameters
        ----------
        blocksize: int
            size (odd, in pixels) of the neighbourhood used to compute the threshold
        cvalue: int
            constant subtracted from the Gaussian-weighted mean
"""
image = cv.adaptiveThreshold(self.current_image, 1,
cv.ADAPTIVE_THRESH_GAUSSIAN_C,
cv.THRESH_BINARY, blocksize, cvalue)
self.current_image = image
self.logs.add_log('adaptive threshold bubble detection '
'with blocksize {} and cvalue {} '
'- opencv'.format(blocksize, cvalue))
return image
def dilate_opencv(self, footprintsize=3):
"""perform the dilation of the image"""
# set up structuring element with footprintsize
kernel = np.ones((footprintsize, footprintsize), np.uint8)
# perform algorithm with given environment,
# store in same memory location
image = cv.dilate(self.current_image, kernel, iterations=1)
# update current image
self.current_image = image
# append function to logs
self.logs.add_log('dilate with footprintsize {} '
'- opencv'.format(footprintsize))
return image
def dilate_skimage(self):
"""perform the dilation of the image"""
# set up structuring element
        # (@Giacomo: are (1, 90) and (1, 0) different? using rectangle(1, 1) here)
struct_env = rectangle(1, 1)
# perform algorithm with given environment,
# store in same memory location
image = dilation(self.current_image, selem=struct_env,
out=self.current_image)
# update current image
self.current_image = image
# append function to logs
self.logs.add_log('dilate - skimage')
return image
def fill_holes_opencv(self):
"""fill the holes of the image"""
# perform algorithm
h, w = self.current_image.shape[:2] # stores image sizes
mask = np.zeros((h + 2, w + 2), np.uint8)
# floodfill operates on the saved image itself
cv.floodFill(self.current_image, mask, (0, 0), 0)
# append function to logs
self.logs.add_log('fill holes - opencv')
return self.current_image
def clear_border_skimage(self, buffer_size=3, bgval=1):
"""clear the borders of the image using a belt of pixels definable in buffer_size and
asign a pixel value of bgval
Parameters
----------
buffer_size: int
indicates the belt of pixels around the image border that should be considered to
eliminate touching objects (default is 3)
        bgval: int
all touching objects are set to this value (default is 1)
"""
# perform algorithm
image_inv = cv.bitwise_not(self.current_image)
image = clear_border(image_inv, buffer_size=buffer_size, bgval=bgval)
# update current image
self.current_image = image
# append function to logs
self.logs.add_log('clear border with buffer size {} and bgval {} '
'- skimage'.format(buffer_size, bgval))
return image
def erode_opencv(self, footprintsize=1):
"""erode detected edges with a given footprint. This function is meant to be used after dilation of the edges so to reset the original edge."""
kernel = np.ones((footprintsize, footprintsize), np.uint8)
image = cv.erode(self.current_image, kernel, iterations=1)
# update current image
self.current_image = image
# append function to logs
self.logs.add_log('erode with footprintsize {} '
'- opencv'.format(footprintsize))
return image
def what_have_i_done(self):
""" print the current log statements as a sequence of
performed steps"""
self.logs.print_log_sequence()
def plot(self):
"""plot the current image"""
fig, ax = plt.subplots()
ax.imshow(self.current_image, cmap=plt.cm.gray)
if len(self.logs.log) > 0:
ax.set_title(self.logs.log[-1])
return fig, ax
def _bubble_properties_table(binary_image):
"""provide a label for each bubble in the image"""
nbubbles, marker_image = cv.connectedComponents(1 - binary_image)
props = regionprops(marker_image)
bubble_properties = \
pd.DataFrame([{"label": bubble.label,
"area": bubble.area,
"centroid": bubble.centroid,
"convex_area": bubble.convex_area,
"equivalent_diameter": bubble.equivalent_diameter,
"perimeter": bubble.perimeter} for bubble in props])
bubble_properties["convexity"] = \
calculate_convexity(bubble_properties["perimeter"],
bubble_properties["area"])
bubble_properties["circularity_reciprocal"] = \
calculate_circularity_reciprocal(bubble_properties["perimeter"],
bubble_properties["area"])
bubble_properties = bubble_properties.set_index("label")
return nbubbles, marker_image, bubble_properties
def _bubble_properties_filter(property_table, id_image,
rules=DEFAULT_FILTERS):
"""exclude bubbles based on a set of rules
:return:
"""
bubble_props = property_table.copy()
all_ids = bubble_props.index.tolist()
for prop_name, ruleset in rules.items():
print(ruleset)
for rule, value in ruleset.items():
if rule == 'min':
bubble_props = \
bubble_props[bubble_props[prop_name] > value]
elif rule == 'max':
bubble_props = \
bubble_props[bubble_props[prop_name] < value]
else:
raise Exception("Rule not supported, "
"use min or max as filter")
removed_ids = [el for el in all_ids if el
not in bubble_props.index.tolist()]
for idb in removed_ids:
id_image[id_image == idb] = 0
return id_image, bubble_props
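# Illustrative sketch (not part of the pipeline): the `rules` argument of
# _bubble_properties_filter / bubble_properties_calculate maps a column of the
# property table to 'min'/'max' bounds. The property names below come from the
# table built in _bubble_properties_table; the threshold values are arbitrary
# placeholders, not calibrated defaults (see DEFAULT_FILTERS for those).
_EXAMPLE_FILTER_RULES = {
    "area": {"min": 30.0, "max": 5000.0},
    "equivalent_diameter": {"min": 5.0},
    "circularity_reciprocal": {"max": 1.5},
}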
def bubble_properties_calculate(binary_image,
rules=DEFAULT_FILTERS):
"""
:param binary_image:
:param rules:
:return:
"""
# get the bubble identifications and properties
nbubbles, id_image, \
prop_table = _bubble_properties_table(binary_image)
# filter based on the defined rules
id_image, properties = _bubble_properties_filter(prop_table,
id_image, rules)
return id_image, properties
def bubble_properties_plot(property_table,
which_property="equivalent_diameter",
bins=20):
"""calculate and create the distribution plot"""
fontsize_labels = 14.
formatter = FuncFormatter(
lambda y, pos: "{:d}%".format(int(round(y * 100))))
fig, ax1 = plt.subplots()
ax1.hist(property_table[which_property], bins,
normed=0, cumulative=False, histtype='bar',
color='gray', ec='white')
ax1.get_xaxis().tick_bottom()
# left axis - histogram
ax1.set_ylabel(r'Frequency', color='gray',
fontsize=fontsize_labels)
ax1.spines['top'].set_visible(False)
# right axis - cumul distribution
ax2 = ax1.twinx()
ax2.hist(property_table[which_property],
bins, normed=1, cumulative=True,
histtype='step', color='k', linewidth= 3.)
ax2.yaxis.set_major_formatter(formatter)
ax2.set_ylabel(r'Cumulative percentage (%)', color='k',
fontsize=fontsize_labels)
ax2.spines['top'].set_visible(False)
ax2.set_ylim(0, 1.)
# additional options
ax1.set_xlim(0, property_table[which_property].max())
ax1.tick_params(axis='x', which='both', pad=10)
ax1.set_xlabel(which_property)
return fig, (ax1, ax2)
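# Minimal usage sketch, assuming `binary_image` is the binary image produced by
# the detection pipeline above (e.g. adaptive threshold -> dilate -> fill holes
# -> clear border -> erode). The function name and rule values are illustrative
# only and not part of the original module.
def _example_bubble_analysis(binary_image, rules=DEFAULT_FILTERS):
    """label and filter the bubbles, then plot the distribution of their
    equivalent diameters"""
    id_image, properties = bubble_properties_calculate(binary_image, rules)
    fig, axes = bubble_properties_plot(properties,
                                       which_property="equivalent_diameter",
                                       bins=20)
    return id_image, properties, (fig, axes)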
|
|
from xscontainer import util
from xscontainer.util import log
import os
import tempfile
import threading
import XenAPI
XSCONTAINER_PRIVATE_SECRET_UUID = 'xscontainer-private-secret-uuid'
XSCONTAINER_PUBLIC_SECRET_UUID = 'xscontainer-public-secret-uuid'
XSCONTAINER_SSH_HOSTKEY = 'xscontainer-sshhostkey'
XSCONTAINER_USERNAME = 'xscontainer-username'
XSCONTAINER_MODE = 'xscontainer-mode'
NULLREF = 'OpaqueRef:NULL'
GLOBAL_XAPI_SESSION = None
GLOBAL_XAPI_SESSION_LOCK = threading.Lock()
def refresh_session_on_failure(func):
"""
Decorator method for refreshing the local session object if an exception
is raised during the API call.
"""
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exception:
log.error("Caught exception '%s'. Retrying with new session."
% (str(exception)))
reinit_global_xapi_session()
# Return the func undecorated
return func(*args, **kwargs)
return decorated
class XenAPIClient(object):
def __init__(self, session):
self.session = session
def get_session(self):
return self.session
def get_session_handle(self):
return self.get_session().handle
def get_all_vm_records(self):
return self.get_session().xenapi.VM.get_all_records()
@refresh_session_on_failure
def api_call(self, object_name, method, *args):
method_args = (self.get_session_handle(),) + args
method_name = "%s.%s" % (object_name, method)
res = getattr(self.get_session(), method_name)(*method_args)
return XenAPI._parse_result(res)
class LocalXenAPIClient(XenAPIClient):
"""
Localhost XenAPI client that uses a globally shared session.
"""
def __init__(self):
session = get_local_api_session()
super(LocalXenAPIClient, self).__init__(session)
def get_session(self):
return get_local_api_session()
@refresh_session_on_failure
def api_call(self, object_name, method, *args):
return super(LocalXenAPIClient, self).api_call(object_name, method,
*args)
class XenAPIObject(object):
OBJECT = None
ref = None
uuid = None
def __init__(self, client, ref=None, uuid=None):
if not ref and not uuid:
raise Exception("XenAPI object requires either a ref or a uuid.")
self.client = client
if uuid and not ref:
ref = self.client.api_call(self.OBJECT, "get_by_uuid", uuid)
self.ref = ref
self.uuid = uuid
def get_id(self):
return self.ref
def get_session(self):
return self.client.get_session()
def get_session_handle(self):
return self.get_session().handle
# @todo: for the case when a non-local global session is being used,
# this decorator unnecessarily retries on exception.
@refresh_session_on_failure
def api_call(self, method, *args):
method_args = (self.get_session_handle(), self.ref) + args
method_name = "%s.%s" % (self.OBJECT, method)
res = getattr(self.get_session(), method_name)(*method_args)
return XenAPI._parse_result(res)
def remove_from_other_config(self, key):
return self.api_call("remove_from_other_config", key)
class Host(XenAPIObject):
OBJECT = "Host"
class VM(XenAPIObject):
OBJECT = "VM"
def get_uuid(self):
if self.uuid is None:
self.uuid = self.get_session().xenapi.VM.get_uuid(self.ref)
return self.uuid
def get_other_config(self):
return self.client.get_session().xenapi.VM.get_other_config(self.ref)
def update_other_config(self, key, value):
# session.xenapi.VM.remove_from_other_config(vmref, name)
# session.xenapi.VM.add_to_other_config(vmref, name, value)
other_config = self.get_other_config()
other_config[key] = value
self.client.get_session().xenapi.VM.set_other_config(self.ref,
other_config)
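# Hedged usage sketch of the wrappers above: build a client for the local host
# and update a VM's other_config through the VM object. The function name is
# illustrative only; vm_uuid, key and value are supplied by the caller.
def _example_update_vm_other_config(vm_uuid, key, value):
    client = LocalXenAPIClient()
    vm = VM(client, uuid=vm_uuid)
    vm.update_other_config(key, value)
    return vm.get_other_config()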
def get_local_api_session():
global GLOBAL_XAPI_SESSION
# Prefer to use a global session object to keep all communication
# with the host on the same ref.
if GLOBAL_XAPI_SESSION is None:
GLOBAL_XAPI_SESSION = init_local_api_session()
return GLOBAL_XAPI_SESSION
def init_local_api_session():
session = XenAPI.xapi_local()
session.xenapi.login_with_password("root", "", "1.0", "xscontainer")
return session
def reinit_global_xapi_session():
global GLOBAL_XAPI_SESSION
# Make threadsafe
GLOBAL_XAPI_SESSION_LOCK.acquire()
GLOBAL_XAPI_SESSION = init_local_api_session()
GLOBAL_XAPI_SESSION_LOCK.release()
log.info("The Global XAPI session has been updated.")
return GLOBAL_XAPI_SESSION
def get_hi_mgmtnet_ref(session):
networkrecords = session.xenapi.network.get_all_records()
for networkref, networkrecord in networkrecords.iteritems():
if networkrecord['bridge'] == 'xenapi':
return networkref
def disable_gw_of_hi_mgmtnet_ref(session):
networkref = get_hi_mgmtnet_ref(session)
other_config = session.xenapi.network.get_other_config(networkref)
other_config['ip_disable_gw'] = 'true'
session.xenapi.network.set_other_config(networkref, other_config)
def get_hi_mgmtnet_device(session, vmuuid):
vmrecord = get_vm_record_by_uuid(session, vmuuid)
mgmtnet_ref = get_hi_mgmtnet_ref(session)
for vmvifref in vmrecord['VIFs']:
vifrecord = session.xenapi.VIF.get_record(vmvifref)
if vifrecord['network'] == mgmtnet_ref:
return vifrecord['device']
def get_vm_ips(session, vmuuid):
vmref = get_vm_ref_by_uuid(session, vmuuid)
guest_metrics = session.xenapi.VM.get_guest_metrics(vmref)
if guest_metrics != NULLREF:
ips = session.xenapi.VM_guest_metrics.get_networks(guest_metrics)
else:
# The VM is probably shut-down
ips = {}
return ips
def get_vm_is_running(session, vm_uuid):
vm_ref = session.xenapi.VM.get_by_uuid(vm_uuid)
vm_record = session.xenapi.VM.get_record(vm_ref)
return (vm_record['power_state'] == 'Running')
def get_hi_preferene_on(session):
pool = session.xenapi.pool.get_all()[0]
other_config = session.xenapi.pool.get_other_config(pool)
if ('xscontainer-use-hostinternalnetwork' in other_config and
(other_config['xscontainer-use-hostinternalnetwork'].lower()
in ['1', 'yes', 'true', 'on'])):
return True
# Return the default
return False
def get_this_host_uuid():
# ToDo: There must be a better way that also works with plugins?!?
uuid = None
filehandler = open("/etc/xensource-inventory", 'r')
try:
for line in filehandler.readlines():
if line.startswith("INSTALLATION_UUID"):
uuid = line.split("'")[1]
break
finally:
filehandler.close()
return uuid
def get_this_host_ref(session):
host_uuid = get_this_host_uuid()
host_ref = session.xenapi.host.get_by_uuid(host_uuid)
return host_ref
def call_plugin(session, hostref, plugin, function, args):
result = session.xenapi.host.call_plugin(hostref, plugin, function, args)
return result
def get_vm_record_by_uuid(session, vmuuid):
vmref = get_vm_ref_by_uuid(session, vmuuid)
vmrecord = session.xenapi.VM.get_record(vmref)
return vmrecord
def get_vm_ref_by_uuid(session, vmuuid):
vmref = session.xenapi.VM.get_by_uuid(vmuuid)
return vmref
def get_vm_records(session):
vmrecords = session.xenapi.VM.get_all_records()
return vmrecords
def get_vm_other_config(session, vmuuid):
vm_ref = get_vm_ref_by_uuid(session, vmuuid)
other_config = session.xenapi.VM.get_other_config(vm_ref)
return other_config
def _retry_device_exists(function, config, devicenumberfield):
devicenumber = 0
config[devicenumberfield] = str(devicenumber)
while True:
try:
ref = function(config)
return ref
except XenAPI.Failure as failure:
if (failure.details[0] != 'DEVICE_ALREADY_EXISTS' or
devicenumber > 20):
raise failure
devicenumber = devicenumber + 1
config[devicenumberfield] = str(devicenumber)
def create_vif(session, network, vmref):
devicenumber = 0
vifconfig = {'device': str(devicenumber),
'network': network,
'VM': vmref,
'MAC': "",
'MTU': "1500",
"qos_algorithm_type": "",
"qos_algorithm_params": {},
"other_config": {}
}
return _retry_device_exists(session.xenapi.VIF.create, vifconfig, 'device')
def create_vbd(session, vmref, vdiref, vbdmode, bootable,
other_config_keys={}):
vbdconf = {'VDI': vdiref,
'VM': vmref,
'userdevice': '1',
'type': 'Disk',
'mode': vbdmode,
'bootable': bootable,
'empty': False,
'other_config': other_config_keys,
'qos_algorithm_type': '',
'qos_algorithm_params': {}, }
return _retry_device_exists(session.xenapi.VBD.create, vbdconf,
'userdevice')
# ToDo: Ugly - this function may modify the file specified as filename
def import_disk(session, sruuid, filename, fileformat, namelabel,
other_config_keys={}):
log.info("import_disk file %s on sr %s" % (filename, sruuid))
targetsr = session.xenapi.SR.get_by_uuid(sruuid)
sizeinb = None
if fileformat == "vhd":
cmd = ['vhd-util', 'query', '-n', filename, '-v']
sizeinmb = util.runlocal(cmd)[1]
sizeinb = int(sizeinmb) * 1024 * 1024
elif fileformat == "raw":
sizeinb = os.path.getsize(filename)
# Workaround: can't otherwise import disks that aren't aligned to 2MB
newsizeinb = sizeinb + \
((2 * 1024 * 1024) - sizeinb % (2 * 1024 * 1024))
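        # Worked example of the rounding above: a 5 MiB raw file (5 * 2**20 B)
        # is padded to 6 MiB; a file already aligned to 2 MiB still gains a
        # further 2 MiB, since the remainder term is then zero.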
if sizeinb < newsizeinb:
log.info('Resizing raw disk from size %d to %d' %
(sizeinb, newsizeinb))
filehandle = open(filename, "r+b")
filehandle.seek(newsizeinb - 1)
filehandle.write("\0")
filehandle.close()
sizeinb = os.path.getsize(filename)
else:
raise Exception('Invalid fileformat: %s ' % fileformat)
log.info("Preparing vdi of size %d" % (sizeinb))
vdiconf = {'SR': targetsr, 'virtual_size': str(sizeinb), 'type': 'system',
'sharable': False, 'read_only': False, 'other_config': {},
'name_label': namelabel}
vdiref = session.xenapi.VDI.create(vdiconf)
other_config = session.xenapi.VDI.get_other_config(vdiref)
for key, value in other_config_keys.iteritems():
other_config[key] = value
session.xenapi.VDI.set_other_config(vdiref, other_config)
vdiuuid = session.xenapi.VDI.get_record(vdiref)['uuid']
cmd = ['curl', '-k', '--upload', filename,
'https://localhost/import_raw_vdi?session_id=%s&vdi=%s&format=%s'
% (session.handle, vdiuuid, fileformat)]
util.runlocal(cmd)
return vdiref
def export_disk(session, vdiuuid):
log.info("export_disk vdi %s" % (vdiuuid))
filename = tempfile.mkstemp(suffix='.raw')[1]
cmd = ['curl', '-L', '-k', '-o', filename,
'https://localhost/export_raw_vdi?session_id=%s&vdi=%s&format=raw'
% (session.handle, vdiuuid)]
util.runlocal(cmd)
return filename
def get_default_sr(session):
pool = session.xenapi.pool.get_all()[0]
default_sr = session.xenapi.pool.get_default_SR(pool)
return default_sr
def get_value_from_vm_other_config(session, vmuuid, name):
other_config = get_vm_other_config(session, vmuuid)
if name in other_config:
return other_config[name]
else:
return None
def update_vm_other_config(session, vm_uuid, kvpairs):
vm_ref = get_vm_ref_by_uuid(session, vm_uuid)
# session.xenapi.VM.remove_from_other_config(vmref, name)
# session.xenapi.VM.add_to_other_config(vmref, name, value)
other_config = session.xenapi.VM.get_other_config(vm_ref)
for key, value in kvpairs.items():
other_config[key] = value
session.xenapi.VM.set_other_config(vm_ref, other_config)
def get_idrsa_secret(session, secret_type):
other_config = get_pool_other_config(session)
if (XSCONTAINER_PRIVATE_SECRET_UUID not in other_config or
XSCONTAINER_PUBLIC_SECRET_UUID not in other_config):
set_idrsa_secret(session)
other_config = get_pool_other_config(session)
secret_uuid = other_config[secret_type]
secret_ref = session.xenapi.secret.get_by_uuid(secret_uuid)
secret_record = session.xenapi.secret.get_record(secret_ref)
return secret_record['value']
def get_idrsa_secret_private(session):
return get_idrsa_secret(session, XSCONTAINER_PRIVATE_SECRET_UUID)
def get_idrsa_secret_public(session):
return get_idrsa_secret(session, XSCONTAINER_PUBLIC_SECRET_UUID)
def get_idrsa_secret_public_keyonly(session):
return get_idrsa_secret_public(session).split(' ')[1]
def set_idrsa_secret(session):
log.info("set_idrsa_secret is generating a new secret")
(privateidrsa, publicidrsa) = util.create_idrsa()
set_pool_other_config_values(session, {
XSCONTAINER_PRIVATE_SECRET_UUID:
create_secret_return_uuid(session, privateidrsa),
XSCONTAINER_PUBLIC_SECRET_UUID:
create_secret_return_uuid(session, publicidrsa)
})
def create_secret_return_uuid(session, value):
secret_ref = session.xenapi.secret.create({'value': value})
secret_record = session.xenapi.secret.get_record(secret_ref)
return secret_record['uuid']
def get_pool_other_config(session):
pool_ref = session.xenapi.pool.get_all()[0]
other_config = session.xenapi.pool.get_other_config(pool_ref)
return other_config
def set_pool_other_config_values(session, values_to_set):
other_config = get_pool_other_config(session)
for key, value in values_to_set.iteritems():
other_config[key] = value
pool_ref = session.xenapi.pool.get_all()[0]
session.xenapi.pool.set_other_config(pool_ref, other_config)
def get_suitable_vm_ips(session, vmuuid, port):
ips = get_vm_ips(session, vmuuid)
stage1filteredips = []
for address in ips.itervalues():
if ':' not in address:
# If we get here - it's ipv4
if address.startswith('169.254.'):
# we prefer host internal networks and put them at the front
stage1filteredips.insert(0, address)
else:
stage1filteredips.append(address)
else:
# Ignore ipv6 as Dom0 won't be able to use it
pass
ipfound = False
for address in stage1filteredips:
if util.test_connection(address, port):
ipfound = True
yield address
if not ipfound:
raise util.XSContainerException(
"No valid IP found for vmuuid %s" % (vmuuid))
def get_suitable_vm_ip(session, vmuuid, port):
return get_suitable_vm_ips(session, vmuuid, port).next()
def get_vm_xscontainer_username(session, vmuuid):
username = get_value_from_vm_other_config(session, vmuuid,
XSCONTAINER_USERNAME)
if username is None:
        # assume CoreOS's "core" user by default
username = 'core'
return username
def set_vm_xscontainer_username(session, vmuuid, newusername):
update_vm_other_config(
session, vmuuid, {XSCONTAINER_USERNAME: newusername})
def get_vm_xscontainer_mode(session, vmuuid):
mode = get_value_from_vm_other_config(session, vmuuid,
XSCONTAINER_MODE)
if mode is None:
        # default to ssh, as that is what came first
mode = 'ssh'
assert mode in ('ssh', 'tls')
return mode
def set_vm_xscontainer_mode(session, vmuuid, mode):
assert mode in ('ssh', 'tls')
update_vm_other_config(session, vmuuid, {XSCONTAINER_MODE: mode})
def send_message(session, vm_uuid, title, body):
message_prio_warning = "3"
message_type_vm = "VM"
message_ref = session.xenapi.message.create(title, message_prio_warning,
message_type_vm, vm_uuid, body)
return message_ref
def destroy_message(session, message_ref):
session.xenapi.message.destroy(message_ref)
def get_ssh_hostkey(session, vm_uuid):
return get_value_from_vm_other_config(session, vm_uuid,
XSCONTAINER_SSH_HOSTKEY)
def set_ssh_hostkey(session, vm_uuid, host_key):
update_vm_other_config(session, vm_uuid,
{XSCONTAINER_SSH_HOSTKEY: host_key})
def get_host_ref_for_sr_uuid(session, sr_uuid):
sr_ref = session.xenapi.SR.get_by_uuid(sr_uuid)
return get_host_ref_for_sr_ref(session, sr_ref)
def get_host_ref_for_sr_ref(session, sr_ref):
pbd_refs = session.xenapi.SR.get_PBDs(sr_ref)
host_ref = None
for pbd_ref in pbd_refs:
pbd_record = session.xenapi.PBD.get_record(pbd_ref)
if pbd_record['currently_attached']:
host_ref = pbd_record['host']
break
return host_ref
def get_host_ref_for_vdi_uuid(session, vdi_uuid):
vdi_ref = session.xenapi.VDI.get_by_uuid(vdi_uuid)
vdi_record = session.xenapi.VDI.get_record(vdi_ref)
return get_host_ref_for_sr_ref(session, vdi_record['SR'])
def get_host_ref_for_vm_uuid(session, vm_uuid):
vm_record = get_vm_record_by_uuid(session, vm_uuid)
host_ref = None
if 'resident_on' in vm_record and vm_record['resident_on'] != NULLREF:
host_ref = vm_record['resident_on']
return host_ref
def get_cd_vbd_ref(session, vm_uuid):
vm_record = get_vm_record_by_uuid(session, vm_uuid)
for vbd_ref in vm_record['VBDs']:
vbd_record = session.xenapi.VBD.get_record(vbd_ref)
if vbd_record['type'] == 'CD':
return vbd_ref
return None
def get_first_sr_uuid(session, vm_uuid):
""" Returns the SR of the first VBD of a VM. May be used as a suggestion
to place further disks for a VM, as the SR can asserted to be
reachable by the VM. It is not necessarilly the SR of the rootdisk of
the VM."""
vm_record = get_vm_record_by_uuid(session, vm_uuid)
for vbd_ref in vm_record['VBDs']:
vbd_record = session.xenapi.VBD.get_record(vbd_ref)
if vbd_record['type'] == 'Disk' and vbd_record['VDI'] != NULLREF:
vdi_ref = vbd_record['VDI']
vdi_record = session.xenapi.VDI.get_record(vdi_ref)
if vdi_record['SR'] != NULLREF:
sr_ref = vdi_record['SR']
sr_record = session.xenapi.SR.get_record(sr_ref)
return sr_record['uuid']
return None
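# Hedged sketch of how the helpers above might be combined to choose an SR for
# an additional disk: prefer the SR already used by the VM's first disk and
# fall back to the pool default. The function name is illustrative only.
def _example_pick_sr_uuid_for_vm(session, vm_uuid):
    sr_uuid = get_first_sr_uuid(session, vm_uuid)
    if sr_uuid is None:
        default_sr_ref = get_default_sr(session)
        sr_uuid = session.xenapi.SR.get_record(default_sr_ref)['uuid']
    return sr_uuid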
|
|
from bottle import request, response
from six.moves.urllib.parse import quote
import os
import datetime
from webrecorder.basecontroller import BaseController, wr_api_spec
from webrecorder.webreccork import ValidationException
from webrecorder.models.base import DupeNameException
from webrecorder.models.datshare import DatShare
from webrecorder.utils import get_bool
from pywb.warcserver.index.cdxobject import CDXObject
# ============================================================================
class CollsController(BaseController):
def __init__(self, *args, **kwargs):
super(CollsController, self).__init__(*args, **kwargs)
config = kwargs['config']
self.solr_mgr = kwargs.get('solr_mgr')
self.allow_external = get_bool(os.environ.get('ALLOW_EXTERNAL', False))
self.is_search_auto = get_bool(os.environ.get('SEARCH_AUTO', False))
def init_routes(self):
wr_api_spec.set_curr_tag('Collections')
@self.app.post('/api/v1/collections')
@self.api(query=['user'],
req=['title', 'public', 'public_index'],
resp='collection')
def create_collection():
user = self.get_user(api=True, redir_check=False)
data = request.json or {}
title = data.get('title', '')
coll_name = self.sanitize_title(title)
if not coll_name:
self._raise_error(400, 'invalid_coll_name')
is_public = data.get('public', False)
is_public_index = data.get('public_index', False)
is_external = data.get('external', False)
is_anon = self.access.is_anon(user)
if is_external:
if not self.allow_external:
self._raise_error(403, 'external_not_allowed')
#if not is_anon:
# self._raise_error(400, 'not_valid_for_external')
elif is_anon:
if coll_name != 'temp':
self._raise_error(400, 'invalid_temp_coll_name')
if user.has_collection(coll_name):
self._raise_error(400, 'duplicate_name')
try:
collection = user.create_collection(coll_name, title=title,
desc='', public=is_public,
public_index=is_public_index)
if is_external:
collection.set_external(True)
# if auto-indexing is on, mark new collections as auto-indexed to distinguish from prev collections
if self.is_search_auto:
collection.set_bool_prop('autoindexed', True)
user.mark_updated()
self.flash_message('Created collection <b>{0}</b>!'.format(collection.get_prop('title')), 'success')
resp = {'collection': collection.serialize()}
except DupeNameException as de:
self._raise_error(400, 'duplicate_name')
except Exception as ve:
print(ve)
self.flash_message(str(ve))
self._raise_error(400, 'duplicate_name')
return resp
@self.app.get('/api/v1/collections')
@self.api(query=['user', 'include_recordings', 'include_lists', 'include_pages'],
resp='collections')
def get_collections():
user = self.get_user(api=True, redir_check=False)
kwargs = {'include_recordings': get_bool(request.query.get('include_recordings')),
'include_lists': get_bool(request.query.get('include_lists')),
'include_pages': get_bool(request.query.get('include_pages')),
}
collections = user.get_collections()
return {'collections': [coll.serialize(**kwargs) for coll in collections]}
@self.app.get('/api/v1/collection/<coll_name>')
@self.api(query=['user'],
resp='collection')
def get_collection(coll_name):
user = self.get_user(api=True, redir_check=False)
return self.get_collection_info(coll_name, user=user)
@self.app.delete('/api/v1/collection/<coll_name>')
@self.api(query=['user'],
resp='deleted')
def delete_collection(coll_name):
user, collection = self.load_user_coll(coll_name=coll_name)
errs = user.remove_collection(collection, delete=True)
if errs.get('error'):
return self._raise_error(400, errs['error'])
else:
return {'deleted_id': coll_name}
@self.app.put('/api/v1/collection/<coll_name>/warc')
def add_external_warc(coll_name):
if not self.allow_external:
self._raise_error(403, 'external_not_allowed')
user, collection = self.load_user_coll(coll_name=coll_name)
self.access.assert_can_admin_coll(collection)
if not collection.is_external():
self._raise_error(400, 'external_only')
num_added = collection.add_warcs(request.json.get('warcs', {}))
return {'success': num_added}
@self.app.put('/api/v1/collection/<coll_name>/cdx')
def add_external_cdxj(coll_name):
if not self.allow_external:
self._raise_error(403, 'external_not_allowed')
user, collection = self.load_user_coll(coll_name=coll_name)
self.access.assert_can_admin_coll(collection)
if not collection.is_external():
self._raise_error(400, 'external_only')
num_added = collection.add_cdxj(request.body.read())
return {'success': num_added}
@self.app.post('/api/v1/collection/<coll_name>')
@self.api(query=['user'],
req=['title', 'desc', 'public', 'public_index'],
resp='collection')
def update_collection(coll_name):
user, collection = self.load_user_coll(coll_name=coll_name)
self.access.assert_can_admin_coll(collection)
data = request.json or {}
if 'title' in data:
new_coll_title = data['title']
new_coll_name = self.sanitize_title(new_coll_title)
if not new_coll_name:
self._raise_error(400, 'invalid_coll_name')
try:
new_coll_name = user.colls.rename(collection, new_coll_name, allow_dupe=False)
except DupeNameException as de:
self._raise_error(400, 'duplicate_name')
collection['title'] = new_coll_title
if 'desc' in data:
collection['desc'] = data['desc']
# TODO: notify the user if this is a request from the admin panel
if 'public' in data:
#if self.access.is_superuser() and data.get('notify'):
# pass
collection.set_public(data['public'])
if 'public_index' in data:
collection.set_bool_prop('public_index', data['public_index'])
collection.mark_updated()
return {'collection': collection.serialize()}
@self.app.get('/api/v1/collection/<coll_name>/page_bookmarks')
@self.api(query=['user'],
resp='bookmarks')
def get_page_bookmarks(coll_name):
user, collection = self.load_user_coll(coll_name=coll_name)
rec = request.query.get('rec')
if rec:
recording = collection.get_recording(rec)
if not recording:
return {'page_bookmarks': {}}
rec_pages = collection.list_rec_pages(recording)
else:
rec_pages = None
return {'page_bookmarks': collection.get_all_page_bookmarks(rec_pages)}
@self.app.get('/api/v1/url_search')
def do_url_search():
user, collection = self.load_user_coll()
results = []
search = request.query.getunicode('search', '').lower()
url_query = request.query.getunicode('url', '').lower()
has_query = search or url_query
ts_from = request.query.getunicode('from')
ts_to = request.query.getunicode('to')
date_filter = ts_from and ts_to
if date_filter:
try:
ts_from = int(ts_from)
ts_to = int(ts_to)
except ValueError:
date_filter = False
session = request.query.getunicode('session')
# remove trailing comma,
mimes = request.query.getunicode('mime', '').rstrip(',')
mimes = mimes.split(',') if mimes else []
            # search pages when 'text/html' is requested or when no mime filter is supplied
if 'text/html' in mimes or len(mimes) == 0:
try:
mimes.remove('text/html')
except ValueError:
pass
# shortcut empty search
if not has_query and not date_filter and not session:
results = collection.list_pages()
else:
for page in collection.list_pages():
# check for legacy hidden flag
if page.get('hidden', False):
continue
if date_filter:
try:
# trim seconds
ts = int(page['timestamp'][:12])
except ValueError:
continue
if ts < ts_from or ts > ts_to:
continue
if session and page['rec'] != session:
continue
if search and search not in page.get('title', '').lower():
continue
if url_query and url_query not in page['url'].lower():
continue
results.append(page)
# search non-page cdx
if len(mimes):
for line, _ in collection.get_cdxj_iter():
cdxj = CDXObject(line.encode('utf-8'))
if date_filter:
try:
# trim seconds
ts = int(cdxj['timestamp'][:12])
except ValueError:
continue
if ts < ts_from or ts > ts_to:
continue
if search and search not in cdxj['url'].lower():
continue
if url_query and url_query not in cdxj['url'].lower():
continue
if mimes and not any(cdxj['mime'].startswith(mime) for mime in mimes):
continue
results.append({'url': cdxj['url'],
'timestamp': cdxj['timestamp'],
'mime': cdxj['mime']})
return {'results': results}
@self.app.get('/api/v1/text_search')
def do_text_search():
if not self.solr_mgr:
self._raise_error(400, 'not_supported')
user, collection = self.load_user_coll()
return self.solr_mgr.query_solr(collection.my_id, request.query)
# DAT
@self.app.post('/api/v1/collection/<coll_name>/dat/share')
def dat_do_share(coll_name):
user, collection = self.load_user_coll(coll_name=coll_name)
# BETA only
self.require_admin_beta_access(collection)
try:
data = request.json or {}
result = DatShare.dat_share.share(collection, data.get('always_update', False))
except Exception as e:
result = {'error': 'api_error', 'details': str(e)}
if 'error' in result:
self._raise_error(400, result['error'])
return result
@self.app.post('/api/v1/collection/<coll_name>/dat/unshare')
def dat_do_unshare(coll_name):
user, collection = self.load_user_coll(coll_name=coll_name)
# BETA only
self.require_admin_beta_access(collection)
try:
result = DatShare.dat_share.unshare(collection)
except Exception as e:
result = {'error': 'api_error', 'details': str(e)}
if 'error' in result:
self._raise_error(400, result['error'])
return result
@self.app.post('/api/v1/collection/<coll_name>/commit')
def commit_file(coll_name):
user, collection = self.load_user_coll(coll_name=coll_name)
self.access.assert_can_admin_coll(collection)
data = request.json or {}
res = collection.commit_all(data.get('commit_id'))
if not res:
return {'success': True}
else:
return {'commit_id': res}
@self.app.post('/api/v1/collection/<coll_name>/generate_derivs')
def generate_derivs(coll_name):
user, collection = self.load_user_coll(coll_name=coll_name)
self.access.assert_can_admin_coll(collection)
if not self.is_search_auto:
self._raise_error(400, 'not_supported')
            title = 'Derivatives Regenerated on ' + datetime.datetime.now().isoformat()
derivs_recording = collection.create_recording(title=title,
rec_type='derivs')
res = collection.requeue_pages_for_derivs(derivs_recording.my_id, get_bool(request.query.get('include_existing')))
if res > 0:
collection.set_bool_prop('autoindexed', True)
return {'queued': res}
# LEGACY ENDPOINTS (to remove)
# Collection view (all recordings)
@self.app.get(['/<user>/<coll_name>', '/<user>/<coll_name>/'])
@self.jinja2_view('collection_info.html')
def coll_info(user, coll_name):
return self.get_collection_info_for_view(user, coll_name)
@self.app.get(['/<user>/<coll_name>/<rec_list:re:([\w,-]+)>', '/<user>/<coll_name>/<rec_list:re:([\w,-]+)>/'])
@self.jinja2_view('collection_info.html')
        def coll_info_rec_list(user, coll_name, rec_list):
#rec_list = [self.sanitize_title(title) for title in rec_list.split(',')]
return self.get_collection_info_for_view(user, coll_name)
wr_api_spec.set_curr_tag(None)
def get_collection_info_for_view(self, user, coll_name):
self.redir_host()
result = self.get_collection_info(coll_name, user=user, include_pages=True)
result['coll'] = result['collection']['id']
result['coll_name'] = result['coll']
result['coll_title'] = quote(result['collection']['title'])
#if not result or result.get('error'):
# self._raise_error(404, 'Collection not found')
return result
def get_collection_info(self, coll_name, user=None, include_pages=False):
user, collection = self.load_user_coll(user=user, coll_name=coll_name)
result = {'collection': collection.serialize(include_rec_pages=include_pages,
include_lists=True,
include_recordings=True,
include_pages=True,
check_slug=coll_name)}
result['user'] = user.my_id
result['size_remaining'] = user.get_size_remaining()
return result
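# Hedged client-side sketch (not part of the controller): creating a collection
# through the POST /api/v1/collections route registered above. The base URL,
# user and title values are placeholders, and `requests` is only a convenient
# stand-in for whatever HTTP client a deployment actually uses.
def _example_create_collection(base_url, user, title, public=False):
    import requests
    resp = requests.post(
        '{0}/api/v1/collections'.format(base_url),
        params={'user': user},
        json={'title': title, 'public': public, 'public_index': False})
    resp.raise_for_status()
    return resp.json()['collection']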
|
|
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
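# The two parametrised kinds correspond to the two SparseIndex layouts pandas
# supports: "integer" stores the explicit integer positions of the non-fill
# values (IntIndex), while "block" stores (start, length) runs of them
# (BlockIndex).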
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype="int")
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype("int64", -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype("int64")
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype="Sparse[int32]")
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype("int32")
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(["A", "A", np.nan, "B"], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(["A", "A", np.nan, "B"], dtype=np.object, fill_value="A")
assert arr.dtype == SparseDtype(np.object, "A")
assert arr.fill_value == "A"
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind="integer")
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(
data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64,
fill_value=0,
)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(
data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64
)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(
data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None,
fill_value=0,
)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [None, IntIndex(1, [0])])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(
data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None
)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize(
"data, fill_value",
[
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp("2017-01-01")], pd.NaT),
],
)
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
@pytest.mark.parametrize(
"size",
[pytest.param(0, marks=td.skip_if_np_lt("1.16", reason="NumPy-11383")), 10],
)
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format="csc")
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize(
"scalar,dtype",
[
(False, SparseDtype(bool, False)),
(0.0, SparseDtype("float64", 0)),
(1, SparseDtype("int64", 1)),
("z", SparseDtype("object", "z")),
],
)
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3], kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray(
[0, 0, 0, 1, 1, 2], dtype=np.int64, kind=kind, fill_value=fill
)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray(
[True, False, True, True], dtype=np.bool, kind=kind, fill_value=fill
)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize("fill_value", [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]), fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]), fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'."
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind="block")
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind="block")
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1.0, np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(
arr.sp_index.indices, np.array([0, 2], dtype=np.int32)
)
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype("float32"))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(
np.array([0.0, 2.0], dtype=dtype.subtype), IntIndex(4, [2, 3]), dtype
)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(
np.array([0, 2], dtype=np.int64), IntIndex(4, [2, 3]), dtype
)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match="NA"):
arr.astype("Sparse[i8]")
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True], dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray(
[True, False, False, True], dtype=SparseDtype(bool, False)
)
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()), vals.astype(typ))
@pytest.mark.parametrize(
"array, dtype, expected",
[
(
SparseArray([0, 1]),
"float",
SparseArray([0.0, 1.0], dtype=SparseDtype(float, 0.0)),
),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(
SparseArray([0, 1], fill_value=1),
bool,
SparseArray([False, True], dtype=SparseDtype(bool, True)),
),
pytest.param(
SparseArray([0, 1]),
"datetime64[ns]",
SparseArray(
np.array([0, 1], dtype="datetime64[ns]"),
dtype=SparseDtype("datetime64[ns]", pd.Timestamp("1970")),
),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(
SparseArray([0, 1, 10]),
str,
SparseArray(["0", "1", "10"], dtype=SparseDtype(str, "0")),
),
(SparseArray(["10", "20"]), float, SparseArray([10.0, 20.0])),
(
SparseArray([0, 1, 0]),
object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0)),
),
],
)
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match="Cannot convert non-finite"):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1.0, np.nan, 2.0], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize(
"data,shape,dtype",
[
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(["A", "A", np.nan, "B"], (4,), np.object),
],
)
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize(
"vals",
[
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
],
)
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:,] # noqa: E231
exp = SparseArray(dense[4:,]) # noqa: E231
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:,] # noqa: E231
exp = SparseArray(dense[4:,], fill_value=0) # noqa: E231
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(
op(first.to_dense(), second.to_dense()), fill_value=first.fill_value
)
assert isinstance(res, SparseArray)
assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.to_dense(), exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action="always", category=DeprecationWarning)
warnings.filterwarnings(action="always", category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0.0, 0.0, 0.0, 0.0])
res = s.fillna(-1)
exp = SparseArray([0.0, 0.0, 0.0, 0.0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
def test_nonzero(self):
# Tests regression #21172.
sa = pd.SparseArray([float("nan"), float("nan"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
expected = np.array([2, 5, 9], dtype=np.int32)
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
class TestSparseArrayAnalytics:
@pytest.mark.parametrize(
"data,pos,neg",
[
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0),
],
)
def test_all(self, data, pos, neg):
# GH 17570
out = SparseArray(data).all()
assert out
out = SparseArray(data, fill_value=pos).all()
assert out
data[1] = neg
out = SparseArray(data).all()
assert not out
out = SparseArray(data, fill_value=pos).all()
assert not out
@pytest.mark.parametrize(
"data,pos,neg",
[
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0),
],
)
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_all(self, data, pos, neg):
# GH 17570
out = np.all(SparseArray(data))
assert out
out = np.all(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.all(SparseArray(data))
assert not out
out = np.all(SparseArray(data, fill_value=pos))
assert not out
# raises with a different message on py2.
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.all(SparseArray(data), out=np.array([]))
@pytest.mark.parametrize(
"data,pos,neg",
[
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0),
],
)
def test_any(self, data, pos, neg):
# GH 17570
out = SparseArray(data).any()
assert out
out = SparseArray(data, fill_value=pos).any()
assert out
data[1] = neg
out = SparseArray(data).any()
assert not out
out = SparseArray(data, fill_value=pos).any()
assert not out
@pytest.mark.parametrize(
"data,pos,neg",
[
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0),
],
)
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_any(self, data, pos, neg):
# GH 17570
out = np.any(SparseArray(data))
assert out
out = np.any(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.any(SparseArray(data))
assert not out
out = np.any(SparseArray(data, fill_value=pos))
assert not out
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.any(SparseArray(data), out=out)
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), out=out)
@pytest.mark.parametrize(
"data,expected",
[
(
np.array([1, 2, 3, 4, 5], dtype=float), # non-null data
SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0])),
),
(
np.array([1, 2, np.nan, 4, 5], dtype=float), # null data
SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])),
),
],
)
@pytest.mark.parametrize("numpy", [True, False])
def test_cumsum(self, data, expected, numpy):
cumsum = np.cumsum if numpy else lambda s: s.cumsum()
out = cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
if numpy: # numpy compatibility checks.
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), out=out)
else:
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with pytest.raises(ValueError, match=msg):
SparseArray(data).cumsum(axis=axis)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
@pytest.mark.parametrize("fill_value", [0.0, np.nan])
def test_modf(self, fill_value):
# https://github.com/pandas-dev/pandas/issues/26946
sparse = pd.SparseArray([fill_value] * 10 + [1.1, 2.2], fill_value=fill_value)
r1, r2 = np.modf(sparse)
e1, e2 = np.modf(np.asarray(sparse))
tm.assert_sp_array_equal(r1, pd.SparseArray(e1, fill_value=fill_value))
tm.assert_sp_array_equal(r2, pd.SparseArray(e2, fill_value=fill_value))
def test_nbytes_integer(self):
arr = SparseArray([1, 0, 0, 0, 2], kind="integer")
result = arr.nbytes
# (2 * 8) + 2 * 4
assert result == 24
def test_nbytes_block(self):
arr = SparseArray([1, 2, 0, 0, 0], kind="block")
result = arr.nbytes
# (2 * 8) + 4 + 4
# sp_values, blocs, blengths
assert result == 24
def test_asarray_datetime64(self):
s = pd.SparseArray(pd.to_datetime(["2012", None, None, "2013"]))
np.asarray(s)
def test_density(self):
arr = SparseArray([0, 1])
assert arr.density == 0.5
def test_npoints(self):
arr = SparseArray([0, 1])
assert arr.npoints == 1
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestAccessor:
@pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"])
def test_get_attributes(self, attr):
arr = SparseArray([0, 1])
ser = pd.Series(arr)
result = getattr(ser.sparse, attr)
expected = getattr(arr, attr)
assert result == expected
@td.skip_if_no_scipy
def test_from_coo(self):
import scipy.sparse
row = [0, 3, 1, 0]
col = [0, 3, 1, 2]
data = [4, 5, 7, 9]
sp_array = scipy.sparse.coo_matrix((data, (row, col)))
result = pd.Series.sparse.from_coo(sp_array)
index = pd.MultiIndex.from_arrays([[0, 0, 1, 3], [0, 2, 1, 3]])
expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]")
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_to_coo(self):
import scipy.sparse
ser = pd.Series(
[1, 2, 3],
index=pd.MultiIndex.from_product([[0], [1, 2, 3]], names=["a", "b"]),
dtype="Sparse[int]",
)
A, _, _ = ser.sparse.to_coo()
assert isinstance(A, scipy.sparse.coo.coo_matrix)
def test_non_sparse_raises(self):
ser = pd.Series([1, 2, 3])
with pytest.raises(AttributeError, match=".sparse"):
ser.sparse.density
def test_setting_fill_value_fillna_still_works():
# This is why letting users update fill_value / dtype is bad
# astype has the same problem.
arr = SparseArray([1.0, np.nan, 1.0], fill_value=0.0)
arr.fill_value = np.nan
result = arr.isna()
# Can't do direct comparison, since the sp_index will be different
# So let's convert to ndarray and check there.
result = np.asarray(result)
expected = np.array([False, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_setting_fill_value_updates():
arr = SparseArray([0.0, np.nan], fill_value=0)
arr.fill_value = np.nan
# use private constructor to get the index right
# otherwise both nans would be un-stored.
expected = SparseArray._simple_new(
sparse_array=np.array([np.nan]),
sparse_index=IntIndex(2, [1]),
dtype=SparseDtype(float, np.nan),
)
tm.assert_sp_array_equal(arr, expected)
@pytest.mark.parametrize(
"arr, loc",
[
([None, 1, 2], 0),
([0, None, 2], 1),
([0, 1, None], 2),
([0, 1, 1, None, None], 3),
([1, 1, 1, 2], -1),
([], -1),
],
)
def test_first_fill_value_loc(arr, loc):
result = SparseArray(arr)._first_fill_value_loc()
assert result == loc
@pytest.mark.parametrize(
"arr", [[1, 2, np.nan, np.nan], [1, np.nan, 2, np.nan], [1, 2, np.nan]]
)
@pytest.mark.parametrize("fill_value", [np.nan, 0, 1])
def test_unique_na_fill(arr, fill_value):
a = pd.SparseArray(arr, fill_value=fill_value).unique()
b = pd.Series(arr).unique()
assert isinstance(a, SparseArray)
a = np.asarray(a)
tm.assert_numpy_array_equal(a, b)
def test_unique_all_sparse():
# https://github.com/pandas-dev/pandas/issues/23168
arr = SparseArray([0, 0])
result = arr.unique()
expected = SparseArray([0])
tm.assert_sp_array_equal(result, expected)
def test_map():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, 12], fill_value=10)
# dict
result = arr.map({0: 10, 1: 11, 2: 12})
tm.assert_sp_array_equal(result, expected)
# series
result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
tm.assert_sp_array_equal(result, expected)
# function
result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
expected = SparseArray([10, 11, 12], fill_value=10)
tm.assert_sp_array_equal(result, expected)
def test_map_missing():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, None], fill_value=10)
result = arr.map({0: 10, 1: 11})
tm.assert_sp_array_equal(result, expected)
def test_deprecated_values():
arr = SparseArray([0, 1, 2])
with tm.assert_produces_warning(FutureWarning):
result = arr.values
tm.assert_numpy_array_equal(result, arr.to_dense())
|
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/\
Johnson%E2%80%93Lindenstrauss_lemma
"""
import sys
from time import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.fixes import parse_version
# `normed` is being deprecated in favor of `density` in histograms
if parse_version(matplotlib.__version__) >= parse_version("2.1"):
density_param = {"density": True}
else:
density_param = {"normed": True}
# %%
# Theoretical bounds
# ==================
# The distortion introduced by a random projection `p` is controlled by
# the fact that `p` defines an eps-embedding with good probability,
# as defined by:
#
# .. math::
# (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
#
# Where u and v are any rows taken from a dataset of shape (n_samples,
# n_features) and p is a projection by a random Gaussian N(0, 1) matrix
# of shape (n_components, n_features) (or a sparse Achlioptas matrix).
#
# The minimum number of components needed to guarantee the eps-embedding is
# given by:
#
# .. math::
# n\_components \geq 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
#
#
# The first plot shows that with an increasing number of samples ``n_samples``,
# the minimal number of dimensions ``n_components`` increases logarithmically
# in order to guarantee an ``eps``-embedding.
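#
# As a quick numeric illustration of the bound above, we can evaluate its
# right-hand side directly and compare it with
# :func:`sklearn.random_projection.johnson_lindenstrauss_min_dim`; the two
# should agree up to integer truncation (the values below are chosen
# arbitrarily for illustration).
n_check, eps_check = 1e6, 0.1
manual_bound = 4 * np.log(n_check) / (eps_check ** 2 / 2 - eps_check ** 3 / 3)
helper_bound = johnson_lindenstrauss_min_dim(n_check, eps=eps_check)
print(
    "JL bound for n_samples=%g, eps=%.2f: manual=%.1f, helper=%d"
    % (n_check, eps_check, manual_bound, helper_bound)
)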
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
# %%
# The second plot shows that increasing the admissible distortion ``eps``
# drastically reduces the minimal number of dimensions ``n_components`` for
# a given number of samples ``n_samples``.
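#
# For instance, with ``n_samples`` fixed at 1e6, relaxing ``eps`` from 0.1 to
# 0.5 reduces the required ``n_components`` by more than an order of
# magnitude, as a quick check with the same helper shows (a small
# illustration, printed rather than plotted):
print(
    "Minimum n_components for n_samples=1e6: eps=0.1 -> %d, eps=0.5 -> %d"
    % tuple(johnson_lindenstrauss_min_dim(1e6, eps=[0.1, 0.5]))
)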
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
# %%
# Empirical validation
# ====================
#
# We validate the above bounds on the 20 newsgroups text document
# (TF-IDF word frequencies) dataset or on the digits dataset:
#
# - for the 20 newsgroups dataset some 500 documents with 100k
# features in total are projected using a sparse random matrix to smaller
# euclidean spaces with various values for the target number of dimensions
# ``n_components``.
#
# - for the digits dataset, the 8x8 gray-level pixel data for 500
#   handwritten digit pictures are randomly projected to spaces of various
#   larger numbers of dimensions ``n_components``.
#
# The default dataset is the 20 newsgroups dataset. To run the example on the
# digits dataset, pass the ``--use-digits-dataset`` command line argument to
# this script.
if "--use-digits-dataset" in sys.argv:
data = load_digits().data[:500]
else:
data = fetch_20newsgroups_vectorized().data[:500]
# %%
# For each value of ``n_components``, we plot:
#
# - 2D distribution of sample pairs with pairwise distances in original
# and projected spaces as x and y axis respectively.
#
# - 1D histogram of the ratio of those distances (projected / original).
n_samples, n_features = data.shape
print(
"Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features)
)
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print(
"Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0)
)
if hasattr(rp, "components_"):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(projected_data, squared=True).ravel()[nonzero]
plt.figure()
min_dist = min(projected_dists.min(), dists.min())
max_dist = max(projected_dists.max(), dists.max())
plt.hexbin(
dists,
projected_dists,
gridsize=100,
cmap=plt.cm.PuBu,
extent=[min_dist, max_dist, min_dist, max_dist],
)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" % n_components)
cb = plt.colorbar()
cb.set_label("Sample pairs counts")
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, range=(0.0, 2.0), edgecolor="k", **density_param)
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
# %%
# We can see that for low values of ``n_components`` the distribution is wide
# with many distorted pairs and a skewed distribution (due to the hard
# limit of zero ratio on the left, as distances are always positive),
# while for larger values of ``n_components`` the distortion is controlled
# and the distances are well preserved by the random projection.
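# %%
# A rough sketch related to the TODO above (not the exact quantity the
# authors had in mind): for each ``n_components`` we can invert the
# Johnson-Lindenstrauss bound numerically by scanning a grid of ``eps``
# values and keeping the smallest one whose minimum dimension does not
# exceed ``n_components``.
eps_grid = np.linspace(0.01, 0.99, 500)
min_dims = johnson_lindenstrauss_min_dim(n_samples, eps=eps_grid)
for n_components in n_components_range:
    admissible = eps_grid[min_dims <= n_components]
    if admissible.size:
        print(
            "n_components=%d: the lemma guarantees an eps-embedding for any eps >= %.2f"
            % (n_components, admissible[0])
        )
    else:
        print("n_components=%d: no eps in (0, 1) is guaranteed" % n_components)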
# %%
# Remarks
# =======
#
# According to the JL lemma, projecting 500 samples without too much distortion
# will require at least several thousand dimensions, irrespective of the
# number of features of the original dataset.
#
# Hence, using random projections on the digits dataset, which only has 64
# features in the input space, does not make sense: it does not allow
# for dimensionality reduction in this case.
#
# On the 20 newsgroups dataset, on the other hand, the dimensionality can be
# decreased from 56436 down to 10000 while reasonably preserving
# pairwise distances.
|
|
from datetime import time
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from multipledispatch.conflict import ambiguities
from pandas.api.types import CategoricalDtype, DatetimeTZDtype
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
def test_no_infer_ambiguities():
assert not ambiguities(dt.infer.funcs)
@pytest.mark.parametrize(
('value', 'expected_dtype'),
[
# numpy types
(np.int8(5), dt.int8),
(np.int16(-1), dt.int16),
(np.int32(2), dt.int32),
(np.int64(-5), dt.int64),
(np.uint8(5), dt.uint8),
(np.uint16(50), dt.uint16),
(np.uint32(500), dt.uint32),
(np.uint64(5000), dt.uint64),
(np.float32(5.5), dt.float32),
(np.float32(5.5), dt.float),
(np.float64(5.55), dt.float64),
(np.float64(5.55), dt.double),
(np.bool_(True), dt.boolean),
(np.bool_(False), dt.boolean),
# pandas types
(
pd.Timestamp('2015-01-01 12:00:00', tz='US/Eastern'),
dt.Timestamp('US/Eastern'),
),
],
)
def test_infer_dtype(value, expected_dtype):
assert dt.infer(value) == expected_dtype
@pytest.mark.parametrize(
('value', 'expected_dtypes'),
[
# Explicitly-defined dtype
(np.array([1, 2, 3], dtype='int8'), (dt.Array(dt.int8),)),
(np.array([1, 2, 3], dtype='int16'), (dt.Array(dt.int16),)),
(np.array([1, 2, 3], dtype='int32'), (dt.Array(dt.int32),)),
(np.array([1, 2, 3], dtype='int64'), (dt.Array(dt.int64),)),
(np.array([1, 2, 3], dtype='uint8'), (dt.Array(dt.uint8),)),
(np.array([1, 2, 3], dtype='uint16'), (dt.Array(dt.uint16),)),
(np.array([1, 2, 3], dtype='uint32'), (dt.Array(dt.uint32),)),
(np.array([1, 2, 3], dtype='uint64'), (dt.Array(dt.uint64),)),
(np.array([1.0, 2.0, 3.0], dtype='float32'), (dt.Array(dt.float32),)),
(np.array([1.0, 2.0, 3.0], dtype='float64'), (dt.Array(dt.float64),)),
(np.array([True, False, True], dtype='bool'), (dt.Array(dt.boolean),)),
# Implicit dtype
# Integer array could be inferred to int64 or int32 depending on system
(np.array([1, 2, 3]), (dt.Array(dt.int64), dt.Array(dt.int32))),
(np.array([1.0, 2.0, 3.0]), (dt.Array(dt.float64),)),
(np.array([np.nan, np.nan, np.nan]), (dt.Array(dt.float64),)),
(np.array([True, False, True]), (dt.Array(dt.boolean),)),
(np.array(['1', '2', '3']), (dt.Array(dt.string),)),
(
np.array(
[
pd.Timestamp('2015-01-01 12:00:00'),
pd.Timestamp('2015-01-02 12:00:00'),
pd.Timestamp('2015-01-03 12:00:00'),
]
),
(dt.Array(dt.timestamp),),
),
# Implied from object dtype
(np.array([1, 2, 3], dtype=object), (dt.Array(dt.int64),)),
(np.array([1.0, 2.0, 3.0], dtype=object), (dt.Array(dt.float64),)),
(np.array([True, False, True], dtype=object), (dt.Array(dt.boolean),)),
(np.array(['1', '2', '3'], dtype=object), (dt.Array(dt.string),)),
(
np.array(
[
pd.Timestamp('2015-01-01 12:00:00'),
pd.Timestamp('2015-01-02 12:00:00'),
pd.Timestamp('2015-01-03 12:00:00'),
],
dtype=object,
),
(dt.Array(dt.timestamp),),
),
],
)
def test_infer_np_array(value, expected_dtypes):
assert dt.infer(value) in expected_dtypes
@pytest.mark.parametrize(
('numpy_dtype', 'ibis_dtype'),
[
(np.bool_, dt.boolean),
(np.int8, dt.int8),
(np.int16, dt.int16),
(np.int32, dt.int32),
(np.int64, dt.int64),
(np.uint8, dt.uint8),
(np.uint16, dt.uint16),
(np.uint32, dt.uint32),
(np.uint64, dt.uint64),
(np.float16, dt.float16),
(np.float32, dt.float32),
(np.float64, dt.float64),
(np.double, dt.double),
(np.str_, dt.string),
(np.datetime64, dt.timestamp),
(np.timedelta64, dt.interval),
],
)
def test_numpy_dtype(numpy_dtype, ibis_dtype):
assert dt.dtype(np.dtype(numpy_dtype)) == ibis_dtype
@pytest.mark.parametrize(
('pandas_dtype', 'ibis_dtype'),
[
(
DatetimeTZDtype(tz='US/Eastern', unit='ns'),
dt.Timestamp('US/Eastern'),
),
(CategoricalDtype(), dt.Category()),
],
)
def test_pandas_dtype(pandas_dtype, ibis_dtype):
assert dt.dtype(pandas_dtype) == ibis_dtype
@pytest.mark.parametrize(
('col_data', 'schema_type'),
[
([True, False, False], 'bool'),
(np.array([-3, 9, 17], dtype='int8'), 'int8'),
(np.array([-5, 0, 12], dtype='int16'), 'int16'),
(np.array([-12, 3, 25000], dtype='int32'), 'int32'),
(np.array([102, 67228734, -0], dtype='int64'), 'int64'),
(np.array([45e-3, -0.4, 99.0], dtype='float32'), 'float'),
(np.array([-3e43, 43.0, 10000000.0], dtype='float64'), 'double'),
(np.array([3, 0, 16], dtype='uint8'), 'uint8'),
(np.array([5569, 1, 33], dtype='uint16'), 'uint16'),
(np.array([100, 0, 6], dtype='uint32'), 'uint32'),
(np.array([666, 2, 3], dtype='uint64'), 'uint64'),
(
[
pd.Timestamp('2010-11-01 00:01:00'),
pd.Timestamp('2010-11-01 00:02:00.1000'),
pd.Timestamp('2010-11-01 00:03:00.300000'),
],
'timestamp',
),
(
[
pd.Timedelta('1 days'),
pd.Timedelta('-1 days 2 min 3us'),
pd.Timedelta('-2 days +23:57:59.999997'),
],
"interval('ns')",
),
(['foo', 'bar', 'hello'], "string"),
(pd.Series(['a', 'b', 'c', 'a']).astype('category'), dt.Category()),
(pd.Series([b'1', b'2', b'3']), dt.string),
# mixed-integer
(pd.Series([1, 2, '3']), dt.binary),
# mixed-integer-float
(pd.Series([1, 2, 3.0]), dt.float64),
(
pd.Series([Decimal('1.0'), Decimal('2.0'), Decimal('3.0')]),
dt.float64,
),
# complex
(pd.Series([1 + 1j, 1 + 2j, 1 + 3j], dtype=object), dt.binary),
(
pd.Series(
[
pd.to_datetime('2010-11-01'),
pd.to_datetime('2010-11-02'),
pd.to_datetime('2010-11-03'),
]
),
dt.timestamp,
),
(pd.Series([time(1), time(2), time(3)]), dt.time),
(
pd.Series(
[
pd.Period('2011-01'),
pd.Period('2011-02'),
pd.Period('2011-03'),
],
dtype=object,
),
dt.binary,
),
# mixed
(pd.Series([b'1', '2', 3.0]), dt.binary),
# empty
(pd.Series([], dtype='object'), dt.binary),
],
)
def test_schema_infer(col_data, schema_type):
df = pd.DataFrame({'col': col_data})
inferred = sch.infer(df)
expected = ibis.schema([('col', schema_type)])
assert inferred == expected
|
|
#!/usr/bin/env python
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GPDB Configuration
Usage:
from mpp.lib.config import GPDBConfig
"""
from collections import defaultdict
from mpp.lib.PSQL import PSQL
from tinctest.main import TINCException
import os
# ============================================================================
class GPDBConfigException(TINCException): pass
class GPDBConfig():
'''Class with methods to get GPDB configuration information.'''
class Record:
def __init__(self, line):
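# ``line`` is one '|'-separated row returned by the query in _fill()
# below, with columns in the order:
# dbid|content|role|preferred_role|mode|status|hostname|address|port|datadir|replication_port
# e.g. (illustrative values only):
# 2|0|p|p|s|u|sdw1|sdw1|40000|/data/primary/gpseg0|41000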
line = line.split('|')
line = [l.strip() for l in line]
self.dbid = int(line[0])
self.content = int(line[1])
self.role = line[2] == 'p'
self.preferred_role = line[3] == 'p'
self.mode = line[4] == 's'
self.status = line[5] == 'u'
self.hostname = line[6]
self.address = line[7]
self.port = line[8]
self.datadir = line[9]
self.replication_port =line[10]
def __init__(self):
self.record = []
self._fill()
def _fill(self):
'''Get the records and add to Record class '''
self.record = []
config_sql = "select dbid, content, role, preferred_role, mode, status, hostname, address, port, fselocation as datadir, replication_port from gp_segment_configuration, pg_filespace_entry, pg_catalog.pg_filespace fs where fsefsoid = fs.oid and fsname='pg_system' and gp_segment_configuration.dbid=pg_filespace_entry.fsedbid ORDER BY content, preferred_role;"
config_out = PSQL.run_sql_command(config_sql, flags = '-t -q', dbname='postgres')
if len(config_out.strip()) > 0:
config_out = config_out.splitlines()
for line in config_out:
if line.find("NOTICE")<0:
line = line.strip()
if line:
self.record.append(GPDBConfig.Record(line))
else:
raise GPDBConfigException('Unable to select gp_segment_configuration')
def has_mirror(self):
''' Checks if the configuration has mirror'''
return reduce(lambda x, y: x or y,
[not r.role for r in self.record])
def get_countprimarysegments(self):
''' Returns number of primary segments '''
n = 0
for r in self.record:
if r.role and r.content != -1:
n += 1
return n
def get_segments_count_per_host(self):
"""
Return a dict of hostname and segments count.
"""
seg_count = defaultdict(int)
for r in self.record:
if r.role and r.content != -1:
seg_count[r.hostname] += 1
return seg_count
def get_hosts(self, segments = False):
'''
@summary Returns the list of hostnames
@param segments : True or False (True - returns only segment hosts)
'''
list = []
for r in self.record:
if segments:
if r.content != -1:
list.append(r.hostname)
else:
list.append(r.hostname)
return set(list)
def get_hostandport_of_segment(self, psegmentNumber = 0, pRole = 'p'):
'''
@summary: Return a tuple that contains the host and port of the specified segment.
@param pSegmentNumber : The segment number (0 - N-1, where N is the number of segments).
@param pRole: 'p' for Primary, 'm' for Mirror
'''
if pRole == 'p':
role = True
else:
role = False
for seg in self.record:
if seg.content == psegmentNumber and seg.role == role:
return (seg.hostname, seg.port)
def get_host_and_datadir_of_segment(self, dbid=-1):
'''
@description : Return hostname and data_dir for the dbid provided
'''
for r in self.record:
if r.dbid == int(dbid):
return(r.hostname, r.datadir)
def is_segments_local(self, prole='p'):
'''
@summary: Check from the segment "address" column whether the GPDB configuration is localhost,
i.e. whether all segments have a "localhost" address
@param pRole: 'p' for primary, 'm' for mirror
'''
if prole == 'p':
role = True
else:
role = False
n = 0
for r in self.record:
if r.content != -1:
if r.role == role:
if r.address == "localhost":
n = n+1
return (self.get_countprimarysegments()==n)
def is_multinode(self):
'''
Check whether GPDB is multinode.
For OSX, it will always be single node; there are documented issues with setting up GPDB on OSX.
@note: On DCA, the hostname for each segment is different, but the address points to localhost.
'''
if os.uname()[0] == 'Darwin':
return False
# We check hostname, as we could have different addresses
# on the same host.
hostname_set = set([r.hostname for r in self.record])
if len(hostname_set) == 1:
return False
else:
return True
def has_master_mirror(self):
''' Returns true if standby is configured '''
master = 0
for r in self.record:
if r.content == -1:
master += 1
if master == 1:
return False
else:
return True
def get_count_segments(self):
'''Returns number of segments '''
out = PSQL.run_sql_command("select count(*) from gp_segment_configuration where content != -1 and role = 'p' and status = 'u'", flags='-q -t', dbname='template1')
for line in out.splitlines():
return line.strip()
def is_mastermirror_synchronized(self):
''' Returns True if master and standby are synchronized'''
out = PSQL.run_sql_command('select summary_state from gp_master_mirroring',flags='-q -t', dbname='template1')
if len(out.strip()) > 0:
for line in out.splitlines():
line = line.strip()
if line == 'Synchronized':
return True
return False
def get_masterdata_directory(self):
''' Returns the MASTER_DATA_DIRECTORY '''
for r in self.record:
if r.role and r.content == -1:
return r.datadir
def get_masterhost(self):
''' Returns master hostname'''
for r in self.record:
if r.role and r.content == -1:
return r.hostname
def get_master_standbyhost(self):
''' Return standby hostname '''
for r in self.record:
if (r.content == -1) and (not r.role):
return r.hostname
def is_not_insync_segments(self):
'''Returns True if no down or change_tracking segments '''
gpseg_sql = "select count(*) from gp_segment_configuration where mode <>'s' or status <> 'u';"
not_insync_segments = PSQL.run_sql_command(gpseg_sql, flags = '-t -q')
if not_insync_segments.strip() != '0' :
return False
return True
def is_balanced_segments(self):
'''Returns True if primary and mirror are balanced'''
gpseg_sql = "select count(*) from gp_segment_configuration where role != preferred_role;"
balance_segments = PSQL.run_sql_command(gpseg_sql, flags = '-t -q')
if balance_segments.strip() != '0' :
return False
return True
def count_of_nodes_in_mode(self, mode = 'c'):
"""
PURPOSE:
gives count of number of nodes in change tracking
@return:
count of number of nodes in change tracking
"""
sqlcmd = "select count(*) from gp_segment_configuration where mode = '" + mode + "'"
(num_cl) = PSQL.run_sql_command(sqlcmd)
num_cl = num_cl.split('\n')[3].strip()
return num_cl
def is_down_segments(self):
for r in self.record:
if r.status == 'd':
return True
return False
def get_dbid(self, content, seg_role):
''' Returns the db_id given contentid and role '''
for r in self.record:
if r.content == content and r.role == (seg_role == "p"):
return r.dbid
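# A minimal usage sketch (illustrative only). GPDBConfig() queries
# gp_segment_configuration through PSQL at construction time, so this
# requires a reachable GPDB cluster; it is therefore guarded behind
# __main__ rather than executed on import.
if __name__ == '__main__':
    config = GPDBConfig()
    print 'Primary segments : %s' % config.get_countprimarysegments()
    print 'Master host      : %s' % config.get_masterhost()
    print 'Segments per host: %s' % dict(config.get_segments_count_per_host())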
|
|
"""Release data for NetworkX.
When NetworkX is imported a number of steps are followed to determine
the version information.
1) If the release is not a development release (dev=False), then version
information is read from version.py, a file containing statically
defined version information. This file should exist on every
downloadable release of NetworkX since setup.py creates it during
packaging/installation. However, version.py might not exist if one
is running NetworkX from the mercurial repository. In the event that
version.py does not exist, then no vcs information will be available.
2) If the release is a development release, then version information
is read dynamically, when possible. If no dynamic information can be
read, then an attempt is made to read the information from version.py.
If version.py does not exist, then no vcs information will be available.
Clarification:
version.py is created only by setup.py
When setup.py creates version.py, it does so before packaging/installation.
So the created file is included in the source distribution. When a user
downloads a tar.gz file and extracts the files, the files will not be in a
live version control repository. So when the user runs setup.py to install
NetworkX, we must make sure write_versionfile() does not overwrite the
revision information contained in the version.py that was included in the
tar.gz file. This is why write_versionfile() includes an early escape.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
from __future__ import absolute_import
import os
import sys
import time
import datetime
basedir = os.path.abspath(os.path.split(__file__)[0])
def write_versionfile():
"""Creates a static file containing version information."""
versionfile = os.path.join(basedir, 'version.py')
text = '''"""
Version information for NetworkX, created during installation.
Do not add this file to the repository.
"""
import datetime
version = %(version)r
date = %(date)r
# Was NetworkX built from a development version? If so, remember that the major
# and minor versions reference the "target" (rather than "current") release.
dev = %(dev)r
# Format: (name, major, minor, revision)
version_info = %(version_info)r
# Format: a 'datetime.datetime' instance
date_info = %(date_info)r
# Format: (vcs, vcs_tuple)
vcs_info = %(vcs_info)r
'''
# Try to update all information
date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
def writefile():
fh = open(versionfile, 'w')
subs = {
'dev' : dev,
'version': version,
'version_info': version_info,
'date': date,
'date_info': date_info,
'vcs_info': vcs_info
}
fh.write(text % subs)
fh.close()
if vcs_info[0] == 'mercurial':
# Then, we want to update version.py.
writefile()
else:
if os.path.isfile(versionfile):
# This is *good*, and the most likely place users will be when
# running setup.py. We do not want to overwrite version.py.
# Grab the version so that setup can use it.
sys.path.insert(0, basedir)
from version import version
del sys.path[0]
else:
# This is *bad*. It means the user might have a tarball that
# does not include version.py. Let this error raise so we can
# fix the tarball.
##raise Exception('version.py not found!')
# We no longer require that prepared tarballs include a version.py,
# so we use the possibly truncated value from get_info().
# Then we write a new file.
writefile()
return version
def get_revision():
"""Returns revision and vcs information, dynamically obtained."""
vcs, revision, tag = None, None, None
hgdir = os.path.join(basedir, '..', '.hg')
gitdir = os.path.join(basedir, '..', '.git')
if os.path.isdir(gitdir):
vcs = 'git'
# For now, we are not bothering with revision and tag.
vcs_info = (vcs, (revision, tag))
return revision, vcs_info
def get_info(dynamic=True):
## Date information
date_info = datetime.datetime.now()
date = time.asctime(date_info.timetuple())
revision, version, version_info, vcs_info = None, None, None, None
import_failed = False
dynamic_failed = False
if dynamic:
revision, vcs_info = get_revision()
if revision is None:
dynamic_failed = True
if dynamic_failed or not dynamic:
# This is where most final releases of NetworkX will be.
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info, vcs_info
except ImportError:
import_failed = True
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
# we failed to determine static versioning info, or
# we successfully obtained dynamic revision info
version = ''.join([str(major), '.', str(minor)])
if dev:
version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
version_info = (name, major, minor, revision)
return date, date_info, version, version_info, vcs_info
## Version information
name = 'networkx'
major = "2"
minor = "0"
## Declare current release as a development release.
## Change to False before tagging a release; then change back.
dev = True
description = "Python package for creating and manipulating graphs and networks"
long_description = \
"""
NetworkX is a Python package for the creation, manipulation, and
study of the structure, dynamics, and functions of complex networks.
"""
license = 'BSD'
authors = {'Hagberg' : ('Aric Hagberg','[email protected]'),
'Schult' : ('Dan Schult','[email protected]'),
'Swart' : ('Pieter Swart','[email protected]')
}
maintainer = "NetworkX Developers"
maintainer_email = "[email protected]"
url = 'http://networkx.github.io/'
download_url= 'https://pypi.python.org/pypi/networkx/'
platforms = ['Linux','Mac OSX','Windows','Unix']
keywords = ['Networks', 'Graph Theory', 'Mathematics', 'network', 'graph', 'discrete mathematics', 'math']
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics']
date, date_info, version, version_info, vcs_info = get_info()
if __name__ == '__main__':
# Write versionfile for nightly snapshots.
write_versionfile()
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import sys
import time
from webkitpy.port import Port, Driver, DriverOutput
from webkitpy.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.crashlogs import CrashLogs
# This sets basic expectations for a test. Each individual expectation
# can be overridden by a keyword argument in TestList.add().
class TestInstance(object):
def __init__(self, name):
self.name = name
self.base = name[(name.rfind("/") + 1):name.rfind(".")]
self.crash = False
self.web_process_crash = False
self.exception = False
self.hang = False
self.keyboard = False
self.error = ''
self.timeout = False
self.is_reftest = False
# The values of each field are treated as raw byte strings. They
# will be converted to unicode strings where appropriate using
# FileSystem.read_text_file().
self.actual_text = self.base + '-txt'
self.actual_checksum = self.base + '-checksum'
# We add the '\x8a' for the image file to prevent the value from
# being treated as UTF-8 (the character is invalid)
self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
self.expected_text = self.actual_text
self.expected_image = self.actual_image
self.actual_audio = None
self.expected_audio = None
# This is an in-memory list of tests, what we want them to produce, and
# what we want to claim are the expected results.
class TestList(object):
def __init__(self):
self.tests = {}
def add(self, name, **kwargs):
test = TestInstance(name)
for key, value in kwargs.items():
test.__dict__[key] = value
self.tests[name] = test
def add_reftest(self, name, reference_name, same_image):
self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
if same_image:
self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
else:
self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)
def keys(self):
return self.tests.keys()
def __contains__(self, item):
return item in self.tests
def __getitem__(self, item):
return self.tests[item]
#
# These numbers may need to be updated whenever we add or delete tests.
#
TOTAL_TESTS = 106
TOTAL_SKIPS = 28
TOTAL_RETRIES = 14
UNEXPECTED_PASSES = 6
UNEXPECTED_FAILURES = 17
def unit_test_list():
tests = TestList()
tests.add('failures/expected/crash.html', crash=True)
tests.add('failures/expected/exception.html', exception=True)
tests.add('failures/expected/timeout.html', timeout=True)
tests.add('failures/expected/hang.html', hang=True)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/image.html',
actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
expected_image='image-pngtEXtchecksum\x00checksum-png')
tests.add('failures/expected/image_checksum.html',
actual_checksum='image_checksum_fail-checksum',
actual_image='image_checksum_fail-png')
tests.add('failures/expected/audio.html',
actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/keyboard.html', keyboard=True)
tests.add('failures/expected/missing_check.html',
expected_image='missing_check-png')
tests.add('failures/expected/missing_image.html', expected_image=None)
tests.add('failures/expected/missing_audio.html', expected_audio=None,
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/newlines_leading.html',
expected_text="\nfoo\n", actual_text="foo\n")
tests.add('failures/expected/newlines_trailing.html',
expected_text="foo\n\n", actual_text="foo\n")
tests.add('failures/expected/newlines_with_excess_CR.html',
expected_text="foo\r\r\r\n", actual_text="foo\n")
tests.add('failures/expected/text.html', actual_text='text_fail-png')
tests.add('failures/expected/skip_text.html', actual_text='text diff')
tests.add('failures/flaky/text.html')
tests.add('failures/unexpected/missing_text.html', expected_text=None)
tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
tests.add('failures/unexpected/missing_image.html', expected_image=None)
tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
RenderBlock {HTML} at (0,0) size 800x34
RenderBody {BODY} at (8,8) size 784x18
RenderText {#text} at (0,0) size 133x18
text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
tests.add('failures/unexpected/crash.html', crash=True)
tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/pass.html')
tests.add('failures/unexpected/text-checksum.html',
actual_text='text-checksum_fail-txt',
actual_checksum='text-checksum_fail-checksum')
tests.add('failures/unexpected/text-image-checksum.html',
actual_text='text-image-checksum_fail-txt',
actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/checksum-with-matching-image.html',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/skip_pass.html')
tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
tests.add('failures/unexpected/timeout.html', timeout=True)
tests.add('http/tests/passes/text.html')
tests.add('http/tests/passes/image.html')
tests.add('http/tests/ssl/text.html')
tests.add('passes/args.html')
tests.add('passes/error.html', error='stuff going to stderr')
tests.add('passes/image.html')
tests.add('passes/audio.html',
actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('passes/platform_image.html')
tests.add('passes/checksum_in_image.html',
expected_image='tEXtchecksum\x00checksum_in_image-checksum')
tests.add('passes/skipped/skip.html')
# Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
# See https://bugs.webkit.org/show_bug.cgi?id=69444 .
tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
# Text output files contain "\r\n" on Windows. This may be
# helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
tests.add('passes/text.html',
expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
# For reftests.
tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
# FIXME: Add a reftest which crashes.
tests.add('reftests/foo/test.html')
tests.add('reftests/foo/test-ref.html')
tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
# The following files shouldn't be treated as reftests
tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
tests.add('reftests/foo/reference/bar/common.html')
tests.add('reftests/foo/reftest/bar/shared.html')
tests.add('websocket/tests/passes/text.html')
# For testing that tests are properly included from platform directories.
tests.add('platform/test-mac-leopard/http/test.html')
tests.add('platform/test-win-win7/http/test.html')
# For --no-http tests, test that platform specific HTTP tests are properly skipped.
tests.add('platform/test-snow-leopard/http/test.html')
tests.add('platform/test-snow-leopard/websocket/test.html')
# For testing if perf tests are running in a locked shard.
tests.add('perf/foo/test.html')
tests.add('perf/foo/test-ref.html')
# For testing --pixel-test-directories.
tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
tests.add('failures/unexpected/image_not_in_pixeldir.html',
actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
# For testing that virtual test suites don't expand names containing themselves
# See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
tests.add('passes/test-virtual-passes.html')
tests.add('passes/passes/test-virtual-passes.html')
return tests
# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.
LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
PERF_TEST_DIR = '/test.checkout/PerformanceTests'
# Here we synthesize an in-memory filesystem from the test list
# in order to fully control the test output and to demonstrate that
# we don't need a real filesystem to run the tests.
def add_unit_tests_to_mock_filesystem(filesystem):
# Add the test_expectations file.
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/platform/test')
if not filesystem.exists(LAYOUT_TEST_DIR + '/platform/test/TestExpectations'):
filesystem.write_text_file(LAYOUT_TEST_DIR + '/platform/test/TestExpectations', """
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/audio.html [ Failure ]
Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
Bug(test) failures/expected/missing_check.html [ Missing Pass ]
Bug(test) failures/expected/missing_image.html [ Missing Pass ]
Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
Bug(test) failures/expected/missing_text.html [ Missing Pass ]
Bug(test) failures/expected/newlines_leading.html [ Failure ]
Bug(test) failures/expected/newlines_trailing.html [ Failure ]
Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
Bug(test) failures/expected/hang.html [ WontFix ]
Bug(test) failures/expected/keyboard.html [ WontFix ]
Bug(test) failures/expected/exception.html [ WontFix ]
Bug(test) failures/unexpected/pass.html [ Failure ]
Bug(test) passes/skipped/skip.html [ Skip ]
""")
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
== test.html test-ref.html
== multiple-match-success.html mismatching-ref.html
== multiple-match-success.html matching-ref.html
== multiple-match-failure.html mismatching-ref.html
== multiple-match-failure.html second-mismatching-ref.html
!= multiple-mismatch-success.html mismatching-ref.html
!= multiple-mismatch-success.html second-mismatching-ref.html
!= multiple-mismatch-failure.html mismatching-ref.html
!= multiple-mismatch-failure.html matching-ref.html
== multiple-both-success.html matching-ref.html
== multiple-both-success.html mismatching-ref.html
!= multiple-both-success.html second-mismatching-ref.html
== multiple-both-failure.html matching-ref.html
!= multiple-both-failure.html second-mismatching-ref.html
!= multiple-both-failure.html matching-ref.html
""")
# FIXME: This test was only being ignored because of missing a leading '/'.
# Fixing the typo causes several tests to assert, so disabling the test entirely.
# Add in a file should be ignored by port.find_test_files().
#files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'
def add_file(test, suffix, contents):
dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
base = test.base
filesystem.maybe_make_directory(dirname)
filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)
# Add each test and the expected output, if any.
test_list = unit_test_list()
for test in test_list.tests.values():
add_file(test, test.name[test.name.rfind('.'):], '')
if test.is_reftest:
continue
if test.actual_audio:
add_file(test, '-expected.wav', test.expected_audio)
continue
add_file(test, '-expected.txt', test.expected_text)
add_file(test, '-expected.png', test.expected_image)
filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
# Clear the list of written files so that we can watch what happens during testing.
filesystem.clear_written_files()
class TestPort(Port):
"""Test implementation of the Port interface."""
port_name = 'test'
default_port_name = 'test-mac-leopard'
ALL_BASELINE_VARIANTS = (
'test-linux-x86_64',
'test-mac-snowleopard', 'test-mac-leopard',
'test-win-vista', 'test-win-win7', 'test-win-xp',
)
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name == 'test':
return TestPort.default_port_name
return port_name
def __init__(self, host, port_name=None, **kwargs):
Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
self._tests = unit_test_list()
self._flakes = set()
self._expectations_path = LAYOUT_TEST_DIR + '/platform/test/TestExpectations'
self._results_directory = None
self._operating_system = 'mac'
if self._name.startswith('test-win'):
self._operating_system = 'win'
elif self._name.startswith('test-linux'):
self._operating_system = 'linux'
version_map = {
'test-win-xp': 'xp',
'test-win-win7': 'win7',
'test-win-vista': 'vista',
'test-mac-leopard': 'leopard',
'test-mac-snowleopard': 'snowleopard',
'test-linux-x86_64': 'lucid',
}
self._version = version_map[self._name]
def default_pixel_tests(self):
return True
def _path_to_driver(self):
# This routine shouldn't normally be called, but it is called by
# the mock_drt Driver. We return something, but make sure it's useless.
return 'MOCK _path_to_driver'
def baseline_search_path(self):
search_paths = {
'test-mac-snowleopard': ['test-mac-snowleopard'],
'test-mac-leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
'test-win-win7': ['test-win-win7'],
'test-win-vista': ['test-win-vista', 'test-win-win7'],
'test-win-xp': ['test-win-xp', 'test-win-vista', 'test-win-win7'],
'test-linux-x86_64': ['test-linux', 'test-win-win7'],
}
return [self._webkit_baseline_path(d) for d in search_paths[self.name()]]
def default_child_processes(self):
return 1
def worker_startup_delay_secs(self):
return 0
def check_build(self, needs_http):
return True
def check_sys_deps(self, needs_http):
return True
def default_configuration(self):
return 'Release'
def diff_image(self, expected_contents, actual_contents, tolerance=None):
diffed = actual_contents != expected_contents
if not actual_contents and not expected_contents:
return (None, 0, None)
if not actual_contents or not expected_contents:
return (True, 0, None)
if 'ref' in expected_contents:
assert tolerance == 0
if diffed:
return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), 1, None)
return (None, 0, None)
def layout_tests_dir(self):
return LAYOUT_TEST_DIR
def perf_tests_dir(self):
return PERF_TEST_DIR
def webkit_base(self):
return '/test.checkout'
def _skipped_tests_for_unsupported_features(self, test_list):
return set(['failures/expected/skip_text.html',
'failures/unexpected/skip_pass.html',
'virtual/skipped'])
def name(self):
return self._name
def operating_system(self):
return self._operating_system
def _path_to_wdiff(self):
return None
def default_results_directory(self):
return '/tmp/layout-test-results'
def setup_test_run(self):
pass
def _driver_class(self):
return TestDriver
def start_http_server(self, additional_dirs=None, number_of_servers=None):
pass
def start_websocket_server(self):
pass
def acquire_http_lock(self):
pass
def stop_http_server(self):
pass
def stop_websocket_server(self):
pass
def release_http_lock(self):
pass
def _path_to_lighttpd(self):
return "/usr/sbin/lighttpd"
def _path_to_lighttpd_modules(self):
return "/usr/lib/lighttpd"
def _path_to_lighttpd_php(self):
return "/usr/bin/php-cgi"
def _path_to_apache(self):
return "/usr/sbin/httpd"
def _path_to_apache_config_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')
def path_to_test_expectations_file(self):
return self._expectations_path
def all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
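        # Editor's note: concretely this is the cross product of _all_systems() and
        # _all_build_types(), e.g. ('leopard', 'x86', 'debug'), ('leopard', 'x86',
        # 'release'), ... giving 7 systems x 2 build types = 14 TestConfiguration objects.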
test_configurations = []
for version, architecture in self._all_systems():
for build_type in self._all_build_types():
test_configurations.append(TestConfiguration(
version=version,
architecture=architecture,
build_type=build_type))
return test_configurations
def _all_systems(self):
return (('leopard', 'x86'),
('snowleopard', 'x86'),
('xp', 'x86'),
('vista', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'))
def _all_build_types(self):
return ('debug', 'release')
def configuration_specifier_macros(self):
"""To avoid surprises when introducing new macros, these are intentionally fixed in time."""
return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'vista', 'win7'], 'linux': ['lucid']}
def all_baseline_variants(self):
return self.ALL_BASELINE_VARIANTS
def virtual_test_suites(self):
return [
VirtualTestSuite('virtual/passes', 'passes', ['--virtual-arg']),
VirtualTestSuite('virtual/skipped', 'failures/expected', ['--virtual-arg2']),
]
def make_args(self):
return '--makeargs="-j4"'
def check_webkit_style_command(self):
return ["mock-check-webkit-style"]
def update_webkit_command(self, non_interactive=False):
return ["mock-update-webkit"]
def build_webkit_command(self, build_style=None):
return ["mock-build-webkit"]
def prepare_changelog_command(self):
return ['mock-prepare-ChangeLog']
def run_python_unittests_command(self):
return ['mock-test-webkitpy']
def run_perl_unittests_command(self):
return ['mock-test-webkitperl']
def run_javascriptcore_tests_command(self):
return ['mock-run-javascriptcore-tests']
def run_webkit_unit_tests_command(self):
return ['mock-run-webkit-unit-tests']
def run_webkit_tests_command(self):
return ['mock-run-webkit-tests']
def run_bindings_tests_command(self):
return ['mock-run-bindings-tests']
class TestDriver(Driver):
"""Test/Dummy implementation of the DumpRenderTree interface."""
next_pid = 1
def __init__(self, *args, **kwargs):
super(TestDriver, self).__init__(*args, **kwargs)
self.started = False
self.pid = 0
def cmd_line(self, pixel_tests, per_test_args):
pixel_tests_flag = '-p' if pixel_tests else ''
return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
def run_test(self, test_input, stop_when_done):
if not self.started:
self.started = True
self.pid = TestDriver.next_pid
TestDriver.next_pid += 1
start_time = time.time()
test_name = test_input.test_name
test_args = test_input.args or []
test = self._port._tests[test_name]
if test.keyboard:
raise KeyboardInterrupt
if test.exception:
raise ValueError('exception from ' + test_name)
if test.hang:
            time.sleep((float(test_input.timeout) * 4) / 1000.0 + 1.0)  # The 1.0 comes from thread_padding_sec in layout_test_runner.
audio = None
actual_text = test.actual_text
if 'flaky' in test_name and not test_name in self._port._flakes:
self._port._flakes.add(test_name)
actual_text = 'flaky text failure'
if actual_text and test_args and test_name == 'passes/args.html':
actual_text = actual_text + ' ' + ' '.join(test_args)
if test.actual_audio:
audio = base64.b64decode(test.actual_audio)
crashed_process_name = None
crashed_pid = None
if test.crash:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
elif test.web_process_crash:
crashed_process_name = 'WebProcess'
crashed_pid = 2
crash_log = ''
if crashed_process_name:
crash_logs = CrashLogs(self._port.host)
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
if stop_when_done:
self.stop()
if test.actual_checksum == test_input.image_hash:
image = None
else:
image = test.actual_image
return DriverOutput(actual_text, image, test.actual_checksum, audio,
crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid)
def stop(self):
self.started = False
|
|
"""Azure Image Creation, Management, Testing"""
import json
import re
import sys
import urllib.parse
from copy import deepcopy
import pkg_resources
import yaml
import gen
import gen.build_deploy.util as util
import gen.template
import pkgpanda.build
from gen.internals import Late, Source
from pkgpanda.util import split_by_token
# TODO(cmaloney): Make it so the template only completes when services are properly up.
late_services = ""
ILLEGAL_ARM_CHARS_PATTERN = re.compile("[']")
DOWNLOAD_URL_TEMPLATE = ("{download_url}{reproducible_artifact_path}/azure/{arm_template_name}")
INSTANCE_GROUPS = {
'master': {
'report_name': 'MasterServerGroup',
'roles': ['master', 'azure_master']
},
'slave': {
'report_name': 'SlaveServerGroup',
'roles': ['slave']
},
'slave_public': {
'report_name': 'PublicSlaveServerGroup',
'roles': ['slave_public']
}
}
def validate_provider(provider):
assert provider == 'azure'
azure_base_source = Source(entry={
'validate': [
validate_provider
],
'default': {
'platform': 'azure',
'enable_docker_gc': 'true'
},
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/azure.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'fault_domain_enabled': 'true',
'fault_domain_detect_contents': yaml.dump(
pkg_resources.resource_string('gen', 'fault-domain-detect/azure.sh').decode())
},
'conditional': {
'oauth_available': {
'true': {
'must': {
'oauth_enabled': Late("[[[variables('oauthEnabled')]]]"),
'adminrouter_auth_enabled': Late("[[[variables('oauthEnabled')]]]"),
}
},
'false': {},
}
}
})
def validate_cloud_config(cc_string):
'''
Validate that there aren't any single quotes present since they break the
ARM template system. Exit with an error message if any invalid characters
are detected.
@param cc_string: str, Cloud Configuration
'''
if "'" in cc_string:
print("ERROR: Illegal cloud config string detected.", file=sys.stderr)
print("ERROR: {} contains a `'`".format(cc_string), file=sys.stderr)
sys.exit(1)
def transform(cloud_config_yaml_str):
'''
Transforms the given yaml into a list of strings which are concatenated
together by the ARM template system. We must make it a list of strings so
that ARM template parameters appear at the top level of the template and get
substituted.
'''
cc_json = json.dumps(yaml.safe_load(cloud_config_yaml_str), sort_keys=True)
def _quote_literals(parts):
for part, is_param in parts:
if is_param:
yield part
else:
validate_cloud_config(part)
yield "'{}'".format(part)
# We're embedding this as a json string.
return json.dumps(
"[base64(concat('#cloud-config\n\n', " +
", ".join(_quote_literals(split_by_token('[[[', ']]]', cc_json, strip_token_decoration=True))) +
"))]"
)
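# Illustrative sketch (editor's addition, not part of the original build code): for a
# cloud-config document containing no '[[[...]]]' parameters, transform() simply
# JSON-encodes the loaded YAML and wraps it in an ARM base64(concat(...)) expression.
# The function name below is hypothetical and exists only to show the call shape.
def _example_transform_usage():
    example_cloud_config = "#cloud-config\nwrite_files:\n  - path: /etc/motd\n"
    # Returns a JSON string literal of the form
    # "[base64(concat('#cloud-config\n\n', '{\"write_files\": ...}'))]"
    return transform(example_cloud_config)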
def render_arm(
arm_template,
master_cloudconfig_yaml_str,
slave_cloudconfig_yaml_str,
slave_public_cloudconfig_yaml_str):
template_str = gen.template.parse_str(arm_template).render({
'master_cloud_config': transform(master_cloudconfig_yaml_str),
'slave_cloud_config': transform(slave_cloudconfig_yaml_str),
'slave_public_cloud_config': transform(slave_public_cloudconfig_yaml_str)
})
# Add in some metadata to help support engineers
template_json = json.loads(template_str)
template_json['variables']['DcosImageCommit'] = util.dcos_image_commit
template_json['variables']['TemplateGenerationDate'] = util.template_generation_date
return json.dumps(template_json)
def gen_templates(gen_arguments, arm_template, extra_sources):
'''
Render the cloud_config template given a particular set of options
    @param gen_arguments: dict, args to pass to the gen library. These are user
input arguments which get filled in/prompted for.
@param arm_template: string, path to the source arm template for rendering
by the gen library (e.g. 'azure/templates/azuredeploy.json')
'''
results = gen.generate(
arguments=gen_arguments,
extra_templates=['azure/cloud-config.yaml', 'azure/templates/' + arm_template + '.json'],
extra_sources=[azure_base_source] + extra_sources)
cloud_config = results.templates['cloud-config.yaml']
# Add general services
cloud_config = results.utils.add_services(cloud_config, 'canonical')
# Specialize for master, slave, slave_public
variant_cloudconfig = {}
for variant, params in INSTANCE_GROUPS.items():
cc_variant = deepcopy(cloud_config)
# Add roles
cc_variant = results.utils.add_roles(cc_variant, params['roles'] + ['azure'])
        # NOTE: If this gets printed in string style rather than '|' (literal block)
        # style, the Azure parameters which need to be split out for the ARM template
        # to interpret end up all escaped and undoing it would be hard.
variant_cloudconfig[variant] = results.utils.render_cloudconfig(cc_variant)
# Render the arm
arm = render_arm(
results.templates[arm_template + '.json'],
variant_cloudconfig['master'],
variant_cloudconfig['slave'],
variant_cloudconfig['slave_public'])
return (arm, results)
def master_list_arm_json(num_masters, varietal):
'''
    Return a JSON string containing a list of ARM expressions for the master IPs of the cluster.
@param num_masters: int, number of master nodes in the cluster
@param varietal: string, indicate template varietal to build for either 'acs' or 'dcos'
'''
if varietal == 'dcos':
arm_expression = "[[[reference('masterNodeNic{}').ipConfigurations[0].properties.privateIPAddress]]]"
elif varietal == 'acs':
arm_expression = "[[[reference(variables('masterVMNic')[{}]).ipConfigurations[0].properties.privateIPAddress]]]"
else:
raise ValueError("Unknown Azure varietal specified")
return json.dumps([arm_expression.format(x) for x in range(num_masters)])
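# Illustrative note (editor's addition): for num_masters=1 and varietal='dcos' the
# helper above returns the JSON-encoded list
#   ["[[[reference('masterNodeNic0').ipConfigurations[0].properties.privateIPAddress]]]"]
# i.e. one late-bound ARM expression per master node index.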
azure_dcos_source = Source({
'must': {
'exhibitor_azure_prefix': Late("[[[variables('uniqueName')]]]"),
'exhibitor_azure_account_name': Late("[[[variables('storageAccountName')]]]"),
'exhibitor_azure_account_key': Late(
"[[[listKeys(resourceId('Microsoft.Storage/storageAccounts', "
"variables('storageAccountName')), '2015-05-01-preview').key1]]]"),
'cluster_name': Late("[[[variables('uniqueName')]]]")
}
})
azure_acs_source = Source({
'must': {
'ui_tracking': 'false',
'telemetry_enabled': 'false',
'exhibitor_azure_prefix': Late("[[[variables('masterPublicIPAddressName')]]]"),
'exhibitor_azure_account_name': Late("[[[variables('masterStorageAccountExhibitorName')]]]"),
'exhibitor_azure_account_key': Late(
"[[[listKeys(resourceId('Microsoft.Storage/storageAccounts', "
"variables('masterStorageAccountExhibitorName')), '2015-06-15').key1]]]"),
'cluster_name': Late("[[[variables('masterPublicIPAddressName')]]]"),
'bootstrap_tmp_dir': "/var/tmp"
}
})
def make_template(num_masters, gen_arguments, varietal, bootstrap_variant_prefix):
    '''
    Yield the artifacts (config package references and the rendered ARM template)
    for a cluster with num_masters master nodes.
    @param num_masters: int, number of master nodes to embed in the generated template
    @param gen_arguments: dict, args to pass to the gen library. These are user
    input arguments which get filled in/prompted for.
    @param varietal: string, indicate template varietal to build for either 'acs' or 'dcos'
    @param bootstrap_variant_prefix: string, filename prefix identifying the bootstrap
    variant, used in the artifact channel path
    '''
master_list_source = Source()
master_list_source.add_must('master_list', Late(master_list_arm_json(num_masters, varietal)))
master_list_source.add_must('num_masters', str(num_masters))
if varietal == 'dcos':
arm, results = gen_templates(
gen_arguments,
'azuredeploy',
extra_sources=[master_list_source, azure_dcos_source])
elif varietal == 'acs':
arm, results = gen_templates(
gen_arguments,
'acs',
extra_sources=[master_list_source, azure_acs_source])
else:
raise ValueError("Unknown Azure varietal specified")
yield {'packages': results.config_package_ids}
if results.late_package_id:
yield {'packages': [results.late_package_id]}
yield {
'channel_path': 'azure/{}{}-{}master.azuredeploy.json'.format(bootstrap_variant_prefix, varietal, num_masters),
'local_content': arm,
'content_type': 'application/json; charset=utf-8'
}
def do_create(tag, build_name, reproducible_artifact_path, commit, variant_arguments, all_completes):
for arm_t in ['dcos', 'acs']:
for num_masters in [1, 3, 5]:
for bootstrap_name, gen_arguments in variant_arguments.items():
yield from make_template(
num_masters,
gen_arguments,
arm_t,
pkgpanda.util.variant_prefix(bootstrap_name))
yield {
'channel_path': 'azure.html',
'local_content': gen_buttons(
build_name,
reproducible_artifact_path,
tag,
commit,
next(iter(variant_arguments.values()))['azure_download_url']),
'content_type': 'text/html; charset=utf-8'
}
def gen_buttons(build_name, reproducible_artifact_path, tag, commit, download_url):
'''
Generate the button page, that is, "Deploy a cluster to Azure" page
'''
dcos_urls = [
encode_url_as_param(DOWNLOAD_URL_TEMPLATE.format(
download_url=download_url,
reproducible_artifact_path=reproducible_artifact_path,
arm_template_name='dcos-{}master.azuredeploy.json'.format(x)))
for x in [1, 3, 5]]
acs_urls = [
encode_url_as_param(DOWNLOAD_URL_TEMPLATE.format(
download_url=download_url,
reproducible_artifact_path=reproducible_artifact_path,
arm_template_name='acs-{}master.azuredeploy.json'.format(x)))
for x in [1, 3, 5]]
return gen.template.parse_resources('azure/templates/azure.html').render({
'build_name': build_name,
'tag': tag,
'commit': commit,
'dcos_urls': dcos_urls,
'acs_urls': acs_urls
})
# Escape URL characters like '/' and ':' so that it can be used with the Azure
# web endpoint of https://portal.azure.com/#create/Microsoft.Template/uri/
def encode_url_as_param(s):
s = s.encode('utf8')
s = urllib.parse.quote_plus(s)
return s
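# Illustrative example (editor's addition): quote_plus() percent-encodes the scheme
# and path separators so the full template URL can be appended to the portal's
# ".../#create/Microsoft.Template/uri/" endpoint, e.g.
#   encode_url_as_param('https://example.com/azure/dcos-1master.azuredeploy.json')
#   -> 'https%3A%2F%2Fexample.com%2Fazure%2Fdcos-1master.azuredeploy.json'
# (example.com is a placeholder domain.)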
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""WiFi Rate Control Statistics Primitive."""
from datetime import datetime
from construct import Struct, Int8ub, Int16ub, Int32ub, Bytes, Array
from construct import Container
import empower.managers.ranmanager.lvapp as lvapp
from empower_core.etheraddress import EtherAddress
from empower.managers.ranmanager.lvapp.wifiapp import EWiFiApp
from empower_core.app import EVERY
PT_WIFI_RC_STATS_REQUEST = 0x80
PT_WIFI_RC_STATS_RESPONSE = 0x81
WIFI_RC_STATS_REQUEST = Struct(
"version" / Int8ub,
"type" / Int8ub,
"length" / Int32ub,
"seq" / Int32ub,
"xid" / Int32ub,
"device" / Bytes(6),
"sta" / Bytes(6),
)
WIFI_RC_STATS_REQUEST.name = "wifi_rc_stats_request"
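# Editor's note: the request is fixed-size (1 + 1 + 4 + 4 + 4 + 6 + 6 = 26 bytes),
# which is why loop() below can set the length field to WIFI_RC_STATS_REQUEST.sizeof().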
RC_ENTRY = Struct(
"rate" / Int8ub,
"prob" / Int32ub,
"cur_prob" / Int32ub,
"cur_tp" / Int32ub,
"last_attempts" / Int32ub,
"last_successes" / Int32ub,
"hist_attempts" / Int32ub,
"hist_successes" / Int32ub
)
RC_ENTRY.name = "rc_entry"
WIFI_RC_STATS_RESPONSE = Struct(
"version" / Int8ub,
"type" / Int8ub,
"length" / Int32ub,
"seq" / Int32ub,
"xid" / Int32ub,
"device" / Bytes(6),
"iface_id" / Int32ub,
"sta" / Bytes(6),
"nb_entries" / Int16ub,
"stats" / Array(lambda ctx: ctx.nb_entries, RC_ENTRY),
)
WIFI_RC_STATS_RESPONSE.name = "wifi_rc_stats_response"
class RCStats(EWiFiApp):
"""WiFi Rate Control Statistics Primitive.
This primitive collects the RC statistics from the specified LVAP.
Parameters:
sta: the LVAP to track as an EtherAddress (mandatory)
every: the loop period in ms (optional, default 2000ms)
Example:
POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps
{
"name": "empower.apps.wifircstats.wifircstats",
"params": {
"sta": "11:22:33:44:55:66",
"every": 2000
}
}
"""
def __init__(self, context, service_id, sta, every=EVERY):
super().__init__(context=context,
service_id=service_id,
sta=sta,
every=every)
# Register messages
lvapp.register_message(PT_WIFI_RC_STATS_REQUEST,
WIFI_RC_STATS_REQUEST)
lvapp.register_message(PT_WIFI_RC_STATS_RESPONSE,
WIFI_RC_STATS_RESPONSE)
# Data structures
self.rates = {}
self.best_prob = None
self.best_tp = None
def __eq__(self, other):
if isinstance(other, RCStats):
return self.sta == other.sta and self.every == other.every
return False
@property
def sta(self):
""" Return the station address. """
return self.params['sta']
@sta.setter
def sta(self, sta):
""" Set the station address. """
self.params['sta'] = EtherAddress(sta)
def to_dict(self):
"""Return JSON-serializable representation of the object."""
out = super().to_dict()
out['sta'] = self.sta
out['rates'] = self.rates
out['best_prob'] = self.best_prob
out['best_tp'] = self.best_tp
return out
def loop(self):
"""Send out requests"""
if self.sta not in self.context.lvaps:
return
lvap = self.context.lvaps[self.sta]
msg = Container(length=WIFI_RC_STATS_REQUEST.sizeof(),
sta=lvap.addr.to_raw())
lvap.wtp.connection.send_message(PT_WIFI_RC_STATS_REQUEST,
msg,
self.handle_response)
def handle_response(self, response, *_):
"""Handle WIFI_RC_STATS_RESPONSE message."""
lvap = self.context.lvaps[self.sta]
# update this object
self.rates = {}
self.best_prob = None
self.best_tp = None
# generate data points
points = []
timestamp = datetime.utcnow()
for entry in response.stats:
rate = entry.rate if lvap.ht_caps else entry.rate / 2.0
fields = {
'prob': entry.prob / 180.0,
'cur_prob': entry.cur_prob / 180.0,
'cur_tp': entry.cur_tp / ((18000 << 10) / 96) / 10,
'last_attempts': entry.last_attempts,
'last_successes': entry.last_successes,
'hist_attempts': entry.hist_attempts,
'hist_successes': entry.hist_successes,
}
tags = dict(self.params)
tags["rate"] = rate
self.rates[rate] = fields
sample = {
"measurement": self.name,
"tags": tags,
"time": timestamp,
"fields": fields
}
points.append(sample)
# compute statistics
self.best_prob = \
max(self.rates.keys(), key=(lambda key: self.rates[key]['prob']))
self.best_tp = \
max(self.rates.keys(), key=(lambda key: self.rates[key]['cur_tp']))
# save to db
self.write_points(points)
# handle callbacks
self.handle_callbacks()
def launch(context, service_id, sta, every=EVERY):
""" Initialize the module. """
return RCStats(context=context, service_id=service_id, sta=sta,
every=every)
|
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import attr
from frozendict import frozendict
from twisted.internet.defer import Deferred
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import JsonDict, StateMap
if TYPE_CHECKING:
from synapse.storage import Storage
from synapse.storage.databases.main import DataStore
@attr.s(slots=True, auto_attribs=True)
class EventContext:
"""
Holds information relevant to persisting an event
Attributes:
rejected: A rejection reason if the event was rejected, else False
_state_group: The ID of the state group for this event. Note that state events
are persisted with a state group which includes the new event, so this is
effectively the state *after* the event in question.
For a *rejected* state event, where the state of the rejected event is
ignored, this state_group should never make it into the
event_to_state_groups table. Indeed, inspecting this value for a rejected
state event is almost certainly incorrect.
For an outlier, where we don't have the state at the event, this will be
None.
Note that this is a private attribute: it should be accessed via
the ``state_group`` property.
state_group_before_event: The ID of the state group representing the state
of the room before this event.
If this is a non-state event, this will be the same as ``state_group``. If
it's a state event, it will be the same as ``prev_group``.
If ``state_group`` is None (ie, the event is an outlier),
``state_group_before_event`` will always also be ``None``.
prev_group: If it is known, ``state_group``'s prev_group. Note that this being
None does not necessarily mean that ``state_group`` does not have
a prev_group!
            If the event is a state event, this is normally the same as ``state_group_before_event``.
If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
will always also be ``None``.
Note that this *not* (necessarily) the state group associated with
``_prev_state_ids``.
delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
and ``state_group``.
app_service: If this event is being sent by a (local) application service, that
app service.
_current_state_ids: The room state map, including this event - ie, the state
in ``state_group``.
(type, state_key) -> event_id
For an outlier, this is {}
Note that this is a private attribute: it should be accessed via
            ``get_current_state_ids``. The ``_AsyncEventContextImpl`` implementation calculates this
on-demand: it will be None until that happens.
_prev_state_ids: The room state map, excluding this event - ie, the state
in ``state_group_before_event``. For a non-state
            event, this will be the same as ``_current_state_ids``.
Note that it is a completely different thing to prev_group!
(type, state_key) -> event_id
For an outlier, this is {}
As with _current_state_ids, this is a private attribute. It should be
accessed via get_prev_state_ids.
partial_state: if True, we may be storing this event with a temporary,
incomplete state.
"""
rejected: Union[bool, str] = False
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
prev_group: Optional[int] = None
delta_ids: Optional[StateMap[str]] = None
app_service: Optional[ApplicationService] = None
_current_state_ids: Optional[StateMap[str]] = None
_prev_state_ids: Optional[StateMap[str]] = None
partial_state: bool = False
@staticmethod
def with_state(
state_group: Optional[int],
state_group_before_event: Optional[int],
current_state_ids: Optional[StateMap[str]],
prev_state_ids: Optional[StateMap[str]],
partial_state: bool,
prev_group: Optional[int] = None,
delta_ids: Optional[StateMap[str]] = None,
) -> "EventContext":
return EventContext(
current_state_ids=current_state_ids,
prev_state_ids=prev_state_ids,
state_group=state_group,
state_group_before_event=state_group_before_event,
prev_group=prev_group,
delta_ids=delta_ids,
partial_state=partial_state,
)
@staticmethod
def for_outlier() -> "EventContext":
"""Return an EventContext instance suitable for persisting an outlier event"""
return EventContext(
current_state_ids={},
prev_state_ids={},
)
async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
"""Converts self to a type that can be serialized as JSON, and then
deserialized by `deserialize`
Args:
event: The event that this context relates to
Returns:
The serialized event.
"""
# We don't serialize the full state dicts, instead they get pulled out
# of the DB on the other side. However, the other side can't figure out
# the prev_state_ids, so if we're a state event we include the event
# id that we replaced in the state.
if event.is_state():
prev_state_ids = await self.get_prev_state_ids()
prev_state_id = prev_state_ids.get((event.type, event.state_key))
else:
prev_state_id = None
return {
"prev_state_id": prev_state_id,
"event_type": event.type,
"event_state_key": event.get_state_key(),
"state_group": self._state_group,
"state_group_before_event": self.state_group_before_event,
"rejected": self.rejected,
"prev_group": self.prev_group,
"delta_ids": _encode_state_dict(self.delta_ids),
"app_service_id": self.app_service.id if self.app_service else None,
"partial_state": self.partial_state,
}
@staticmethod
def deserialize(storage: "Storage", input: JsonDict) -> "EventContext":
"""Converts a dict that was produced by `serialize` back into a
EventContext.
Args:
storage: Used to convert AS ID to AS object and fetch state.
input: A dict produced by `serialize`
Returns:
The event context.
"""
context = _AsyncEventContextImpl(
# We use the state_group and prev_state_id stuff to pull the
# current_state_ids out of the DB and construct prev_state_ids.
storage=storage,
prev_state_id=input["prev_state_id"],
event_type=input["event_type"],
event_state_key=input["event_state_key"],
state_group=input["state_group"],
state_group_before_event=input["state_group_before_event"],
prev_group=input["prev_group"],
delta_ids=_decode_state_dict(input["delta_ids"]),
rejected=input["rejected"],
partial_state=input.get("partial_state", False),
)
app_service_id = input["app_service_id"]
if app_service_id:
context.app_service = storage.main.get_app_service_by_id(app_service_id)
return context
@property
def state_group(self) -> Optional[int]:
"""The ID of the state group for this event.
Note that state events are persisted with a state group which includes the new
event, so this is effectively the state *after* the event in question.
For an outlier, where we don't have the state at the event, this will be None.
It is an error to access this for a rejected event, since rejected state should
not make it into the room state. Accessing this property will raise an exception
if ``rejected`` is set.
"""
if self.rejected:
raise RuntimeError("Attempt to access state_group of rejected event")
return self._state_group
async def get_current_state_ids(self) -> Optional[StateMap[str]]:
"""
Gets the room state map, including this event - ie, the state in ``state_group``
It is an error to access this for a rejected event, since rejected state should
not make it into the room state. This method will raise an exception if
``rejected`` is set.
Returns:
Returns None if state_group is None, which happens when the associated
event is an outlier.
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
if self.rejected:
raise RuntimeError("Attempt to access state_ids of rejected event")
await self._ensure_fetched()
return self._current_state_ids
async def get_prev_state_ids(self) -> StateMap[str]:
"""
Gets the room state map, excluding this event.
For a non-state event, this will be the same as get_current_state_ids().
Returns:
Returns {} if state_group is None, which happens when the associated
event is an outlier.
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
await self._ensure_fetched()
# There *should* be previous state IDs now.
assert self._prev_state_ids is not None
return self._prev_state_ids
def get_cached_current_state_ids(self) -> Optional[StateMap[str]]:
"""Gets the current state IDs if we have them already cached.
It is an error to access this for a rejected event, since rejected state should
not make it into the room state. This method will raise an exception if
``rejected`` is set.
Returns:
Returns None if we haven't cached the state or if state_group is None
(which happens when the associated event is an outlier).
            Otherwise, returns the current state IDs.
"""
if self.rejected:
raise RuntimeError("Attempt to access state_ids of rejected event")
return self._current_state_ids
async def _ensure_fetched(self) -> None:
return None
@attr.s(slots=True)
class _AsyncEventContextImpl(EventContext):
"""
An implementation of EventContext which fetches _current_state_ids and
_prev_state_ids from the database on demand.
Attributes:
_storage
_fetching_state_deferred: Resolves when *_state_ids have been calculated.
None if we haven't started calculating yet
_event_type: The type of the event the context is associated with.
_event_state_key: The state_key of the event the context is associated with.
_prev_state_id: If the event associated with the context is a state event,
then `_prev_state_id` is the event_id of the state that was replaced.
"""
# This needs to have a default as we're inheriting
_storage: "Storage" = attr.ib(default=None)
_prev_state_id: Optional[str] = attr.ib(default=None)
_event_type: str = attr.ib(default=None)
_event_state_key: Optional[str] = attr.ib(default=None)
_fetching_state_deferred: Optional["Deferred[None]"] = attr.ib(default=None)
async def _ensure_fetched(self) -> None:
if not self._fetching_state_deferred:
self._fetching_state_deferred = run_in_background(self._fill_out_state)
await make_deferred_yieldable(self._fetching_state_deferred)
async def _fill_out_state(self) -> None:
"""Called to populate the _current_state_ids and _prev_state_ids
attributes by loading from the database.
"""
if self.state_group is None:
# No state group means the event is an outlier. Usually the state_ids dicts are also
# pre-set to empty dicts, but they get reset when the context is serialized, so set
# them to empty dicts again here.
self._current_state_ids = {}
self._prev_state_ids = {}
return
current_state_ids = await self._storage.state.get_state_ids_for_group(
self.state_group
)
# Set this separately so mypy knows current_state_ids is not None.
self._current_state_ids = current_state_ids
if self._event_state_key is not None:
self._prev_state_ids = dict(current_state_ids)
key = (self._event_type, self._event_state_key)
if self._prev_state_id:
self._prev_state_ids[key] = self._prev_state_id
else:
self._prev_state_ids.pop(key, None)
else:
self._prev_state_ids = current_state_ids
def _encode_state_dict(
state_dict: Optional[StateMap[str]],
) -> Optional[List[Tuple[str, str, str]]]:
"""Since dicts of (type, state_key) -> event_id cannot be serialized in
JSON we need to convert them to a form that can.
"""
if state_dict is None:
return None
return [(etype, state_key, v) for (etype, state_key), v in state_dict.items()]
def _decode_state_dict(
input: Optional[List[Tuple[str, str, str]]]
) -> Optional[StateMap[str]]:
"""Decodes a state dict encoded using `_encode_state_dict` above"""
if input is None:
return None
return frozendict({(etype, state_key): v for etype, state_key, v in input})
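# Minimal round-trip sketch (editor's addition, not part of Synapse itself): the two
# helpers above turn a (type, state_key) -> event_id map into a JSON-friendly list of
# triples and back. The function name and event IDs are hypothetical, for illustration only.
def _example_state_dict_roundtrip() -> Optional[StateMap[str]]:
    state_dict: StateMap[str] = {
        ("m.room.member", "@alice:example.com"): "$membership_event_id",
        ("m.room.create", ""): "$create_event_id",
    }
    encoded = _encode_state_dict(state_dict)
    # encoded == [("m.room.member", "@alice:example.com", "$membership_event_id"),
    #             ("m.room.create", "", "$create_event_id")]
    return _decode_state_dict(encoded)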
|
|
import math
from functools import reduce
import numpy as np
import operator
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
from numba.core.imputils import lower_builtin, lower_getattr, lower_getattr_generic, lower_cast, lower_constant, iternext_impl, call_getiter, call_iternext, impl_ret_borrowed, impl_ret_untracked, numba_typeref_ctor
from numba.core import typing, types, utils, cgutils
from numba.core.extending import overload, intrinsic
from numba.core.typeconv import Conversion
from numba.core.errors import TypingError
@overload(operator.truth)
def ol_truth(val):
if isinstance(val, types.Boolean):
def impl(val):
return val
return impl
@lower_builtin(operator.is_not, types.Any, types.Any)
def generic_is_not(context, builder, sig, args):
"""
Implement `x is not y` as `not (x is y)`.
"""
is_impl = context.get_function(operator.is_, sig)
return builder.not_(is_impl(builder, args))
@lower_builtin(operator.is_, types.Any, types.Any)
def generic_is(context, builder, sig, args):
"""
Default implementation for `x is y`
"""
lhs_type, rhs_type = sig.args
# the lhs and rhs have the same type
if lhs_type == rhs_type:
# mutable types
if lhs_type.mutable:
raise NotImplementedError('no default `is` implementation')
# immutable types
else:
# fallbacks to `==`
try:
eq_impl = context.get_function(operator.eq, sig)
except NotImplementedError:
# no `==` implemented for this type
return cgutils.false_bit
else:
return eq_impl(builder, args)
else:
return cgutils.false_bit
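# Editor's note: in other words, for immutable values `is` degrades to `==` (or a
# constant False when no `==` is defined), while mutable values of the same type have
# no default identity semantics and must provide their own implementation.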
@lower_builtin(operator.is_, types.Opaque, types.Opaque)
def opaque_is(context, builder, sig, args):
"""
Implementation for `x is y` for Opaque types.
"""
lhs_type, rhs_type = sig.args
# the lhs and rhs have the same type
if lhs_type == rhs_type:
lhs_ptr = builder.ptrtoint(args[0], cgutils.intp_t)
rhs_ptr = builder.ptrtoint(args[1], cgutils.intp_t)
return builder.icmp_unsigned('==', lhs_ptr, rhs_ptr)
else:
return cgutils.false_bit
@lower_builtin(operator.is_, types.Boolean, types.Boolean)
def bool_is_impl(context, builder, sig, args):
"""
Implementation for `x is y` for types derived from types.Boolean
(e.g. BooleanLiteral), and cross-checks between literal and non-literal
booleans, to satisfy Python's behavior preserving identity for bools.
"""
arg1, arg2 = args
arg1_type, arg2_type = sig.args
_arg1 = context.cast(builder, arg1, arg1_type, types.boolean)
_arg2 = context.cast(builder, arg2, arg2_type, types.boolean)
eq_impl = context.get_function(
operator.eq,
typing.signature(types.boolean, types.boolean, types.boolean)
)
return eq_impl(builder, (_arg1, _arg2))
# keep types.IntegerLiteral, as otherwise there's ambiguity between this and int_eq_impl
@lower_builtin(operator.eq, types.Literal, types.Literal)
@lower_builtin(operator.eq, types.IntegerLiteral, types.IntegerLiteral)
def const_eq_impl(context, builder, sig, args):
arg1, arg2 = sig.args
val = 0
if arg1.literal_value == arg2.literal_value:
val = 1
res = ir.Constant(ir.IntType(1), val)
return impl_ret_untracked(context, builder, sig.return_type, res)
# keep types.IntegerLiteral, as otherwise there's ambiguity between this and int_ne_impl
@lower_builtin(operator.ne, types.Literal, types.Literal)
@lower_builtin(operator.ne, types.IntegerLiteral, types.IntegerLiteral)
def const_ne_impl(context, builder, sig, args):
arg1, arg2 = sig.args
val = 0
if arg1.literal_value != arg2.literal_value:
val = 1
res = ir.Constant(ir.IntType(1), val)
return impl_ret_untracked(context, builder, sig.return_type, res)
def gen_non_eq(val):
def none_equality(a, b):
a_none = isinstance(a, types.NoneType)
b_none = isinstance(b, types.NoneType)
if a_none and b_none:
def impl(a, b):
return val
return impl
elif a_none ^ b_none:
def impl(a, b):
return not val
return impl
return none_equality
overload(operator.eq)(gen_non_eq(True))
overload(operator.ne)(gen_non_eq(False))
#-------------------------------------------------------------------------------
@lower_getattr_generic(types.DeferredType)
def deferred_getattr(context, builder, typ, value, attr):
"""
Deferred.__getattr__ => redirect to the actual type.
"""
inner_type = typ.get()
val = context.cast(builder, value, typ, inner_type)
imp = context.get_getattr(inner_type, attr)
return imp(context, builder, inner_type, val, attr)
@lower_cast(types.Any, types.DeferredType)
@lower_cast(types.Optional, types.DeferredType)
@lower_cast(types.Boolean, types.DeferredType)
def any_to_deferred(context, builder, fromty, toty, val):
actual = context.cast(builder, val, fromty, toty.get())
model = context.data_model_manager[toty]
return model.set(builder, model.make_uninitialized(), actual)
@lower_cast(types.DeferredType, types.Any)
@lower_cast(types.DeferredType, types.Boolean)
@lower_cast(types.DeferredType, types.Optional)
def deferred_to_any(context, builder, fromty, toty, val):
model = context.data_model_manager[fromty]
val = model.get(builder, val)
return context.cast(builder, val, fromty.get(), toty)
#------------------------------------------------------------------------------
@lower_builtin(operator.getitem, types.CPointer, types.Integer)
def getitem_cpointer(context, builder, sig, args):
base_ptr, idx = args
elem_ptr = builder.gep(base_ptr, [idx])
res = builder.load(elem_ptr)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(operator.setitem, types.CPointer, types.Integer, types.Any)
def setitem_cpointer(context, builder, sig, args):
base_ptr, idx, val = args
elem_ptr = builder.gep(base_ptr, [idx])
builder.store(val, elem_ptr)
#-------------------------------------------------------------------------------
def do_minmax(context, builder, argtys, args, cmpop):
assert len(argtys) == len(args), (argtys, args)
assert len(args) > 0
def binary_minmax(accumulator, value):
# This is careful to reproduce Python's algorithm, e.g.
# max(1.5, nan, 2.5) should return 2.5 (not nan or 1.5)
accty, acc = accumulator
vty, v = value
ty = context.typing_context.unify_types(accty, vty)
assert ty is not None
acc = context.cast(builder, acc, accty, ty)
v = context.cast(builder, v, vty, ty)
cmpsig = typing.signature(types.boolean, ty, ty)
ge = context.get_function(cmpop, cmpsig)
pred = ge(builder, (v, acc))
res = builder.select(pred, v, acc)
return ty, res
typvals = zip(argtys, args)
resty, resval = reduce(binary_minmax, typvals)
return resval
@lower_builtin(max, types.BaseTuple)
def max_iterable(context, builder, sig, args):
argtys = list(sig.args[0])
args = cgutils.unpack_tuple(builder, args[0])
return do_minmax(context, builder, argtys, args, operator.gt)
@lower_builtin(max, types.VarArg(types.Any))
def max_vararg(context, builder, sig, args):
return do_minmax(context, builder, sig.args, args, operator.gt)
@lower_builtin(min, types.BaseTuple)
def min_iterable(context, builder, sig, args):
argtys = list(sig.args[0])
args = cgutils.unpack_tuple(builder, args[0])
return do_minmax(context, builder, argtys, args, operator.lt)
@lower_builtin(min, types.VarArg(types.Any))
def min_vararg(context, builder, sig, args):
return do_minmax(context, builder, sig.args, args, operator.lt)
def _round_intrinsic(tp):
# round() rounds half to even
return "llvm.rint.f%d" % (tp.bitwidth,)
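# Editor's note: for a 64-bit float this resolves to the 'llvm.rint.f64' intrinsic,
# which rounds to the nearest integer using the current rounding mode (round half to
# even by default), matching Python 3's round() behaviour for the unary case.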
@lower_builtin(round, types.Float)
def round_impl_unary(context, builder, sig, args):
fltty = sig.args[0]
llty = context.get_value_type(fltty)
module = builder.module
fnty = Type.function(llty, [llty])
fn = cgutils.get_or_insert_function(module, fnty, _round_intrinsic(fltty))
res = builder.call(fn, args)
# unary round() returns an int
res = builder.fptosi(res, context.get_value_type(sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(round, types.Float, types.Integer)
def round_impl_binary(context, builder, sig, args):
fltty = sig.args[0]
# Allow calling the intrinsic from the Python implementation below.
# This avoids the conversion to an int in Python 3's unary round().
_round = types.ExternalFunction(
_round_intrinsic(fltty), typing.signature(fltty, fltty))
def round_ndigits(x, ndigits):
if math.isinf(x) or math.isnan(x):
return x
if ndigits >= 0:
if ndigits > 22:
# pow1 and pow2 are each safe from overflow, but
# pow1*pow2 ~= pow(10.0, ndigits) might overflow.
pow1 = 10.0 ** (ndigits - 22)
pow2 = 1e22
else:
pow1 = 10.0 ** ndigits
pow2 = 1.0
y = (x * pow1) * pow2
if math.isinf(y):
return x
return (_round(y) / pow2) / pow1
else:
pow1 = 10.0 ** (-ndigits)
y = x / pow1
return _round(y) * pow1
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
#-------------------------------------------------------------------------------
# Numeric constructors
@lower_builtin(int, types.Any)
@lower_builtin(float, types.Any)
def int_impl(context, builder, sig, args):
[ty] = sig.args
[val] = args
res = context.cast(builder, val, ty, sig.return_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(complex, types.VarArg(types.Any))
def complex_impl(context, builder, sig, args):
complex_type = sig.return_type
float_type = complex_type.underlying_float
if len(sig.args) == 1:
[argty] = sig.args
[arg] = args
if isinstance(argty, types.Complex):
# Cast Complex* to Complex*
res = context.cast(builder, arg, argty, complex_type)
return impl_ret_untracked(context, builder, sig.return_type, res)
else:
real = context.cast(builder, arg, argty, float_type)
imag = context.get_constant(float_type, 0)
elif len(sig.args) == 2:
[realty, imagty] = sig.args
[real, imag] = args
real = context.cast(builder, real, realty, float_type)
imag = context.cast(builder, imag, imagty, float_type)
cmplx = context.make_complex(builder, complex_type)
cmplx.real = real
cmplx.imag = imag
res = cmplx._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(types.NumberClass, types.Any)
def number_constructor(context, builder, sig, args):
"""
Call a number class, e.g. np.int32(...)
"""
if isinstance(sig.return_type, types.Array):
# Array constructor
dt = sig.return_type.dtype
def foo(*arg_hack):
return np.array(arg_hack, dtype=dt)
res = context.compile_internal(builder, foo, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
else:
# Scalar constructor
[val] = args
[valty] = sig.args
return context.cast(builder, val, valty, sig.return_type)
#-------------------------------------------------------------------------------
# Constants
@lower_constant(types.Dummy)
def constant_dummy(context, builder, ty, pyval):
# This handles None, etc.
return context.get_dummy_value()
@lower_constant(types.ExternalFunctionPointer)
def constant_function_pointer(context, builder, ty, pyval):
ptrty = context.get_function_pointer_type(ty)
ptrval = context.add_dynamic_addr(builder, ty.get_pointer(pyval),
info=str(pyval))
return builder.bitcast(ptrval, ptrty)
@lower_constant(types.Optional)
def constant_optional(context, builder, ty, pyval):
if pyval is None:
return context.make_optional_none(builder, ty.type)
else:
return context.make_optional_value(builder, ty.type, pyval)
# -----------------------------------------------------------------------------
@lower_builtin(type, types.Any)
def type_impl(context, builder, sig, args):
"""
One-argument type() builtin.
"""
return context.get_dummy_value()
@lower_builtin(iter, types.IterableType)
def iter_impl(context, builder, sig, args):
ty, = sig.args
val, = args
iterval = call_getiter(context, builder, ty, val)
return iterval
@lower_builtin(next, types.IteratorType)
def next_impl(context, builder, sig, args):
iterty, = sig.args
iterval, = args
res = call_iternext(context, builder, iterty, iterval)
with builder.if_then(builder.not_(res.is_valid()), likely=False):
context.call_conv.return_user_exc(builder, StopIteration, ())
return res.yielded_value()
# -----------------------------------------------------------------------------
@lower_builtin("not in", types.Any, types.Any)
def not_in(context, builder, sig, args):
def in_impl(a, b):
return operator.contains(b, a)
res = context.compile_internal(builder, in_impl, sig, args)
return builder.not_(res)
# -----------------------------------------------------------------------------
@lower_builtin(len, types.ConstSized)
def constsized_len(context, builder, sig, args):
[ty] = sig.args
retty = sig.return_type
res = context.get_constant(retty, len(ty.types))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(bool, types.Sized)
def sized_bool(context, builder, sig, args):
[ty] = sig.args
if len(ty):
return cgutils.true_bit
else:
return cgutils.false_bit
@lower_builtin(tuple)
def lower_empty_tuple(context, builder, sig, args):
retty = sig.return_type
res = context.get_constant_undef(retty)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(tuple, types.BaseTuple)
def lower_tuple(context, builder, sig, args):
val, = args
return impl_ret_untracked(context, builder, sig.return_type, val)
@overload(bool)
def bool_sequence(x):
valid_types = (
types.CharSeq,
types.UnicodeCharSeq,
types.DictType,
types.ListType,
types.UnicodeType,
types.Set,
)
if isinstance(x, valid_types):
def bool_impl(x):
return len(x) > 0
return bool_impl
@overload(bool, inline='always')
def bool_none(x):
if isinstance(x, types.NoneType) or x is None:
return lambda x: False
# -----------------------------------------------------------------------------
def get_type_max_value(typ):
if isinstance(typ, types.Float):
return np.inf
if isinstance(typ, types.Integer):
return typ.maxval
raise NotImplementedError("Unsupported type")
def get_type_min_value(typ):
if isinstance(typ, types.Float):
return -np.inf
if isinstance(typ, types.Integer):
return typ.minval
raise NotImplementedError("Unsupported type")
@lower_builtin(get_type_min_value, types.NumberClass)
@lower_builtin(get_type_min_value, types.DType)
def lower_get_type_min_value(context, builder, sig, args):
typ = sig.args[0].dtype
bw = typ.bitwidth
if isinstance(typ, types.Integer):
lty = ir.IntType(bw)
val = typ.minval
res = ir.Constant(lty, val)
elif isinstance(typ, types.Float):
if bw == 32:
lty = ir.FloatType()
elif bw == 64:
lty = ir.DoubleType()
else:
raise NotImplementedError("llvmlite only supports 32 and 64 bit floats")
        npty = getattr(np, 'float{}'.format(bw))
        res = ir.Constant(lty, npty(-np.inf))
return impl_ret_untracked(context, builder, lty, res)
@lower_builtin(get_type_max_value, types.NumberClass)
@lower_builtin(get_type_max_value, types.DType)
def lower_get_type_max_value(context, builder, sig, args):
typ = sig.args[0].dtype
bw = typ.bitwidth
if isinstance(typ, types.Integer):
lty = ir.IntType(bw)
val = typ.maxval
res = ir.Constant(lty, val)
elif isinstance(typ, types.Float):
if bw == 32:
lty = ir.FloatType()
elif bw == 64:
lty = ir.DoubleType()
else:
raise NotImplementedError("llvmlite only supports 32 and 64 bit floats")
        npty = getattr(np, 'float{}'.format(bw))
        res = ir.Constant(lty, npty(np.inf))
return impl_ret_untracked(context, builder, lty, res)
# -----------------------------------------------------------------------------
from numba.core.typing.builtins import IndexValue, IndexValueType
from numba.extending import overload, register_jitable
@lower_builtin(IndexValue, types.intp, types.Type)
@lower_builtin(IndexValue, types.uintp, types.Type)
def impl_index_value(context, builder, sig, args):
typ = sig.return_type
index, value = args
index_value = cgutils.create_struct_proxy(typ)(context, builder)
index_value.index = index
index_value.value = value
return index_value._getvalue()
@overload(min)
def indval_min(indval1, indval2):
if isinstance(indval1, IndexValueType) and \
isinstance(indval2, IndexValueType):
def min_impl(indval1, indval2):
if indval1.value > indval2.value:
return indval2
return indval1
return min_impl
@overload(max)
def indval_max(indval1, indval2):
if isinstance(indval1, IndexValueType) and \
isinstance(indval2, IndexValueType):
def max_impl(indval1, indval2):
if indval2.value > indval1.value:
return indval2
return indval1
return max_impl
greater_than = register_jitable(lambda a, b: a > b)
less_than = register_jitable(lambda a, b: a < b)
@register_jitable
def min_max_impl(iterable, op):
if isinstance(iterable, types.IterableType):
def impl(iterable):
it = iter(iterable)
return_val = next(it)
for val in it:
if op(val, return_val):
return_val = val
return return_val
return impl
@overload(min)
def iterable_min(iterable):
return min_max_impl(iterable, less_than)
@overload(max)
def iterable_max(iterable):
return min_max_impl(iterable, greater_than)
@lower_builtin(types.TypeRef, types.VarArg(types.Any))
def redirect_type_ctor(context, builder, sig, args):
"""Redirect constructor implementation to `numba_typeref_ctor(cls, *args)`,
    which should be overloaded by the type implementer.
For example:
d = Dict()
`d` will be typed as `TypeRef[DictType]()`. Thus, it will call into this
implementation. We need to redirect the lowering to a function
named ``numba_typeref_ctor``.
"""
cls = sig.return_type
def call_ctor(cls, *args):
return numba_typeref_ctor(cls, *args)
# Pack arguments into a tuple for `*args`
ctor_args = types.Tuple.from_types(sig.args)
# Make signature T(TypeRef[T], *args) where T is cls
sig = typing.signature(cls, types.TypeRef(cls), ctor_args)
if len(ctor_args) > 0:
args = (context.get_dummy_value(), # Type object has no runtime repr.
context.make_tuple(builder, ctor_args, args))
else:
args = (context.get_dummy_value(), # Type object has no runtime repr.
context.make_tuple(builder, ctor_args, ()))
return context.compile_internal(builder, call_ctor, sig, args)
# ------------------------------------------------------------------------------
# map, filter, reduce
@overload(map)
def ol_map(func, iterable, *args):
def impl(func, iterable, *args):
for x in zip(iterable, *args):
yield func(*x)
return impl
@overload(filter)
def ol_filter(func, iterable):
if (func is None) or isinstance(func, types.NoneType):
def impl(func, iterable):
for x in iterable:
if x:
yield x
else:
def impl(func, iterable):
for x in iterable:
if func(x):
yield x
return impl
|
|
"""
Performance test for asset metadata in the modulestore.
"""
from path import Path as path
import unittest
from tempfile import mkdtemp
import itertools
from shutil import rmtree
from bson.code import Code
import datetime
import ddt
#from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.tests.utils import (
MODULESTORE_SETUPS,
SHORT_NAME_MAP,
TEST_DATA_DIR,
)
from xmodule.modulestore.perf_tests.generate_asset_xml import make_asset_xml, validate_xml, ASSET_XSD_FILE
# The dependency below needs to be installed manually from the development.txt file, which doesn't
# get installed during unit tests!
try:
from code_block_timer import CodeBlockTimer
except ImportError:
CodeBlockTimer = None
# Number of assets saved in the modulestore per test run.
ASSET_AMOUNT_PER_TEST = (0, 1, 10, 100, 1000, 10000)
# Use only this course in asset metadata performance testing.
COURSE_NAME = 'manual-testing-complete'
# A list of courses to test - only one.
TEST_COURSE = (COURSE_NAME, )
ALL_SORTS = (
('displayname', ModuleStoreEnum.SortOrder.ascending),
('displayname', ModuleStoreEnum.SortOrder.descending),
('uploadDate', ModuleStoreEnum.SortOrder.ascending),
('uploadDate', ModuleStoreEnum.SortOrder.descending),
)
# pylint: disable=invalid-name
TEST_DIR = path(__file__).dirname()
PLATFORM_ROOT = TEST_DIR.parent.parent.parent.parent.parent.parent
TEST_DATA_ROOT = PLATFORM_ROOT / TEST_DATA_DIR
COURSE_DATA_DIR = TEST_DATA_ROOT / COURSE_NAME
# Path where generated asset file is saved.
ASSET_XML_PATH = COURSE_DATA_DIR / AssetMetadata.EXPORTED_ASSET_DIR / AssetMetadata.EXPORTED_ASSET_FILENAME
# Path where asset XML schema definition file is located.
ASSET_XSD_PATH = PLATFORM_ROOT / "common" / "lib" / "xmodule" / "xmodule" / "assetstore" / "tests" / ASSET_XSD_FILE
@ddt.ddt
# Eventually, exclude this attribute from regular unittests while running *only* tests
# with this attribute during regular performance tests.
# @attr("perf_test")
@unittest.skip
class CrossStoreXMLRoundtrip(unittest.TestCase):
"""
This class exists to time XML import and export between different modulestore
    classes with different amounts of asset metadata.
"""
# Use this attribute to skip this test on regular unittest CI runs.
perf_test = True
def setUp(self):
super(CrossStoreXMLRoundtrip, self).setUp()
self.export_dir = mkdtemp()
self.addCleanup(rmtree, self.export_dir, ignore_errors=True)
@ddt.data(*itertools.product(
MODULESTORE_SETUPS,
MODULESTORE_SETUPS,
ASSET_AMOUNT_PER_TEST
))
@ddt.unpack
def test_generate_import_export_timings(self, source_ms, dest_ms, num_assets):
"""
Generate timings for different amounts of asset metadata and different modulestores.
"""
if CodeBlockTimer is None:
raise SkipTest("CodeBlockTimer undefined.")
desc = "XMLRoundTrip:{}->{}:{}".format(
SHORT_NAME_MAP[source_ms],
SHORT_NAME_MAP[dest_ms],
num_assets
)
with CodeBlockTimer(desc):
with CodeBlockTimer("fake_assets"):
# First, make the fake asset metadata.
make_asset_xml(num_assets, ASSET_XML_PATH)
validate_xml(ASSET_XSD_PATH, ASSET_XML_PATH)
with source_ms.build() as (source_content, source_store):
with dest_ms.build() as (dest_content, dest_store):
source_course_key = source_store.make_course_key('a', 'course', 'course')
dest_course_key = dest_store.make_course_key('a', 'course', 'course')
with CodeBlockTimer("initial_import"):
import_course_from_xml(
source_store,
'test_user',
TEST_DATA_ROOT,
source_dirs=TEST_COURSE,
static_content_store=source_content,
target_id=source_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
with CodeBlockTimer("export"):
export_course_to_xml(
source_store,
source_content,
source_course_key,
self.export_dir,
'exported_source_course',
)
with CodeBlockTimer("second_import"):
import_course_from_xml(
dest_store,
'test_user',
self.export_dir,
source_dirs=['exported_source_course'],
static_content_store=dest_content,
target_id=dest_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
@ddt.ddt
# Eventually, exclude this attribute from regular unittests while running *only* tests
# with this attribute during regular performance tests.
# @attr("perf_test")
@unittest.skip
class FindAssetTest(unittest.TestCase):
"""
This class exists to time asset finding in different modulestore
classes with different amounts of asset metadata.
"""
# Use this attr to skip this test on regular unittest CI runs.
perf_test = True
def setUp(self):
super(FindAssetTest, self).setUp()
self.export_dir = mkdtemp()
self.addCleanup(rmtree, self.export_dir, ignore_errors=True)
@ddt.data(*itertools.product(
MODULESTORE_SETUPS,
ASSET_AMOUNT_PER_TEST,
))
@ddt.unpack
def test_generate_find_timings(self, source_ms, num_assets):
"""
Generate timings for different amounts of asset metadata and different modulestores.
"""
if CodeBlockTimer is None:
raise SkipTest("CodeBlockTimer undefined.")
desc = "FindAssetTest:{}:{}".format(
SHORT_NAME_MAP[source_ms],
num_assets,
)
with CodeBlockTimer(desc):
with CodeBlockTimer("fake_assets"):
# First, make the fake asset metadata.
make_asset_xml(num_assets, ASSET_XML_PATH)
validate_xml(ASSET_XSD_PATH, ASSET_XML_PATH)
with source_ms.build() as (source_content, source_store):
source_course_key = source_store.make_course_key('a', 'course', 'course')
asset_key = source_course_key.make_asset_key(
AssetMetadata.GENERAL_ASSET_TYPE, 'silly_cat_picture.gif'
)
with CodeBlockTimer("initial_import"):
import_course_from_xml(
source_store,
'test_user',
TEST_DATA_ROOT,
source_dirs=TEST_COURSE,
static_content_store=source_content,
target_id=source_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
with CodeBlockTimer("find_nonexistent_asset"):
                    # It would be more correct to use AssetManager.find() - but since the test
                    # has created its own test modulestore, the AssetManager can't be used.
__ = source_store.find_asset_metadata(asset_key)
# Perform get_all_asset_metadata for each sort.
for sort in ALL_SORTS:
with CodeBlockTimer("get_asset_list:{}-{}".format(
sort[0],
'asc' if sort[1] == ModuleStoreEnum.SortOrder.ascending else 'desc'
)):
# Grab two ranges of 50 assets using different sorts.
# Why 50? That's how many are displayed on the current Studio "Files & Uploads" page.
start_middle = num_assets / 2
__ = source_store.get_all_asset_metadata(
source_course_key, 'asset', start=0, sort=sort, maxresults=50
)
__ = source_store.get_all_asset_metadata(
source_course_key, 'asset', start=start_middle, sort=sort, maxresults=50
)
@ddt.ddt
# Eventually, exclude this attribute from regular unittests while running *only* tests
# with this attribute during regular performance tests.
# @attr("perf_test")
@unittest.skip
class TestModulestoreAssetSize(unittest.TestCase):
"""
    This class exists to measure the size of asset metadata in different modulestore
    classes with different amounts of asset metadata.
"""
# Use this attribute to skip this test on regular unittest CI runs.
perf_test = True
test_run_time = datetime.datetime.now()
@ddt.data(*itertools.product(
MODULESTORE_SETUPS,
ASSET_AMOUNT_PER_TEST
))
@ddt.unpack
def test_asset_sizes(self, source_ms, num_assets):
"""
Generate timings for different amounts of asset metadata and different modulestores.
"""
# First, make the fake asset metadata.
make_asset_xml(num_assets, ASSET_XML_PATH)
validate_xml(ASSET_XSD_PATH, ASSET_XML_PATH)
with source_ms.build() as (source_content, source_store):
source_course_key = source_store.make_course_key('a', 'course', 'course')
import_course_from_xml(
source_store,
'test_user',
TEST_DATA_ROOT,
source_dirs=TEST_COURSE,
static_content_store=source_content,
target_id=source_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
asset_collection = source_ms.asset_collection()
            # Only run the map-reduce if the asset collection actually exists.
if asset_collection.name in asset_collection.database.collection_names():
# Map gets the size of each structure.
mapper = Code("""
function() { emit("size", (this == null) ? 0 : Object.bsonsize(this)) }
""")
# Reduce finds the largest structure size and returns only it.
reducer = Code("""
function(key, values) {
var max_size = 0;
for (var i=0; i < values.length; i++) {
if (values[i] > max_size) {
max_size = values[i];
}
}
return max_size;
}
""")
results = asset_collection.map_reduce(mapper, reducer, "size_results")
result_str = "{} - Store: {:<15} - Num Assets: {:>6} - Result: {}\n".format(
self.test_run_time, SHORT_NAME_MAP[source_ms], num_assets, [r for r in results.find()]
)
with open("bson_sizes.txt", "a") as f:
f.write(result_str)
|
|
'''File adapted from Keras'''
'''This script demonstrates how to build a variational autoencoder with Keras.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import os
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import objectives
from keras.datasets import mnist
from keras.models import load_model
from keras.models import model_from_json
import cv2
import random
import gzip
import cPickle
import time
import glob
from tqdm import tqdm
from prettytensor.layers import xavier_init
img_size = 64
batch_size = 100
original_dim = img_size*img_size
latent_dim = 2
intermediate_dim = 256
nb_epoch = 50
images_path="/home/guido/Desktop/extero_images/"
models_path="/home/guido/datasets/"
dataset_path = models_path + "_nao_images.pkl.gz"
encoder_path = models_path + "_encoder.h5"
encoder_json_path = models_path + "_encoder.json"
decoder_path = models_path + "_decoder.h5"
decoder_json_path = models_path + "_decoder.json"
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
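# Reparameterization trick: instead of sampling z ~ N(z_mean, exp(z_log_var)) directly
# (which would not be differentiable), draw epsilon ~ N(0, I) and compute
# z = z_mean + exp(z_log_var / 2) * epsilon, so gradients can flow back through
# z_mean and z_log_var. The helper below does exactly that.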
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.)
return z_mean + K.exp(z_log_var / 2) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)
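# The loss below is the standard VAE objective: a reconstruction term (per-image binary
# cross-entropy, scaled by original_dim because Keras' binary_crossentropy averages over
# the pixel axis) plus the KL divergence between the approximate posterior
# N(z_mean, exp(z_log_var)) and the standard normal prior,
# KL = -0.5 * sum(1 + z_log_var - z_mean^2 - exp(z_log_var)).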
def vae_loss(x, x_decoded_mean):
xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return xent_loss + kl_loss
vae = Model(x, x_decoded_mean)
vae.compile(optimizer='rmsprop', loss=vae_loss)
# train the VAE (the original Keras example trained on MNIST digits; here we use our own images)
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Is dataset available? If not, load images and create it
x_train=[]
x_val=[]
x_test=[]
if not(os.path.exists(dataset_path)):
print("Reading images...")
for img_file in tqdm(glob.glob(images_path + "*.jpg")):
#print(img_file)
#img = Image.open(path + "/" + file)
img = cv2.imread(img_file, 0) # 0 for loading in gray scale
#img = cv2.imread(img_file) # 0 for loading in gray scale
img = cv2.resize(img, (img_size, img_size))
rnd = random.random()
if (rnd < 0.1):
x_test.append(img)
elif ((rnd > 0.1) and (rnd < 0.2)):
x_val.append(img)
else:
x_train.append(img)
print("Images read.")
dataset = [x_train, x_val, x_test]
f = gzip.open(dataset_path,'wb')
cPickle.dump(dataset, f, protocol=-1)
f.close()
print("Dataset saved into " + dataset_path)
else:
print("Loading dataset: "+ dataset_path)
with gzip.open(dataset_path, 'rb') as f:
x_train, x_val, x_test = cPickle.load(f)
print "Dataset loaded."
#x_train = datasets.fetch_mldata(path)
print len(x_train), len(x_train)%batch_size, len(x_train[1:-(len(x_train)%batch_size)+1])
#x_train = np.asarray(x_train).astype('float32') / 255.
#x_test = np.asarray(x_test).astype('float32') / 255.
# the lengths of x_train and x_test must be multiples of batch_size
x_train = np.asarray(x_train[1:-(len(x_train)%batch_size)+1]).astype('float32') / 255.
x_test = np.asarray(x_test[1:-(len(x_test)%batch_size)+1]).astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
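# The reshape flattens every image to a vector of length original_dim (img_size * img_size),
# matching the Dense encoder input declared with batch_shape=(batch_size, original_dim).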
encoder=[]
decoder=[]
if not(os.path.exists(encoder_path)) and not(os.path.exists(decoder_path)):
print "Training model..."
vae.fit(x_train, x_train,
shuffle=True,
nb_epoch=nb_epoch,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# encoder.compile(optimizer='rmsprop', loss=vae_loss)
encoder_json = encoder.to_json()
with open(encoder_json_path, "w") as json_file:
json_file.write(encoder_json)
encoder.save_weights(encoder_path)
    # build a generator that can sample images from the learned latent distribution
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
decoder = Model(decoder_input, _x_decoded_mean)
decoder_json = decoder.to_json()
with open(decoder_json_path, "w") as json_file:
json_file.write(decoder_json)
decoder.save_weights(decoder_path)
# decoder.compile(optimizer='rmsprop', loss=vae_loss)
# decoder.save(decoder_path)
print "Model trained"
else:
print "Loading model..."
# encoder = load_model(encoder_path)
# decoder = load_model(decoder_path)
enc_json_file = open(encoder_json_path, 'r')
loaded_enc_json = enc_json_file.read()
enc_json_file.close()
encoder = model_from_json(loaded_enc_json)
# load weights into new model
encoder.load_weights(encoder_path)
dec_json_file = open(decoder_json_path, 'r')
loaded_dec_json = dec_json_file.read()
dec_json_file.close()
decoder = model_from_json(loaded_dec_json)
# load weights into new model
decoder.load_weights(decoder_path)
print "Model loaded"
# display a 2D plot of the test images in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
#print x_test_encoded[:,0]
print "min ", np.min(x_test_encoded[:,0]), " ", np.min(x_test_encoded[:,1])
print "max ", np.max(x_test_encoded[:,0]), " ", np.max(x_test_encoded[:,1])
print "mean ", np.mean(x_test_encoded[:,0]), " ", np.mean(x_test_encoded[:,1])
print "stddev ", np.std(x_test_encoded[:,0]), " ", np.std(x_test_encoded[:,1])
#print "min ", np.min(x_test_encoded[:,0]), " ", np.min(x_test_encoded[:,1]), np.min(x_test_encoded[:,2]), np.min(x_test_encoded[:,3])
#print "max ", np.max(x_test_encoded[:,0]), " ", np.max(x_test_encoded[:,1]), np.max(x_test_encoded[:,2]), np.max(x_test_encoded[:,3])
#print "mean ", np.mean(x_test_encoded[:,0]), " ", np.mean(x_test_encoded[:,1]), np.mean(x_test_encoded[:,2]), np.mean(x_test_encoded[:,3])
#print "stddev ", np.std(x_test_encoded[:,0]), " ", np.std(x_test_encoded[:,1]), np.std(x_test_encoded[:,2]), np.std(x_test_encoded[:,3])
plt.figure(figsize=(6, 6))
#plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1])
#plt.colorbar()
plt.show()
#plt.figure(figsize=(6, 6))
##plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
#plt.scatter(x_test_encoded[:, 2], x_test_encoded[:, 3])
##plt.colorbar()
#plt.show()
# display a 2D manifold of the digits
#n = 15 # figure with 15x15 digits
#digit_size = 28
#digit_size = img_size
#figure = np.zeros((digit_size * n, digit_size * n))
# we will sample n points within [-15, 15] standard deviations
#grid_x = np.linspace(-15, 15, n)
#grid_y = np.linspace(-15, 15, n)
#print np.linspace(0., 1.5, 15)
#for i, yi in enumerate(grid_x):
# for j, xi in enumerate(grid_y):
# z_sample = np.array([[xi, yi]])
# x_decoded = generator.predict(z_sample)
# digit = x_decoded[0].reshape(digit_size, digit_size)
# figure[i * digit_size: (i + 1) * digit_size,
# j * digit_size: (j + 1) * digit_size] = digit
#plt.figure(figsize=(10, 10))
#plt.imshow(figure, cmap='Greys_r')
#plt.show()
input = np.zeros(latent_dim)
plt.figure(figsize=(10,10))
plt.ion()
for i in np.arange(100):
for idx,s in enumerate(input):
input[idx] = input[idx] + np.random.normal(0.0, 0.1)
#z_sample = np.array([[xi, yi]])
z_sample = np.array([input])
x_decoded = decoder.predict(z_sample)
generated_img = x_decoded[0].reshape(img_size, img_size)
plt.title(input)
plt.imshow(generated_img, cmap='Greys_r')
time.sleep(0.01)
plt.draw()
plt.show()
plt.clf()
|
|
"""
SAX-based adapter to copy trees from/to the Python standard library.
Use the `ElementTreeContentHandler` class to build an ElementTree from
SAX events.
Use the `ElementTreeProducer` class or the `saxify()` function to fire
the SAX events of an ElementTree against a SAX ContentHandler.
See http://codespeak.net/lxml/sax.html
"""
from xml.sax.handler import ContentHandler
from lxml import etree
from lxml.etree import ElementTree, SubElement
from lxml.etree import Comment, ProcessingInstruction
class SaxError(etree.LxmlError):
"""General SAX error.
"""
def _getNsTag(tag):
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
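# For example: _getNsTag('{http://example.org/ns}item') returns
# ('http://example.org/ns', 'item'), while _getNsTag('item') returns (None, 'item').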
class ElementTreeContentHandler(ContentHandler):
"""Build an lxml ElementTree from SAX events.
"""
def __init__(self, makeelement=None):
ContentHandler.__init__(self)
self._root = None
self._root_siblings = []
self._element_stack = []
self._default_ns = None
self._ns_mapping = { None : [None] }
self._new_mappings = {}
if makeelement is None:
makeelement = etree.Element
self._makeelement = makeelement
def _get_etree(self):
"Contains the generated ElementTree after parsing is finished."
return ElementTree(self._root)
etree = property(_get_etree, doc=_get_etree.__doc__)
def setDocumentLocator(self, locator):
pass
def startDocument(self):
pass
def endDocument(self):
pass
def startPrefixMapping(self, prefix, uri):
self._new_mappings[prefix] = uri
try:
self._ns_mapping[prefix].append(uri)
except KeyError:
self._ns_mapping[prefix] = [uri]
if prefix is None:
self._default_ns = uri
def endPrefixMapping(self, prefix):
ns_uri_list = self._ns_mapping[prefix]
ns_uri_list.pop()
if prefix is None:
self._default_ns = ns_uri_list[-1]
def _buildTag(self, ns_name_tuple):
ns_uri, local_name = ns_name_tuple
if ns_uri:
el_tag = "{%s}%s" % ns_name_tuple
elif self._default_ns:
el_tag = "{%s}%s" % (self._default_ns, local_name)
else:
el_tag = local_name
return el_tag
def startElementNS(self, ns_name, qname, attributes=None):
el_name = self._buildTag(ns_name)
if attributes:
attrs = {}
try:
iter_attributes = attributes.iteritems()
except AttributeError:
iter_attributes = attributes.items()
for name_tuple, value in iter_attributes:
if name_tuple[0]:
attr_name = "{%s}%s" % name_tuple
else:
attr_name = name_tuple[1]
attrs[attr_name] = value
else:
attrs = None
element_stack = self._element_stack
if self._root is None:
element = self._root = \
self._makeelement(el_name, attrs, self._new_mappings)
if self._root_siblings and hasattr(element, 'addprevious'):
for sibling in self._root_siblings:
element.addprevious(sibling)
del self._root_siblings[:]
else:
element = SubElement(element_stack[-1], el_name,
attrs, self._new_mappings)
element_stack.append(element)
self._new_mappings.clear()
def processingInstruction(self, target, data):
pi = ProcessingInstruction(target, data)
if self._root is None:
self._root_siblings.append(pi)
else:
self._element_stack[-1].append(pi)
def endElementNS(self, ns_name, qname):
element = self._element_stack.pop()
el_tag = self._buildTag(ns_name)
if el_tag != element.tag:
raise SaxError("Unexpected element closed: " + el_tag)
def startElement(self, name, attributes=None):
if attributes:
attributes = dict(
[((None, k), v) for k, v in attributes.items()]
)
self.startElementNS((None, name), name, attributes)
def endElement(self, name):
self.endElementNS((None, name), name)
def characters(self, data):
last_element = self._element_stack[-1]
try:
# if there already is a child element, we must append to its tail
last_element = last_element[-1]
last_element.tail = (last_element.tail or '') + data
except IndexError:
# otherwise: append to the text
last_element.text = (last_element.text or '') + data
ignorableWhitespace = characters
class ElementTreeProducer(object):
"""Produces SAX events for an element and children.
"""
def __init__(self, element_or_tree, content_handler):
try:
element = element_or_tree.getroot()
except AttributeError:
element = element_or_tree
self._element = element
self._content_handler = content_handler
from xml.sax.xmlreader import AttributesNSImpl as attr_class
self._attr_class = attr_class
self._empty_attributes = attr_class({}, {})
def saxify(self):
self._content_handler.startDocument()
element = self._element
if hasattr(element, 'getprevious'):
siblings = []
sibling = element.getprevious()
while getattr(sibling, 'tag', None) is ProcessingInstruction:
siblings.append(sibling)
sibling = sibling.getprevious()
for sibling in siblings[::-1]:
self._recursive_saxify(sibling, {})
self._recursive_saxify(element, {})
if hasattr(element, 'getnext'):
sibling = element.getnext()
while getattr(sibling, 'tag', None) is ProcessingInstruction:
self._recursive_saxify(sibling, {})
sibling = sibling.getnext()
self._content_handler.endDocument()
def _recursive_saxify(self, element, prefixes):
content_handler = self._content_handler
tag = element.tag
if tag is Comment or tag is ProcessingInstruction:
if tag is ProcessingInstruction:
content_handler.processingInstruction(
element.target, element.text)
if element.tail:
content_handler.characters(element.tail)
return
new_prefixes = []
build_qname = self._build_qname
attribs = element.items()
if attribs:
attr_values = {}
attr_qnames = {}
for attr_ns_name, value in attribs:
attr_ns_tuple = _getNsTag(attr_ns_name)
attr_values[attr_ns_tuple] = value
attr_qnames[attr_ns_tuple] = build_qname(
attr_ns_tuple[0], attr_ns_tuple[1], prefixes, new_prefixes)
sax_attributes = self._attr_class(attr_values, attr_qnames)
else:
sax_attributes = self._empty_attributes
ns_uri, local_name = _getNsTag(tag)
qname = build_qname(ns_uri, local_name, prefixes, new_prefixes)
for prefix, uri in new_prefixes:
content_handler.startPrefixMapping(prefix, uri)
content_handler.startElementNS((ns_uri, local_name),
qname, sax_attributes)
if element.text:
content_handler.characters(element.text)
for child in element:
self._recursive_saxify(child, prefixes)
content_handler.endElementNS((ns_uri, local_name), qname)
for prefix, uri in new_prefixes:
content_handler.endPrefixMapping(prefix)
if element.tail:
content_handler.characters(element.tail)
def _build_qname(self, ns_uri, local_name, prefixes, new_prefixes):
if ns_uri is None:
return local_name
try:
prefix = prefixes[ns_uri]
except KeyError:
prefix = prefixes[ns_uri] = 'ns%02d' % len(prefixes)
new_prefixes.append( (prefix, ns_uri) )
return prefix + ':' + local_name
def saxify(element_or_tree, content_handler):
"""One-shot helper to generate SAX events from an XML tree and fire
them against a SAX ContentHandler.
"""
return ElementTreeProducer(element_or_tree, content_handler).saxify()
|
|
# -*- coding: utf-8 -*-
# Copyright 2012-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""Some diagnostic utilities."""
from __future__ import unicode_literals
from builtins import str
from builtins import object
import six
# from textwrap import fill
from atelier import rstgen
from atelier.utils import unindent
from django.conf import settings
from django.utils.encoding import force_text
from lino.core.layouts import BaseLayout
from lino.core.elems import Container, Wrapper, FieldElement
from lino.modlib.users.choicelists import UserTypes
from lino.core import actors
from lino.core import actions
from lino.core.utils import get_models
from lino.core.utils import full_model_name as fmn
from lino.api import dd
class Analyzer(object):
"The class of the :data:`lino.utils.diag.analyzer` object."
def __init__(self):
self._initialized = False
def analyze(self):
if self._initialized:
return
self._initialized = True
window_actions = dict()
self.custom_actions = []
for a in actors.actors_list:
for ba in a.get_actions():
if ba.action.is_window_action():
wl = ba.get_window_layout() or ba.action.params_layout
if wl is not None:
if isinstance(wl, six.string_types):
raise Exception("20150323 : {0}".format(ba))
# Was used to find Exception: 20150323 :
# <BoundAction(checkdata.Checkers,
# <ShowDetail detail (u'Detail')>)>
if wl not in window_actions:
# lh = wl.get_layout_handle(ui)
# for e in lh.main.walk():
# e.loosen_requirements(a)
window_actions[wl] = ba
else: # if ba.action.custom_handler:
self.custom_actions.append(ba)
l = list(window_actions.values())
def f(a):
return str(a.full_name())
self.window_actions = list(sorted(l, key=f))
self.custom_actions = list(sorted(self.custom_actions, key=f))
def show_window_fields(self):
self.analyze()
items = []
for ba in analyzer.window_actions:
items.append(
"{0} : {1}".format(
ba.full_name(), layout_fields(ba)))
return rstgen.ul(items)
def show_window_permissions(self):
self.analyze()
items = []
for ba in analyzer.window_actions:
items.append(
"{0} : visible for {1}".format(
ba.full_name(), visible_for(ba)))
return rstgen.ul(items)
def show_memo_commands(self, doctestfmt=False):
rst = ""
mp = settings.SITE.kernel.memo_parser
items = []
for cmd, func in sorted(mp.commands.items()):
doc = unindent(func.__doc__ or '')
if doc:
# doc = doc.splitlines()[0]
items.append(
"[{0} ...] : {1}".format(cmd, doc))
# rst += "\n**Commands**"
# rst += rstgen.boldheader("Commands")
rst += rstgen.ul(items)
if False:
items = []
for model, func in sorted(mp.renderers.items()):
doc = unindent(func.__doc__ or '')
if doc:
items.append(
"[{0} ...] : {1}".format(model, doc))
if len(items):
rst += "\n**Renderers**"
rst += rstgen.ul(items)
return rst
def show_dialog_actions(self, doctestfmt=False):
self.analyze()
items = []
for ba in analyzer.custom_actions + analyzer.window_actions:
if ba.action.parameters:
items.append(
"{0} : {1}".format(
ba.full_name(),
py2rst(ba.action, doctestfmt)))
print(rstgen.ul(items))
def show_action_permissions(self, *classes):
self.analyze()
items = []
for ba in analyzer.custom_actions + analyzer.window_actions:
if isinstance(ba.action, classes):
items.append(
"{0} : visible for {1}".format(
ba.full_name(), visible_for(ba)))
return rstgen.ul(items)
def show_database_structure(self):
"""Show a bullet list of all models and their fields."""
self.analyze()
items = []
for model in get_models():
names = []
# for f, m in model._meta.get_fields_with_model():
for f in model._meta.concrete_fields:
names.append(f.name)
items.append(
"{0} : {1}".format(fmn(model), ', '.join(names)))
items = sorted(items)
return rstgen.ul(items)
def show_fields(self, model, field_names=None, languages=None):
model = dd.resolve_model(model)
if field_names is not None:
field_names = dd.fields_list(model, field_names)
items = []
for f in model._meta.fields:
if field_names is None or f.name in field_names:
name = f.name
ref = model.__module__ + '.' + model.__name__ + '.' + name
verbose_name = force_text(f.verbose_name).strip()
help_text = force_text(f.help_text).replace('\n', ' ')
txt = "**{verbose_name}** (:attr:`{name} <{ref}>`) : " \
"{help_text}".format(**locals())
items.append(txt)
return rstgen.ul(items)
def show_db_overview(self):
"""Return a reStructredText-formatted "database overview" report.
Used by test cases in tested documents.
"""
from lino.core.utils import (full_model_name, sorted_models_list)
models_list = sorted_models_list()
apps = [p.app_label for p in settings.SITE.installed_plugins]
s = "%d apps: %s." % (len(apps), ", ".join(apps))
s += "\n%d models:\n" % len(models_list)
i = 0
headers = [
#~ "No.",
"Name",
"Default table",
#~ "M",
"#fields",
"#rows",
#~ ,"first","last"
]
rows = []
for model in models_list:
if True: # model._meta.managed:
i += 1
cells = []
#~ cells.append(str(i))
cells.append(full_model_name(model))
cells.append(model.get_default_table())
#~ cells.append(str(model))
#~ if model._meta.managed:
#~ cells.append('X')
#~ else:
#~ cells.append('')
cells.append(str(len(model._meta.concrete_fields)))
qs = model.objects.all()
n = qs.count()
cells.append(str(n))
#~ if n:
#~ cells.append(obj2str(qs[0]))
#~ cells.append(obj2str(qs[n-1]))
#~ else:
#~ cells.append('')
#~ cells.append('')
rows.append(cells)
s += rstgen.table(headers, rows)
return s
def show_foreign_keys(self):
"""Return a list that shows how database objects are being referred to
by some other database object. This information is important
(1) before deleting objects and (2) when merging them.
For every model we see a list of "delete handlers" and a list
of fields from other models that point to this model using
that delete handler.
Delete handlers are:
- PROTECT : refuse to delete when other objects refer to this object
        - CASCADE : delete objects referring to this object
- set_on_delete : make other objects point to something else (or set
their pointer to None)
"""
self.analyze()
tdp = dict() # target model -> delete handler -> pointer list
for target in get_models():
dp = tdp.setdefault(target, dict())
for m, fk in target._lino_ddh.fklist:
k = fk.remote_field.on_delete
p = dp.setdefault(k, [])
p.append((m, fk))
def fk2str(mfk):
return "{0}.{1}".format(fmn(mfk[0]), mfk[1].name)
items1 = []
for target, dp in list(tdp.items()):
items2 = []
for dh, pl in list(dp.items()):
items2.append(
"{0} : {1}".format(
dh.__name__, ', '.join([fk2str(mfk) for mfk in pl])))
if len(items2):
items2 = sorted(items2)
items1.append("{0} :\n{1}".format(
fmn(target), rstgen.ul(items2)))
items1 = sorted(items1)
return rstgen.ul(items1)
def show_complexity_factors(self):
self.analyze()
items = []
items.append("{0} plugins".format(len(dd.plugins)))
items.append("{0} models".format(len(get_models())))
items.append("{0} views".format(len(actors.actors_list)))
items.append("{0} user types".format(len(UserTypes.objects())))
dialog_actions = [ba for ba in analyzer.custom_actions +
analyzer.window_actions if
ba.action.parameters]
items.append("{0} dialog actions".format(len(dialog_actions)))
return rstgen.ul(items)
def visible_for(ba):
"""Shows a list of user profiles for which this action is visible."""
if ba is None:
return "N/A"
visible = []
hidden = []
for p in UserTypes.objects():
name = p.name or p.value
if ba.get_view_permission(p):
visible.append(name)
else:
hidden.append(name)
if len(hidden) == 0:
return "all"
if len(visible) == 0:
return "nobody"
# if len(hidden) < len(visible):
# if len(hidden) <= 3:
# return "all except %s" % ', '.join(hidden)
return ' '.join(visible)
def layout_fields(ba):
wl = ba.get_window_layout() or ba.action.params_layout
if wl is None:
return ''
lh = wl.get_layout_handle(settings.SITE.kernel.default_ui)
elems = [str(f.name) for f in lh._store_fields]
return ', '.join(elems)
# return fill(' '.join(elems), 50)
def py2rst(self, doctestfmt=False):
"""Render any Python object as reStructuredText.
Where "any" currently means a layout or a layout element.
:class:`lino.core.layouts.BaseLayout`
:mod:`lino.modlib.extjs.elems`
    If the optional argument `doctestfmt` is specified as `True`, then the
    output contains fewer blank lines, which might be invalid
    reStructuredText but is more doctest-friendly.
"""
if isinstance(self, actions.Action):
s = str(self)
if self.params_layout:
lh = self.params_layout.get_layout_handle(
settings.SITE.kernel.default_ui)
s += '\n'
s += py2rst(lh.main, doctestfmt)
return s
if isinstance(self, BaseLayout):
lh = self.get_layout_handle(settings.SITE.kernel.default_ui)
return py2rst(lh.main, doctestfmt)
if isinstance(self, Wrapper):
self = self.wrapped
if isinstance(self, FieldElement):
s = "**%s** (%s)" % (str(self.field.verbose_name), self.field.name)
elif self.label is None:
s = "(%s)" % self.name
else:
s = "**%s** (%s)" % (str(self.label), self.name)
if visible_for(self) != visible_for(self.parent):
s += " [visible for %s]" % visible_for(self)
if isinstance(self, Container):
use_ul = False
for e in self.elements:
if isinstance(e, Container):
use_ul = True
children = [py2rst(e, doctestfmt) for e in self.elements]
if len(children):
if use_ul:
s += ':\n'
if not doctestfmt:
s += '\n'
s += rstgen.ul(children)
else:
s += ": " + ', '.join(children)
return s
analyzer = Analyzer()
"""This is a docstring
"""
|
|
import unittest
import tethys_apps.base.workspace as base_workspace
import os
import shutil
from unittest import mock
from ... import UserFactory
from django.http import HttpRequest
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
import tethys_apps.base.app_base as tethys_app_base
from tethys_apps.base.workspace import user_workspace, app_workspace, _get_app_workspace, _get_user_workspace
from tethys_quotas.models import ResourceQuota
@user_workspace
def user_dec_controller(request, user_workspace):
return user_workspace
@app_workspace
def app_dec_controller(request, app_workspace):
return app_workspace
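# The two minimal controllers above exist only so the tests below can exercise the
# user_workspace and app_workspace decorators end to end: each decorator is expected to
# resolve the appropriate workspace and pass it to the wrapped controller, which simply
# returns it for the assertions.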
class TestTethysWorkspace(unittest.TestCase):
def setUp(self):
self.root = os.path.abspath(os.path.dirname(__file__))
self.test_root = os.path.join(self.root, 'test_workspace')
self.test_root_a = os.path.join(self.test_root, 'test_workspace_a')
self.test_root2 = os.path.join(self.root, 'test_workspace2')
self.app = tethys_app_base.TethysAppBase()
self.user = UserFactory()
def tearDown(self):
if os.path.isdir(self.test_root):
shutil.rmtree(self.test_root)
if os.path.isdir(self.test_root2):
shutil.rmtree(self.test_root2)
def test_TethysWorkspace(self):
# Test Create new workspace folder test_workspace
result = base_workspace.TethysWorkspace(path=self.test_root)
workspace = '<TethysWorkspace path="{0}">'.format(self.test_root)
# Create new folder inside test_workspace
base_workspace.TethysWorkspace(path=self.test_root_a)
# Create new folder test_workspace2
base_workspace.TethysWorkspace(path=self.test_root2)
self.assertEqual(result.__repr__(), workspace)
self.assertEqual(result.path, self.test_root)
# Create Files
file_list = ['test1.txt', 'test2.txt']
for file_name in file_list:
# Create file
open(os.path.join(self.test_root, file_name), 'a').close()
# Test files with full path
result = base_workspace.TethysWorkspace(path=self.test_root).files(full_path=True)
for file_name in file_list:
self.assertIn(os.path.join(self.test_root, file_name), result)
# Test files without full path
result = base_workspace.TethysWorkspace(path=self.test_root).files()
for file_name in file_list:
self.assertIn(file_name, result)
# Test Directories with full path
result = base_workspace.TethysWorkspace(path=self.root).directories(full_path=True)
self.assertIn(self.test_root, result)
self.assertIn(self.test_root2, result)
# Test Directories without full path
result = base_workspace.TethysWorkspace(path=self.root).directories()
self.assertIn('test_workspace', result)
self.assertIn('test_workspace2', result)
self.assertNotIn(self.test_root, result)
self.assertNotIn(self.test_root2, result)
# Write to file
f = open(os.path.join(self.test_root, 'test2.txt'), 'w')
f.write('Hello World')
f.close()
# Test size greater than zero
workspace_size = base_workspace.TethysWorkspace(path=self.test_root).get_size()
self.assertTrue(workspace_size > 0)
# Test get size unit conversion
workspace_size_kb = base_workspace.TethysWorkspace(path=self.test_root).get_size('kb')
        self.assertEqual(workspace_size/1024, workspace_size_kb)
# Test Remove file
base_workspace.TethysWorkspace(path=self.test_root).remove('test2.txt')
        # Verify that the file has been removed
self.assertFalse(os.path.isfile(os.path.join(self.test_root, 'test2.txt')))
# Test Remove Directory
base_workspace.TethysWorkspace(path=self.root).remove(self.test_root2)
        # Verify that the directory has been removed
self.assertFalse(os.path.isdir(self.test_root2))
# Test Clear
base_workspace.TethysWorkspace(path=self.test_root).clear()
# Test size equal to zero
workspace_size = base_workspace.TethysWorkspace(path=self.test_root).get_size()
self.assertTrue(workspace_size == 0)
        # Verify that the directory has been removed
self.assertFalse(os.path.isdir(self.test_root_a))
        # Verify that the file has been removed
self.assertFalse(os.path.isfile(os.path.join(self.test_root, 'test1.txt')))
        # Test that the path property cannot be overwritten
workspace = base_workspace.TethysWorkspace(path=self.test_root)
workspace.path = 'foo'
self.assertEqual(self.test_root, workspace.path)
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_user_workspace(self, mock_tws):
user = self.user
_get_user_workspace(self.app, user)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('user_workspaces', rts_call_args[0][0][0])
self.assertIn(user.username, rts_call_args[0][0][0])
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_user_workspace_http(self, mock_tws):
from django.http import HttpRequest
request = HttpRequest()
request.user = self.user
_get_user_workspace(self.app, request)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('user_workspaces', rts_call_args[0][0][0])
self.assertIn(self.user.username, rts_call_args[0][0][0])
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_user_workspace_none(self, mock_tws):
_get_user_workspace(self.app, None)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('user_workspaces', rts_call_args[0][0][0])
self.assertIn('anonymous_user', rts_call_args[0][0][0])
def test_get_user_workspace_error(self):
with self.assertRaises(ValueError) as context:
_get_user_workspace(self.app, 'test')
        self.assertEqual(
"Invalid type for argument 'user': must be either an User or HttpRequest object.", str(context.exception))
@mock.patch('tethys_apps.base.workspace.TethysWorkspace')
def test_get_app_workspace(self, mock_tws):
_get_app_workspace(self.app)
# Check result
rts_call_args = mock_tws.call_args_list
self.assertIn('workspaces', rts_call_args[0][0][0])
self.assertIn('app_workspace', rts_call_args[0][0][0])
self.assertNotIn('user_workspaces', rts_call_args[0][0][0])
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_user_workspace')
def test_user_workspace_user(self, mock_guw, _, mock_rq, mock_log):
user_workspace = mock.MagicMock()
mock_guw.return_value = user_workspace
mock_rq.objects.get.return_value = mock.MagicMock(codename='user_workspace_quota')
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
ret = user_dec_controller(mock_request)
self.assertEqual(user_workspace, ret)
self.assertEqual(0, len(mock_log.warning.call_args_list))
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_user_workspace')
def test_user_workspace_rq_does_not_exist(self, _, __, mock_rq, mock_log):
mock_rq.objects.get.side_effect = ResourceQuota.DoesNotExist
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
user_dec_controller(mock_request)
mock_log.warning.assert_called_with('ResourceQuota with codename user_workspace_quota does not exist.')
def test_user_workspace_no_HttpRequest(self):
mock_request = mock.MagicMock()
ret = None
with self.assertRaises(ValueError) as context:
ret = user_dec_controller(mock_request)
self.assertTrue(
'No request given. The user_workspace decorator only works on controllers.' in str(context.exception))
self.assertEqual(None, ret)
@mock.patch('tethys_apps.base.workspace.passes_quota')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_user_workspace')
def test_user_workspace_passes_quota_false(self, _, mock_app, mock_rq, mock_pq):
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_rq.objects.get.return_value = mock.MagicMock(help='helpful message')
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
mock_pq.return_value = False
ret = None
with self.assertRaises(PermissionDenied) as context:
ret = user_dec_controller(mock_request)
self.assertTrue("helpful message" in str(context.exception))
self.assertEqual(None, ret)
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_app_workspace')
def test_app_workspace_app(self, mock_gaw, _, mock_rq, mock_log):
app_workspace = mock.MagicMock()
mock_gaw.return_value = app_workspace
mock_rq.objects.get.return_value = mock.MagicMock(codename='app_workspace_quota')
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
ret = app_dec_controller(mock_request)
self.assertEqual(app_workspace, ret)
self.assertEqual(0, len(mock_log.warning.call_args_list))
@mock.patch('tethys_quotas.utilities.log')
@mock.patch('tethys_apps.base.workspace.log')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_app_workspace')
def test_app_workspace_rq_does_not_exist(self, _, __, mock_rq, mock_log, ___):
mock_rq.objects.get.side_effect = ResourceQuota.DoesNotExist
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
app_dec_controller(mock_request)
mock_log.warning.assert_called_with('ResourceQuota with codename app_workspace_quota does not exist.')
def test_app_workspace_no_HttpRequest(self):
mock_request = mock.MagicMock()
ret = None
with self.assertRaises(ValueError) as context:
ret = app_dec_controller(mock_request)
self.assertTrue(
'No request given. The app_workspace decorator only works on controllers.' in str(context.exception))
self.assertEqual(None, ret)
@mock.patch('tethys_apps.base.workspace.passes_quota')
@mock.patch('tethys_quotas.models.ResourceQuota')
@mock.patch('tethys_apps.utilities.get_active_app')
@mock.patch('tethys_apps.base.workspace._get_app_workspace')
def test_app_workspace_passes_quota_false(self, _, mock_app, mock_rq, mock_pq):
mock_rq.DoesNotExist = ResourceQuota.DoesNotExist
mock_rq.objects.get.return_value = mock.MagicMock(help='helpful message')
mock_request = mock.MagicMock(spec=HttpRequest, user=mock.MagicMock(spec=User))
mock_pq.return_value = False
ret = None
with self.assertRaises(PermissionDenied) as context:
ret = app_dec_controller(mock_request)
self.assertTrue("helpful message" in str(context.exception))
self.assertEqual(None, ret)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desitarget.brightmask.
"""
import unittest
from pkg_resources import resource_filename
import os
import fitsio
import numpy as np
import numpy.lib.recfunctions as rfn
from glob import glob
import healpy as hp
import tempfile
import shutil
from desitarget import brightmask, io
from desitarget.targetmask import desi_mask, targetid_mask
from desiutil import brick
class TestBRIGHTMASK(unittest.TestCase):
@classmethod
def setUpClass(cls):
# ADM set up the necessary environment variables.
cls.gaiadir_orig = os.getenv("GAIA_DIR")
testdir = 'desitarget.test'
os.environ["GAIA_DIR"] = resource_filename(testdir, 't4')
cls.tychodir_orig = os.getenv("TYCHO_DIR")
os.environ["TYCHO_DIR"] = resource_filename(testdir, 't4/tycho')
cls.uratdir_orig = os.getenv("URAT_DIR")
os.environ["URAT_DIR"] = resource_filename(testdir, 't4/urat')
# ADM a temporary output directory to test writing masks.
cls.maskdir = tempfile.mkdtemp()
# ADM allowed HEALPixels in the Tycho directory.
pixnum = []
fns = sorted(glob(os.path.join(os.environ["TYCHO_DIR"], 'healpix', '*fits')))
for fn in fns:
data, hdr = fitsio.read(fn, "TYCHOHPX", header=True)
nside = hdr["HPXNSIDE"]
theta, phi = np.radians(90-data["DEC"]), np.radians(data["RA"])
pixnum.append(list(set(hp.ang2pix(nside, theta, phi, nest=True))))
cls.pixnum = [i for eachlist in pixnum for i in eachlist]
cls.nside = nside
# ADM pick a faint maglim (as unit tests deal with few objects).
cls.maglim = 20.
# ADM also pick a reasonable epoch at which to make the mask.
cls.maskepoch = 2025.5
# ADM an example mask, made from all of the test HEALPixels.
cls.allmx = brightmask.make_bright_star_mask(
numproc=1, nside=cls.nside, pixels=cls.pixnum,
maglim=cls.maglim, maskepoch=cls.maskepoch)
# ADM read in some targets.
targdir = resource_filename(testdir, 't')
fn = os.path.join(targdir, 'sweep-320m005-330p000.fits')
ts = fitsio.read(fn)
# ADM targets are really sweeps objects, so add target fields.
zs = np.zeros(len(ts))
targs = rfn.append_fields(ts, ["DESI_TARGET", "TARGETID"], [zs, zs],
usemask=False, dtypes='>i8')
cls.targs = rfn.append_fields(targs, "BRICK_OBJID", zs, usemask=False,
dtypes='>i4')
cls.targs["BRICK_OBJID"] = cls.targs["OBJID"]
# ADM mask_targets checks for unique TARGETIDs, so create some.
cls.targs["TARGETID"] = np.arange(len(cls.targs))
# ADM invent a mask with various testing properties.
cls.mask = np.zeros(3, dtype=brightmask.maskdatamodel.dtype)
cls.mask["DEC"] = [0, 70, 35]
cls.mask["IN_RADIUS"] = [1, 20, 10]
cls.mask["E1"] = [0., 0., -0.3]
cls.mask["E2"] = [0., 0., 0.5]
cls.mask["TYPE"] = ['PSF', b'PSF ', 'PSF ']
@classmethod
def tearDownClass(cls):
# ADM remove the temporary output directory.
if os.path.exists(cls.maskdir):
shutil.rmtree(cls.maskdir)
# ADM reset the environment variables.
if cls.gaiadir_orig is not None:
os.environ["GAIA_DIR"] = cls.gaiadir_orig
if cls.tychodir_orig is not None:
os.environ["TYCHO_DIR"] = cls.tychodir_orig
if cls.uratdir_orig is not None:
os.environ["URAT_DIR"] = cls.uratdir_orig
def test_make_bright_star_mask(self):
"""Test the construction of a bright star mask.
"""
# ADM test making the mask in an individual pixel.
mx = brightmask.make_bright_star_mask_in_hp(
self.nside, self.pixnum[0],
maglim=self.maglim, maskepoch=self.maskepoch)
# ADM check that running across all pixels contains the subset
# ADM of masks in the single pixel.
self.assertTrue(len(set(mx["REF_ID"]) - set(self.allmx["REF_ID"])) == 0)
self.assertTrue(len(set(self.allmx["REF_ID"]) - set(mx["REF_ID"])) > 0)
def test_make_bright_star_mask_parallel(self):
"""Check running the mask-making code in parallel.
"""
# ADM run on two processors.
two = brightmask.make_bright_star_mask(
numproc=2, nside=self.nside, pixels=self.pixnum,
maglim=self.maglim, maskepoch=self.maskepoch)
# ADM check that running in parallel recovers the same masks as
# ADM running on one processor.
one = self.allmx[np.argsort(self.allmx["REF_ID"])]
two = two[np.argsort(two["REF_ID"])]
self.assertTrue(np.all(one == two))
def test_mask_write(self):
"""Test that masks are written to file correctly.
"""
# ADM some meaningless magnitude limits and mask epochs.
ml, me = 62.3, 2062.3
# ADM a keyword dictionary to write to the output file header.
extra = {'BLAT': 'blat', 'FOO': 'foo'}
# ADM test writing without HEALPixel-split.
_, mxdir = io.write_masks(self.maskdir, self.allmx, maglim=ml,
maskepoch=me, extra=extra)
# ADM test writing with HEALPixel-split.
_, mxdir = io.write_masks(self.maskdir, self.allmx, maglim=ml,
maskepoch=me, extra=extra, nside=self.nside)
# ADM construct the output directory and file name.
mxd = io.find_target_files(self.maskdir, flavor="masks",
maglim=ml, epoch=me)
mxfn = io.find_target_files(self.maskdir, flavor="masks",
maglim=ml, epoch=me, hp=self.pixnum[0])
# ADM check the output directory is as expected.
self.assertEqual(mxdir, mxd)
# ADM check all of the files were made in the correct place.
fns = sorted(glob(os.path.join(mxdir, "masks-hp*fits")))
self.assertEqual(len(fns), len(self.pixnum)+1)
# ADM check the extra kwargs were written to the header.
for key in extra:
hdr = fitsio.read_header(mxfn, "MASKS")
self.assertEqual(hdr[key].rstrip(), extra[key])
def test_mask_targets(self):
"""Test that targets in masks are flagged accordingly.
"""
# ADM create the output mask directory.
_, mxdir = io.write_masks(self.maskdir, self.allmx, maglim=self.maglim,
maskepoch=self.maskepoch, nside=self.nside)
# ADM make targets with the same coordinates as the masks.
# ADM remembering to select masks that actually have a radius.
ii = self.allmx["IN_RADIUS"] > 0
targs = self.targs.copy()
targs["RA"] = self.allmx["RA"][ii][:len(targs)]
targs["DEC"] = self.allmx["DEC"][ii][:len(targs)]
# ADM add mask information to DESI_TARGET.
mxt = brightmask.mask_targets(targs, mxdir, nside=self.nside,
pixlist=self.pixnum)
# ADM all the targs should have been masked.
nmasked = np.sum(mxt["DESI_TARGET"] & desi_mask["IN_BRIGHT_OBJECT"] != 0)
self.assertEqual(nmasked, len(targs))
# ADM and we should have added some safe targets that will be
# ADM "near" bright objects.
is_nbo = mxt["DESI_TARGET"] & desi_mask["NEAR_BRIGHT_OBJECT"] != 0
self.assertTrue(np.all(is_nbo))
def test_non_mask_targets(self):
"""Test targets that are NOT in masks are flagged accordingly.
"""
# ADM create the output mask directory.
_, mxdir = io.write_masks(self.maskdir, self.allmx, maglim=self.maglim,
maskepoch=self.maskepoch, nside=self.nside)
# ADM update DESI_TARGET for any targets in masks.
mxtargs = brightmask.mask_targets(self.targs, mxdir, nside=self.nside,
pixlist=self.pixnum)
# ADM none of the targets should be in a mask.
self.assertTrue(np.all(mxtargs["DESI_TARGET"] == 0))
def test_safe_locations(self):
"""Test SAFE/BADSKY locations are equidistant from mask centers.
"""
# ADM append SAFE locations around the perimeter of the mask.
safes = brightmask.get_safe_targets(self.targs, self.mask)
targs = np.concatenate([self.targs, safes])
# ADM restrict to just SAFE locations.
skybitset = ((targs["TARGETID"] & targetid_mask.SKY) != 0)
safes = targs[np.where(skybitset)]
# ADM for each mask location check that every safe location is
# ADM equidistant from the mask center.
from astropy.coordinates import SkyCoord
from astropy import units as u
c = SkyCoord(safes["RA"]*u.deg, safes["DEC"]*u.deg)
for i in range(2):
cent = SkyCoord(self.mask[i]["RA"]*u.deg, self.mask[i]["DEC"]*u.deg)
sep = cent.separation(c)
# ADM only things close to mask i
w = np.where(sep < np.min(sep)*1.002)
# ADM are these all the same distance to high precision?
self.assertTrue(np.max(sep[w] - sep[w[0]]) < 1e-15*u.deg)
def test_targetid(self):
"""Test SKY/RELEASE/BRICKID/OBJID are set correctly in TARGETID
and DESI_TARGET for SAFE/BADSKY locations.
"""
# ADM append SAFE locations around the perimeter of the mask.
safes = brightmask.get_safe_targets(self.targs, self.mask)
targs = np.concatenate([self.targs, safes])
# ADM first check the SKY and BADSKY bits are appropriately set.
skybitset = ((targs["TARGETID"] & targetid_mask.SKY) != 0)
badskybitset = ((targs["DESI_TARGET"] & desi_mask.BAD_SKY) != 0)
self.assertTrue(np.all(skybitset == badskybitset))
# ADM now check that the other bits are in the correct locations
# ADM first restrict to the ~half-dozen targets in BRICK 521233.
bid = 521233
ii = targs["BRICKID"] == bid
targs = targs[ii]
# ADM check that the TARGETIDs are unique.
s = set(targs["TARGETID"])
self.assertEqual(len(s), len(targs["TARGETID"]))
# ADM the TARGETIDs as a binary string.
bintargids = [np.binary_repr(targid) for targid in targs["TARGETID"]]
# ADM check the DR is set (in a way unlike the normal bit-setting
# in brightmask.py). Release should be zero for SAFE locations.
rmostbit = targetid_mask.RELEASE.bitnum
lmostbit = targetid_mask.RELEASE.bitnum + targetid_mask.RELEASE.nbits
drbitset = int(bintargids[0][-lmostbit:-rmostbit], 2)
drbitshould = targs["RELEASE"][0]
self.assertEqual(drbitset, drbitshould)
self.assertEqual(drbitset, 0)
# ADM check that the BRICKIDs are as restricted/requested.
rmostbit = targetid_mask.BRICKID.bitnum
lmostbit = targetid_mask.BRICKID.bitnum + targetid_mask.BRICKID.nbits
brickidset = np.array(
[int(bintargid[-lmostbit:-rmostbit], 2) for bintargid in bintargids])
self.assertTrue(np.all(brickidset == bid))
if __name__ == '__main__':
unittest.main()
def test_suite():
"""Allows testing of only this module with the command:
python setup.py test -m desitarget.test.test_brightmask
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
|
import os
import logging
from django.conf import settings
from django.contrib.gis.db import models
from django.db.models.query_utils import Q
from django.utils.translation import gettext_lazy as _
from django.utils.formats import date_format
from colorfield.fields import ColorField
from easy_thumbnails.alias import aliases
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
from mapentity.models import MapEntityMixin
from mapentity.serializers import plain_text
from geotrek.authent.models import StructureRelated
from geotrek.core.models import Topology
from geotrek.common.mixins import (NoDeleteMixin, TimeStampedModelMixin,
PictogramMixin, OptionalPictogramMixin,
PublishableMixin, PicturesMixin,
AddPropertyMixin)
from geotrek.common.models import Theme, ReservationSystem
from geotrek.common.utils import intersecting
from geotrek.zoning.mixins import ZoningPropertiesMixin
from extended_choices import Choices
if 'modeltranslation' in settings.INSTALLED_APPS:
from modeltranslation.manager import MultilingualManager
else:
from django.db.models import Manager as MultilingualManager
logger = logging.getLogger(__name__)
class InformationDeskType(PictogramMixin):
label = models.CharField(verbose_name=_("Label"), max_length=128)
class Meta:
verbose_name = _("Information desk type")
verbose_name_plural = _("Information desk types")
ordering = ['label']
def __str__(self):
return self.label
class InformationDesk(models.Model):
name = models.CharField(verbose_name=_("Title"), max_length=256)
type = models.ForeignKey(InformationDeskType, verbose_name=_("Type"), on_delete=models.CASCADE,
related_name='desks')
description = models.TextField(verbose_name=_("Description"), blank=True,
help_text=_("Brief description"))
phone = models.CharField(verbose_name=_("Phone"), max_length=32,
blank=True, null=True)
email = models.EmailField(verbose_name=_("Email"), max_length=256,
blank=True, null=True)
website = models.URLField(verbose_name=_("Website"), max_length=256,
blank=True, null=True)
photo = models.FileField(verbose_name=_("Photo"), upload_to=settings.UPLOAD_DIR,
max_length=512, blank=True, null=True)
street = models.CharField(verbose_name=_("Street"), max_length=256,
blank=True, null=True)
postal_code = models.CharField(verbose_name=_("Postal code"), max_length=8,
blank=True, null=True)
municipality = models.CharField(verbose_name=_("Municipality"),
blank=True, null=True,
max_length=256)
    geom = models.PointField(verbose_name=_("Location"),
blank=True, null=True,
srid=settings.SRID, spatial_index=False)
objects = models.Manager()
class Meta:
verbose_name = _("Information desk")
verbose_name_plural = _("Information desks")
ordering = ['name']
def __str__(self):
return self.name
@property
def latitude(self):
return self.geom.transform(settings.API_SRID, clone=True).y if self.geom else None
@property
def longitude(self):
return self.geom.transform(settings.API_SRID, clone=True).x if self.geom else None
@property
def thumbnail(self):
if not self.photo:
return None
thumbnailer = get_thumbnailer(self.photo)
try:
return thumbnailer.get_thumbnail(aliases.get('thumbnail'))
except (IOError, InvalidImageFormatError):
logger.warning(_("Image %s invalid or missing from disk.") % self.photo)
return None
@property
def resized_picture(self):
if not self.photo:
return None
thumbnailer = get_thumbnailer(self.photo)
try:
return thumbnailer.get_thumbnail(aliases.get('medium'))
except (IOError, InvalidImageFormatError):
logger.warning(_("Image %s invalid or missing from disk.") % self.photo)
return None
@property
def photo_url(self):
thumbnail = self.thumbnail
if not thumbnail:
return None
return os.path.join(settings.MEDIA_URL, thumbnail.name)
GEOMETRY_TYPES = Choices(
('POINT', 'point', _('Point')),
('LINE', 'line', _('Line')),
('POLYGON', 'polygon', _('Polygon')),
('ANY', 'any', _('Any')),
)
class TouristicContentCategory(PictogramMixin):
label = models.CharField(verbose_name=_("Label"), max_length=128)
geometry_type = models.CharField(max_length=16, choices=GEOMETRY_TYPES, default=GEOMETRY_TYPES.POINT)
type1_label = models.CharField(verbose_name=_("First list label"), max_length=128,
blank=True)
type2_label = models.CharField(verbose_name=_("Second list label"), max_length=128,
blank=True)
order = models.IntegerField(verbose_name=_("Order"), null=True, blank=True,
help_text=_("Alphabetical order if blank"))
color = ColorField(verbose_name=_("Color"), default='#444444',
help_text=_("Color of the category, only used in mobile.")) # To be implemented in Geotrek-rando
id_prefix = 'C'
class Meta:
verbose_name = _("Touristic content category")
verbose_name_plural = _("Touristic content categories")
ordering = ['order', 'label']
def __str__(self):
return self.label
@property
def prefixed_id(self):
return '{prefix}{id}'.format(prefix=self.id_prefix, id=self.id)
class TouristicContentTypeFilteringManager(MultilingualManager):
def has_content_published_not_deleted_in_list(self, list_index, category=None, portals=None, language=None):
""" Retrieves content types for which there exists an event that is published and not deleted in list (type1 or type2)
"""
i = list_index
q_total = Q()
qs = super().get_queryset().filter(in_list=i)
# Building following logic :
# return type1 if:
# (contents1__portal__in==portals)
# & (contents1__category==category)
# & (contents1_published_fr | contents1_published_en)
# & not(contents1_deleted)
#
# q_total = q_portal
# & q_category
# & q_lang
# & q_deleted
q_portal = Q()
if portals:
portal_field_name = f"contents{i}__portal__in"
q_portal = Q(**{portal_field_name: portals})
q_category = Q()
if category:
category_field_name = f"contents{i}__category"
q_category = Q(**{category_field_name: category})
if language:
published_field_name = f"contents{i}__published_{language}"
q_lang = Q(**{published_field_name: True})
else:
q_lang = Q()
for lang in settings.MODELTRANSLATION_LANGUAGES:
published_field_name = f"contents{i}__published_{lang}"
q_lang |= Q(**{published_field_name: True})
deleted_field_name = f"contents{i}__deleted"
q_deleted = Q(**{deleted_field_name: False})
q_total = q_portal & q_category & q_lang & q_deleted
return qs.filter(q_total).distinct()
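# Illustrative call (a sketch; some_category, some_portal and 'en' are placeholders):
#   TouristicContentType.objects.has_content_published_not_deleted_in_list(
#       1, category=some_category, portals=[some_portal], language='en')
# returns the first-list types that have at least one published, non-deleted content
# matching those filters.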
class TouristicContentType(OptionalPictogramMixin):
objects = TouristicContentTypeFilteringManager()
label = models.CharField(verbose_name=_("Label"), max_length=128)
category = models.ForeignKey(TouristicContentCategory, related_name='types', on_delete=models.CASCADE,
verbose_name=_("Category"))
    # Determines in which of the two type lists (type1 or type2) this type appears
in_list = models.IntegerField(choices=((1, _("First")), (2, _("Second"))))
class Meta:
verbose_name = _("Touristic content type")
        verbose_name_plural = _("Touristic content types")
ordering = ['label']
def __str__(self):
return self.label
class TouristicContentType1Manager(MultilingualManager):
def get_queryset(self):
return super().get_queryset().filter(in_list=1)
class TouristicContentType2Manager(MultilingualManager):
def get_queryset(self):
return super().get_queryset().filter(in_list=2)
class TouristicContentType1(TouristicContentType):
objects = TouristicContentType1Manager()
def __init__(self, *args, **kwargs):
self._meta.get_field('in_list').default = 1
super().__init__(*args, **kwargs)
class Meta:
proxy = True
verbose_name = _("Type1")
verbose_name_plural = _("First list types")
class TouristicContentType2(TouristicContentType):
objects = TouristicContentType2Manager()
def __init__(self, *args, **kwargs):
self._meta.get_field('in_list').default = 2
super().__init__(*args, **kwargs)
class Meta:
proxy = True
verbose_name = _("Type2")
verbose_name_plural = _("Second list types")
class TouristicContent(ZoningPropertiesMixin, AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
TimeStampedModelMixin, PicturesMixin, NoDeleteMixin):
""" A generic touristic content (accomodation, museum, etc.) in the park
"""
description_teaser = models.TextField(verbose_name=_("Description teaser"), blank=True,
help_text=_("A brief summary"))
description = models.TextField(verbose_name=_("Description"), blank=True,
help_text=_("Complete description"))
themes = models.ManyToManyField(Theme, related_name="touristiccontents",
blank=True, verbose_name=_("Themes"),
help_text=_("Main theme(s)"))
geom = models.GeometryField(verbose_name=_("Location"), srid=settings.SRID)
category = models.ForeignKey(TouristicContentCategory, related_name='contents', on_delete=models.CASCADE,
verbose_name=_("Category"))
contact = models.TextField(verbose_name=_("Contact"), blank=True,
help_text=_("Address, phone, etc."))
email = models.EmailField(verbose_name=_("Email"), max_length=256,
blank=True, null=True)
website = models.URLField(verbose_name=_("Website"), max_length=256,
blank=True, null=True)
practical_info = models.TextField(verbose_name=_("Practical info"), blank=True,
help_text=_("Anything worth to know"))
type1 = models.ManyToManyField(TouristicContentType1, related_name='contents1',
verbose_name=_("Type 1"),
blank=True)
type2 = models.ManyToManyField(TouristicContentType2, related_name='contents2',
verbose_name=_("Type 2"),
blank=True)
source = models.ManyToManyField('common.RecordSource',
blank=True, related_name='touristiccontents',
verbose_name=_("Source"))
portal = models.ManyToManyField('common.TargetPortal',
blank=True, related_name='touristiccontents',
verbose_name=_("Portal"))
eid = models.CharField(verbose_name=_("External id"), max_length=1024, blank=True, null=True)
reservation_system = models.ForeignKey(ReservationSystem, verbose_name=_("Reservation system"),
on_delete=models.CASCADE, blank=True, null=True)
reservation_id = models.CharField(verbose_name=_("Reservation ID"), max_length=1024,
blank=True)
approved = models.BooleanField(verbose_name=_("Approved"), default=False)
class Meta:
verbose_name = _("Touristic content")
verbose_name_plural = _("Touristic contents")
def __str__(self):
return self.name
@property
def districts_display(self):
return ', '.join([str(d) for d in self.districts])
@property
def type1_label(self):
return self.category.type1_label
@property
def type2_label(self):
return self.category.type2_label
@property
def prefixed_category_id(self):
return self.category.prefixed_id
def distance(self, to_cls):
return settings.TOURISM_INTERSECTION_MARGIN
@property
def type(self):
"""Fake type to simulate POI for mobile app v1"""
return self.category
@property
def extent(self):
return self.geom.buffer(10).transform(settings.API_SRID, clone=True).extent
@property
def rando_url(self):
category_slug = _('touristic-content')
return '{}/{}/'.format(category_slug, self.slug)
@property
def meta_description(self):
return plain_text(self.description_teaser or self.description)[:500]
Topology.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self).order_by(*settings.TOURISTIC_CONTENTS_API_ORDER), _("Touristic contents"))
Topology.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True).order_by(*settings.TOURISTIC_CONTENTS_API_ORDER), _("Published touristic contents"))
TouristicContent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self).order_by(*settings.TOURISTIC_CONTENTS_API_ORDER), _("Touristic contents"))
TouristicContent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True).order_by(*settings.TOURISTIC_CONTENTS_API_ORDER), _("Published touristic contents"))
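# The add_property calls above attach reverse lookups to Topology and TouristicContent,
# so an instance exposes `touristic_contents` / `published_touristic_contents` returning
# the touristic contents whose geometry intersects it, ordered per
# settings.TOURISTIC_CONTENTS_API_ORDER.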
class TouristicEventType(OptionalPictogramMixin):
type = models.CharField(verbose_name=_("Type"), max_length=128)
class Meta:
verbose_name = _("Touristic event type")
verbose_name_plural = _("Touristic event types")
ordering = ['type']
def __str__(self):
return self.type
class TouristicEvent(ZoningPropertiesMixin, AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
PicturesMixin, TimeStampedModelMixin, NoDeleteMixin):
""" A touristic event (conference, workshop, etc.) in the park
"""
description_teaser = models.TextField(verbose_name=_("Description teaser"), blank=True,
help_text=_("A brief summary"))
description = models.TextField(verbose_name=_("Description"), blank=True,
help_text=_("Complete description"))
themes = models.ManyToManyField(Theme, related_name="touristic_events",
blank=True, verbose_name=_("Themes"),
help_text=_("Main theme(s)"))
geom = models.PointField(verbose_name=_("Location"), srid=settings.SRID)
begin_date = models.DateField(blank=True, null=True, verbose_name=_("Begin date"))
end_date = models.DateField(blank=True, null=True, verbose_name=_("End date"))
duration = models.CharField(verbose_name=_("Duration"), max_length=64, blank=True,
help_text=_("3 days, season, ..."))
meeting_point = models.CharField(verbose_name=_("Meeting point"), max_length=256, blank=True,
help_text=_("Where exactly ?"))
meeting_time = models.TimeField(verbose_name=_("Meeting time"), blank=True, null=True,
help_text=_("11:00, 23:30"))
contact = models.TextField(verbose_name=_("Contact"), blank=True)
email = models.EmailField(verbose_name=_("Email"), max_length=256,
blank=True, null=True)
website = models.URLField(verbose_name=_("Website"), max_length=256,
blank=True, null=True)
organizer = models.CharField(verbose_name=_("Organizer"), max_length=256, blank=True)
speaker = models.CharField(verbose_name=_("Speaker"), max_length=256, blank=True)
type = models.ForeignKey(TouristicEventType, verbose_name=_("Type"), blank=True, null=True, on_delete=models.CASCADE)
accessibility = models.CharField(verbose_name=_("Accessibility"), max_length=256, blank=True)
participant_number = models.CharField(verbose_name=_("Number of participants"), max_length=256, blank=True)
booking = models.TextField(verbose_name=_("Booking"), blank=True)
target_audience = models.CharField(verbose_name=_("Target audience"), max_length=128, blank=True, null=True)
practical_info = models.TextField(verbose_name=_("Practical info"), blank=True,
help_text=_("Recommandations / To plan / Advices"))
source = models.ManyToManyField('common.RecordSource',
blank=True, related_name='touristicevents',
verbose_name=_("Source"))
portal = models.ManyToManyField('common.TargetPortal',
blank=True, related_name='touristicevents',
verbose_name=_("Portal"))
eid = models.CharField(verbose_name=_("External id"), max_length=1024, blank=True, null=True)
approved = models.BooleanField(verbose_name=_("Approved"), default=False)
id_prefix = 'E'
class Meta:
verbose_name = _("Touristic event")
verbose_name_plural = _("Touristic events")
ordering = ['-begin_date']
def __str__(self):
return self.name
@property
def type1(self):
return [self.type] if self.type else []
@property
def districts_display(self):
return ', '.join([str(d) for d in self.districts])
@property
def dates_display(self):
if not self.begin_date and not self.end_date:
return ""
elif not self.end_date:
return _("starting from {begin}").format(
begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'))
elif not self.begin_date:
return _("up to {end}").format(
end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
elif self.begin_date == self.end_date:
return date_format(self.begin_date, 'SHORT_DATE_FORMAT')
else:
return _("from {begin} to {end}").format(
begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'),
end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
@property
def prefixed_category_id(self):
return self.id_prefix
def distance(self, to_cls):
return settings.TOURISM_INTERSECTION_MARGIN
@property
def rando_url(self):
category_slug = _('touristic-event')
return '{}/{}/'.format(category_slug, self.slug)
@property
def meta_description(self):
return plain_text(self.description_teaser or self.description)[:500]
TouristicEvent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _("Touristic contents"))
TouristicEvent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _("Published touristic contents"))
Topology.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _("Touristic events"))
Topology.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _("Published touristic events"))
TouristicContent.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _("Touristic events"))
TouristicContent.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _("Published touristic events"))
TouristicEvent.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _("Touristic events"))
TouristicEvent.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _("Published touristic events"))
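# Editor's note (illustrative, not part of the original module): the
# add_property() calls above attach lazily evaluated reverse lookups to each
# model.  Assuming Geotrek's AddPropertyMixin exposes them as plain attributes,
# calling code could do something along these lines:
#
#     event = TouristicEvent.objects.first()
#     nearby = event.published_touristic_contents  # published contents whose
#                                                  # geometry intersects the event
#     for content in nearby:
#         print(content.name, content.category)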
|
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import argparse
import ConfigParser
#from ginkgo import Service
#from fabric.api import env
#from fabric.api import run
#from fabric.context_managers import settings
import eventlet
import os
import sys
# eventlet.monkey_patch(thread=False)
import uuid
import time
import errno
import socket
import subprocess
from vnc_api.vnc_api import *
import json
sys.path.insert(2, '/opt/stack/python-quantumclient')
from pprint import pformat
from quantumclient.quantum import client
from quantumclient.client import HTTPClient
from quantumclient.common import exceptions
class DemoCfg(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
httpclient = HTTPClient(
username='admin', tenant_name='demo', password='contrail123',
# region_name=self._region_name,
auth_url='http://%s:5000/v2.0' % (self._args.api_server_ip))
httpclient.authenticate()
#OS_URL = httpclient.endpoint_url
OS_URL = 'http://%s:9696/' % (self._args.api_server_ip)
OS_TOKEN = httpclient.auth_token
self._quantum = client.Client(
'2.0', endpoint_url=OS_URL, token=OS_TOKEN)
self._vnc_lib = VncApi(self._args.admin_user,
self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/')
self._create_vn('public', self._args.public_subnet)
self._policy_link_vns()
# end __init__
def _create_vn(self, vn_name, vn_subnet):
print "Creating network %s, subnet %s" % (vn_name, vn_subnet)
net_req = {'name': '%s' % (vn_name)}
net_rsp = self._quantum.create_network({'network': net_req})
net1_id = net_rsp['network']['id']
net1_fq_name = net_rsp['network']['fq_name']
net1_fq_name_str = ':'.join(net1_fq_name)
self._create_subnet(unicode(vn_subnet), net1_id)
# end _create_vn
def _policy_link_vns(self):
net1_id, net2_id, net1_fq_name, net2_fq_name = \
self._create_two_vns(vn1_name='front-end', vn2_name='back-end')
net1_fq_name_str = ':'.join(net1_fq_name)
net2_fq_name_str = ':'.join(net2_fq_name)
print "Creating policy front-end-to-back-end"
np_rules = [PolicyRuleType(
direction='<>',
action_list=ActionListType(simple_action='pass'), protocol='any',
src_addresses=[AddressType(virtual_network=net1_fq_name_str)],
src_ports=[PortType(-1, -1)],
dst_addresses=[AddressType(virtual_network=net2_fq_name_str)],
dst_ports=[PortType(-1, -1)])]
pol_entries = PolicyEntriesType(np_rules)
pol_entries_dict = \
json.loads(
json.dumps(
pol_entries,
default=lambda o: dict((k, v) for k,
v in o.__dict__.iteritems())))
policy_req = {'name': 'front-end-to-back-end',
'entries': pol_entries_dict}
policy_rsp = self._quantum.create_policy({'policy': policy_req})
policy1_fq_name = policy_rsp['policy']['fq_name']
print "Setting front-end policy to [front-end-to-back-end]"
net_req = {'policys': [policy1_fq_name]}
net_rsp = self._quantum.update_network(net1_id, {'network': net_req})
print "Setting back-end policy to [front-end-to-back-end]"
net_req = {'policys': [policy1_fq_name]}
net_rsp = self._quantum.update_network(net2_id, {'network': net_req})
# end _policy_link_vns
def _create_two_vns(self, vn1_name=None, vn1_tenant=None,
vn2_name=None, vn2_tenant=None):
if not vn1_name:
vn1_name = 'vn1'
if not vn2_name:
vn2_name = 'vn2'
print "Creating network %s, subnet 192.168.1.0/24" % (vn1_name)
net_req = {'name': vn1_name}
net_rsp = self._quantum.create_network({'network': net_req})
net1_id = net_rsp['network']['id']
net1_fq_name = net_rsp['network']['fq_name']
net1_fq_name_str = ':'.join(net1_fq_name)
self._create_subnet(u'192.168.1.0/24', net1_id)
print "Creating network %s, subnet 192.168.2.0/24" % (vn2_name)
net_req = {'name': vn2_name}
net_rsp = self._quantum.create_network({'network': net_req})
net2_id = net_rsp['network']['id']
net2_fq_name = net_rsp['network']['fq_name']
net2_fq_name_str = ':'.join(net2_fq_name)
self._create_subnet(u'192.168.2.0/24', net2_id)
return net1_id, net2_id, net1_fq_name, net2_fq_name
# end _create_two_vns
def _create_subnet(self, cidr, net_id, ipam_fq_name=None):
if not ipam_fq_name:
ipam_fq_name = NetworkIpam().get_fq_name()
subnet_req = {'network_id': net_id,
'cidr': cidr,
'ip_version': 4,
'ipam_fq_name': ipam_fq_name}
subnet_rsp = self._quantum.create_subnet({'subnet': subnet_req})
subnet_cidr = subnet_rsp['subnet']['cidr']
return subnet_rsp['subnet']['id']
# end _create_subnet
def _parse_args(self, args_str):
'''
E.g. python demo_cfg.py --api_server_ip 127.0.0.1
--api_server_port 8082
--public_subnet 10.84.41.0/24
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--api_server_ip", help="IP address of api server")
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument("--public_subnet", help="Subnet for public VN")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
# end class DemoCfg
def main(args_str=None):
DemoCfg(args_str)
# end main
if __name__ == "__main__":
main()
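# Editor's illustration (not part of the original script): _parse_args() above
# reads an optional ini file passed with "-c"; a file matching the sections it
# looks for could look like this (all values are made-up placeholders):
#
#     [DEFAULTS]
#     api_server_ip = 10.0.0.10
#     api_server_port = 8082
#     public_subnet = 10.84.41.0/24
#
#     [KEYSTONE]
#     admin_user = admin
#     admin_password = secret
#     admin_tenant_name = admin
#
# Command-line flags (e.g. --public_subnet) still override these defaults.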
|
|
from django.test import TestCase
from django.template import Template, Context, TemplateSyntaxError
from django.contrib.auth.models import AnonymousUser
from mock import patch
from waliki.models import ACLRule
from waliki.acl import check_perms
from .factories import UserFactory, GroupFactory, ACLRuleFactory
class TestGetUsersRules(TestCase):
def test_simple_user(self):
user = UserFactory()
ACLRuleFactory(slug='page', permissions=['view_page'], users=[user])
users = ACLRule.get_users_for('view_page', 'page')
self.assertEqual(set(users), {user})
def test_simple_group(self):
group_users = [UserFactory(), UserFactory()]
group = GroupFactory(users=group_users)
ACLRuleFactory(slug='page', permissions=['view_page'], groups=[group])
users = ACLRule.get_users_for('view_page', 'page')
self.assertEqual(set(users), set(group_users))
def test_mixing_group_and_users(self):
user = UserFactory()
group1_users = [UserFactory(), UserFactory()]
group2_users = [UserFactory(), UserFactory()]
group1 = GroupFactory(users=group1_users)
group2 = GroupFactory(users=group2_users)
ACLRuleFactory(slug='page', permissions=['view_page'],
groups=[group1, group2], users=[user])
users = ACLRule.get_users_for('view_page', 'page')
self.assertEqual(set(users), set(group1_users + group2_users + [user]))
def test_is_distinct(self):
user = UserFactory()
group1_users = [user]
group1 = GroupFactory(users=group1_users)
ACLRuleFactory(slug='page', permissions=['view_page'],
groups=[group1], users=[user])
users = ACLRule.get_users_for('view_page', 'page')
self.assertEqual(users.count(), 1)
self.assertEqual(set(users), set(group1_users))
def test_simple_user_for_multiples_perms(self):
user1 = UserFactory()
user2 = UserFactory()
ACLRuleFactory(
slug='page', permissions=['view_page'], users=[user1, user2])
ACLRuleFactory(slug='page', permissions=['view_page', 'change_page'], users=[user1])
users = ACLRule.get_users_for(['view_page', 'change_page'], 'page')
self.assertEqual(set(users), {user1})
def test_any_user(self):
user1 = UserFactory()
ACLRuleFactory(slug='page', permissions=['view_page'], apply_to=ACLRule.TO_ANY)
users = ACLRule.get_users_for(['view_page'], 'page')
self.assertIn(AnonymousUser(), users)
self.assertIn(user1, users)
def test_any_logged_user(self):
user1 = UserFactory()
ACLRuleFactory(slug='page', permissions=['view_page'], apply_to=ACLRule.TO_LOGGED)
users = ACLRule.get_users_for(['view_page'], 'page')
self.assertNotIn(AnonymousUser(), users)
self.assertIn(user1, users)
def test_to_staff(self):
UserFactory()
user2 = UserFactory(is_staff=True)
ACLRuleFactory(slug='page', permissions=['view_page'], apply_to=ACLRule.TO_STAFF)
users = ACLRule.get_users_for(['view_page'], 'page')
self.assertEqual(set(users), {user2})
def test_to_super(self):
UserFactory()
UserFactory(is_staff=True)
user2 = UserFactory(is_superuser=True)
ACLRuleFactory(slug='page', permissions=['change_page'], apply_to=ACLRule.TO_SUPERUSERS)
users = ACLRule.get_users_for(['change_page'], 'page')
self.assertEqual(set(users), {user2})
class TestNamespaces(TestCase):
def test_simple_namespace(self):
user1 = UserFactory()
user2 = UserFactory()
ACLRuleFactory(slug='user1-section', permissions=['change_page'],
as_namespace=True, users=[user1])
self.assertTrue(check_perms('change_page', user1, 'user1-section'))
self.assertTrue(check_perms('change_page', user1, 'user1-section/nested/page'))
self.assertFalse(check_perms('change_page', user2, 'user1-section'))
self.assertFalse(check_perms('change_page', user2, 'user1-section/nested/page'))
def test_two_levels_namespace(self):
user1 = UserFactory()
user2 = UserFactory()
ACLRuleFactory(slug='section/special', permissions=['change_page'],
as_namespace=True, users=[user1])
self.assertTrue(check_perms('change_page', user1, 'section/special'))
self.assertTrue(check_perms('change_page', user1, 'section/special/page'))
self.assertTrue(check_perms('change_page', user2, 'section/no-special'))
self.assertFalse(check_perms('change_page', user2, 'section/special/page'))
def test_a_section_for_staff(self):
user = UserFactory()
staff_member = UserFactory(is_staff=True)
ACLRuleFactory(slug='staff-section', permissions=['view_page', 'add_page', 'change_page'],
as_namespace=True, apply_to=ACLRule.TO_STAFF)
for perm in ['view_page', 'add_page', 'change_page']:
self.assertFalse(check_perms(perm, user, 'staff-section'))
self.assertFalse(check_perms(perm, user, 'staff-section/a-page'))
self.assertTrue(check_perms(perm, staff_member, 'staff-section'))
self.assertTrue(check_perms(perm, staff_member, 'staff-section/a-page'))
class CheckPermTagTest(TestCase):
def render_template(self, template, context):
"""
Returns rendered ``template`` with ``context``, which are given as string
and dict respectively.
"""
t = Template(template)
return t.render(Context(context))
def test_wrong_formats(self):
wrong_formats = (
# no quotes
'{% check_perms "view_page" for user in slug as has_perm %}',
# wrong quotes
'{% check_perms "view_page" for user in slug as \'has_perms" %}',
# wrong quotes
'{% check_perms view_page for user in slug as "has_perms" %}',
# wrong quotes
'{% check_perms "view_page, change_page for user in slug as "has_perms" %}',
# wrong quotes
'{% check_perms "view_page" user in slug as "has_perms" %}',
# no context_var
'{% check_perms "view_page" for user in slug as %}',
# no slug
'{% check_perms "view_page" for user as "has_perms" %}',
# no user
'{% check_perms "view_page" in slug as "has_perms" %}',
# no "for" bit
'{% check_perms "view_page, change_page" user in slug as "has_perms" %}',
# no "as" bit
'{% check_perms "view_page" for user in slug "has_perms" %}',
)
context = {'user': UserFactory(), 'slug': "any/slug"}
for wrong in wrong_formats:
fullwrong = '{% load waliki_tags %}' + wrong
with self.assertRaises(TemplateSyntaxError):
self.render_template(fullwrong, context)
def test_check_users_is_called(self):
template = """
{% load waliki_tags %}
{% check_perms "view_page" for user in slug as "has_perms" %}
{{ has_perms }}
"""
user = UserFactory()
slug = 'any/slug'
context = {'user': user, 'slug': slug}
with patch('waliki.templatetags.waliki_tags.check_perms_helper') as check:
check.return_value = "return_value"
output = self.render_template(template, context)
check.assert_called_once_with(["view_page"], user, slug)
self.assertEqual(output.strip(), 'return_value')
def test_check_users_is_called_with_multiple(self):
template = """
{% load waliki_tags %}
{% check_perms "x, y,z" for user in slug as "has_perms" %}
{{ has_perms }}
"""
user = UserFactory()
slug = 'any/slug'
context = {'user': user, 'slug': slug}
with patch('waliki.templatetags.waliki_tags.check_perms_helper') as check:
check.return_value = "return_value"
output = self.render_template(template, context)
check.assert_called_once_with(["x", "y", "z"], user, slug)
self.assertEqual(output.strip(), 'return_value')
def test_check_users_is_called_slug_literal(self):
template = """
{% load waliki_tags %}
{% check_perms "x, y,z" for user in "literal_slug" as "has_perms" %}
{{ has_perms }}
"""
user = UserFactory()
context = {'user': user}
with patch('waliki.templatetags.waliki_tags.check_perms_helper') as check:
check.return_value = "return_value"
output = self.render_template(template, context)
check.assert_called_once_with(["x", "y", "z"], user, "literal_slug")
self.assertEqual(output.strip(), 'return_value')
|
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import re
import six
from six import reraise
from six.moves import range
from impala.util import _escape
from impala.error import (Error, Warning, InterfaceError, DatabaseError,
InternalError, OperationalError, ProgrammingError,
IntegrityError, DataError, NotSupportedError)
class Connection(object):
# PEP 249
# Connection objects are associated with a TCLIService.Client thrift
# service
# it's instantiated with an alive TCLIService.Client
def close(self):
# PEP 249
raise NotImplementedError
def commit(self):
# PEP 249
raise NotImplementedError
def rollback(self):
# PEP 249
raise NotImplementedError
def cursor(self, session_handle=None, user=None, configuration=None):
# PEP 249
raise NotImplementedError
def reconnect(self):
raise NotImplementedError
def kerberized(self):
# returns bool whether underlying service is kerberized or not
from thrift_sasl import TSaslClientTransport
if isinstance(self.service._iprot.trans, TSaslClientTransport):
if self.service._iprot.trans.mechanism == 'GSSAPI':
return True
return False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if exc_type is not None:
reraise(exc_type, exc_val, exc_tb)
# optional DB API addition to make the errors attributes of Connection
Error = Error
Warning = Warning
InterfaceError = InterfaceError
DatabaseError = DatabaseError
InternalError = InternalError
OperationalError = OperationalError
ProgrammingError = ProgrammingError
IntegrityError = IntegrityError
DataError = DataError
NotSupportedError = NotSupportedError
class Cursor(object):
"""Abstract representation of Cursor"""
def description(self):
raise NotImplementedError
def rowcount(self):
raise NotImplementedError
def query_string(self):
raise NotImplementedError
def get_arraysize(self):
raise NotImplementedError
def set_arraysize(self, arraysize):
raise NotImplementedError
def buffersize(self):
raise NotImplementedError
def has_result_set(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def close_operation(self):
raise NotImplementedError
def execute(self, operation, parameters=None, configuration=None):
raise NotImplementedError
def _execute_sync(self, operation_fn):
raise NotImplementedError
def _reset_state(self):
raise NotImplementedError
def _wait_to_finish(self):
raise NotImplementedError
def executemany(self, operation, seq_of_parameters):
raise NotImplementedError
def fetchone(self):
raise NotImplementedError
def fetchmany(self, size=None):
raise NotImplementedError
def fetchall(self):
raise NotImplementedError
def setinputsizes(self, sizes):
raise NotImplementedError
def setoutputsize(self, size, column=None):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __next__(self):
raise NotImplementedError
def next(self):
# for py2 compat
return self.__next__()
def ping(self):
raise NotImplementedError
def get_log(self):
raise NotImplementedError
def get_profile(self):
raise NotImplementedError
def get_summary(self):
raise NotImplementedError
def build_summary_table(self, summary, idx, is_fragment_root, indent_level,
output):
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if exc_type is not None:
reraise(exc_type, exc_val, exc_tb)
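# Editor's illustration (not part of the original module): the __enter__/__exit__
# methods above let concrete subclasses be used with the usual DB API 2.0
# context-manager pattern.  The impala.dbapi.connect() entry point and its
# arguments below are assumptions shown only for flavour:
#
#     from impala.dbapi import connect
#     with connect(host='impalad.example.com', port=21050) as conn:
#         with conn.cursor() as cur:
#             cur.execute('SELECT 1')
#             print(cur.fetchall())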
def _replace_numeric_markers(operation, string_parameters):
"""
Replaces qmark and numeric markers in the given operation, from
the string_parameters list.
Raises ProgrammingError on wrong number of parameters or bindings
when using qmark. There is no error checking on numeric parameters.
"""
# replace qmark parameters
param_count = len(string_parameters)
qmark_index = 0
while operation.find('?') > -1:
if qmark_index < param_count:
operation = operation.replace('?',
string_parameters[qmark_index],
1)
qmark_index += 1
else:
raise ProgrammingError("Incorrect number of bindings "+
"supplied. The current statement uses "
"%d or more, and there are %d supplied." %
(qmark_index+1, param_count))
if qmark_index != 0 and qmark_index != param_count:
raise ProgrammingError("Incorrect number of bindings "+
"supplied. The current statement uses "
"%d or more, and there are %d supplied." %
(qmark_index+1, param_count))
# replace numbered parameters
# Go through them backwards so smaller numbers don't replace
# parts of larger ones
for index in range(param_count, 0, -1):
operation = operation.replace(':' + str(index),
string_parameters[index-1])
return operation
def _bind_parameters_list(operation, parameters):
string_parameters = []
for value in parameters:
if value is None:
string_parameters.append('NULL')
elif isinstance(value, six.string_types):
string_parameters.append("'" + _escape(value) + "'")
else:
string_parameters.append(str(value))
# replace qmark and numeric parameters
return _replace_numeric_markers(operation, string_parameters)
def _bind_parameters_dict(operation, parameters):
string_parameters = {}
for (name, value) in six.iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, six.string_types):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
# replace named parameters by their pyformat equivalents
operation = re.sub(":([^\d\W]\w*)", "%(\g<1>)s", operation)
# replace pyformat parameters
return operation % string_parameters
def _bind_parameters(operation, parameters):
# If parameters is a list, assume either qmark or numeric
# format. If not, assume either named or pyformat parameters
if isinstance(parameters, (list, tuple)):
return _bind_parameters_list(operation, parameters)
elif isinstance(parameters, dict):
return _bind_parameters_dict(operation, parameters)
else:
raise ProgrammingError("Query parameters argument should be a "+
"list, tuple, or dict object")
|
|
"""Built-in template filters used with the ``|`` operator."""
import math
import random
import re
from collections import abc
from collections import namedtuple
from itertools import chain
from itertools import groupby
from markupsafe import escape
from markupsafe import Markup
from markupsafe import soft_str
from .exceptions import FilterArgumentError
from .runtime import Undefined
from .utils import htmlsafe_json_dumps
from .utils import pformat
from .utils import url_quote
from .utils import urlize
_word_re = re.compile(r"\w+")
_word_beginning_split_re = re.compile(r"([-\s({\[<]+)")
def contextfilter(f):
"""Decorator for marking context dependent filters. The current
:class:`Context` will be passed as first argument.
"""
f.contextfilter = True
return f
def evalcontextfilter(f):
"""Decorator for marking eval-context dependent filters. An eval
context object is passed as first argument. For more information
about the eval context, see :ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfilter = True
return f
def environmentfilter(f):
"""Decorator for marking environment dependent filters. The current
:class:`Environment` is passed to the filter as first argument.
"""
f.environmentfilter = True
return f
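# Editor's sketch, not part of the original module: a user-defined filter can
# reuse the markers above in the same way the built-ins below do.  The
# Environment import and the ``filters`` registry are the public Jinja API,
# shown here only for illustration:
#
#     @environmentfilter
#     def do_shout(environment, value):
#         return soft_str(value).upper() + "!"
#
#     # env = jinja2.Environment()
#     # env.filters["shout"] = do_shout
#     # env.from_string("{{ 'hi'|shout }}").render()  -> "HI!"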
def ignore_case(value):
"""For use as a postprocessor for :func:`make_attrgetter`. Converts strings
to lowercase and returns other types as-is."""
return value.lower() if isinstance(value, str) else value
def make_attrgetter(environment, attribute, postprocess=None, default=None):
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
attribute = _prepare_attribute_parts(attribute)
def attrgetter(item):
for part in attribute:
item = environment.getitem(item, part)
if default and isinstance(item, Undefined):
item = default
if postprocess is not None:
item = postprocess(item)
return item
return attrgetter
def make_multi_attrgetter(environment, attribute, postprocess=None):
"""Returns a callable that looks up the given comma separated
attributes from a passed object with the rules of the environment.
Dots are allowed to access attributes of each attribute. Integer
parts in paths are looked up as integers.
The value returned by the returned callable is a list of extracted
attribute values.
Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
"""
attribute_parts = (
attribute.split(",") if isinstance(attribute, str) else [attribute]
)
attribute = [
_prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts
]
def attrgetter(item):
items = [None] * len(attribute)
for i, attribute_part in enumerate(attribute):
item_i = item
for part in attribute_part:
item_i = environment.getitem(item_i, part)
if postprocess is not None:
item_i = postprocess(item_i)
items[i] = item_i
return items
return attrgetter
def _prepare_attribute_parts(attr):
if attr is None:
return []
elif isinstance(attr, str):
return [int(x) if x.isdigit() else x for x in attr.split(".")]
else:
return [attr]
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, "__html__"):
value = value.__html__()
return escape(str(value))
def do_urlencode(value):
"""Quote data for use in a URL path or query using UTF-8.
Basic wrapper around :func:`urllib.parse.quote` when given a
string, or :func:`urllib.parse.urlencode` for a dict or iterable.
:param value: Data to quote. A string will be quoted directly. A
dict or iterable of ``(key, value)`` pairs will be joined as a
query string.
When given a string, "/" is not quoted. HTTP servers treat "/" and
"%2F" equivalently in paths. If you need quoted slashes, use the
``|replace("/", "%2F")`` filter.
.. versionadded:: 2.7
"""
if isinstance(value, str) or not isinstance(value, abc.Iterable):
return url_quote(value)
if isinstance(value, dict):
items = value.items()
else:
items = iter(value)
return "&".join(
f"{url_quote(k, for_qs=True)}={url_quote(v, for_qs=True)}" for k, v in items
)
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
"""Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
If the optional third argument ``count`` is given, only the first
``count`` occurrences are replaced:
.. sourcecode:: jinja
{{ "Hello World"|replace("Hello", "Goodbye") }}
-> Goodbye World
{{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
-> d'oh, d'oh, aaargh
"""
if count is None:
count = -1
if not eval_ctx.autoescape:
return str(s).replace(str(old), str(new), count)
if (
hasattr(old, "__html__")
or hasattr(new, "__html__")
and not hasattr(s, "__html__")
):
s = escape(s)
else:
s = soft_str(s)
return s.replace(soft_str(old), soft_str(new), count)
def do_upper(s):
"""Convert a value to uppercase."""
return soft_str(s).upper()
def do_lower(s):
"""Convert a value to lowercase."""
return soft_str(s).lower()
@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
"""Create an SGML/XML attribute string based on the items in a dict.
All values that are neither `none` nor `undefined` are automatically
escaped:
.. sourcecode:: html+jinja
<ul{{ {'class': 'my_list', 'missing': none,
'id': 'list-%d'|format(variable)}|xmlattr }}>
...
</ul>
Results in something like this:
.. sourcecode:: html
<ul class="my_list" id="list-42">
...
</ul>
As you can see, it automatically prepends a space in front of the item
if the filter returned something, unless the second parameter is false.
"""
rv = " ".join(
f'{escape(key)}="{escape(value)}"'
for key, value in d.items()
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
rv = " " + rv
if _eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_capitalize(s):
"""Capitalize a value. The first character will be uppercase, all others
lowercase.
"""
return soft_str(s).capitalize()
def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
return "".join(
[
item[0].upper() + item[1:].lower()
for item in _word_beginning_split_re.split(soft_str(s))
if item
]
)
def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
"""
if by == "key":
pos = 0
elif by == "value":
pos = 1
else:
raise FilterArgumentError('You can only sort by either "key" or "value"')
def sort_func(item):
value = item[pos]
if not case_sensitive:
value = ignore_case(value)
return value
return sorted(value.items(), key=sort_func, reverse=reverse)
@environmentfilter
def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None):
"""Sort an iterable using Python's :func:`sorted`.
.. sourcecode:: jinja
{% for city in cities|sort %}
...
{% endfor %}
:param reverse: Sort descending instead of ascending.
:param case_sensitive: When sorting strings, sort upper and lower
case separately.
:param attribute: When sorting objects or dicts, an attribute or
key to sort by. Can use dot notation like ``"address.city"``.
Can be a list of attributes like ``"age,name"``.
The sort is stable, it does not change the relative order of
elements that compare equal. This makes it possible to chain
sorts on different attributes and ordering.
.. sourcecode:: jinja
{% for user in users|sort(attribute="name")
|sort(reverse=true, attribute="age") %}
...
{% endfor %}
As a shortcut to chaining when the direction is the same for all
attributes, pass a comma separate list of attributes.
.. sourcecode:: jinja
{% for user in users|sort(attribute="age,name") %}
...
{% endfor %}
.. versionchanged:: 2.11.0
The ``attribute`` parameter can be a comma separated list of
attributes, e.g. ``"age,name"``.
.. versionchanged:: 2.6
The ``attribute`` parameter was added.
"""
key_func = make_multi_attrgetter(
environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return sorted(value, key=key_func, reverse=reverse)
@environmentfilter
def do_unique(environment, value, case_sensitive=False, attribute=None):
"""Returns a list of unique items from the given iterable.
.. sourcecode:: jinja
{{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
the iterable passed to the filter.
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Filter objects with unique values for this attribute.
"""
getter = make_attrgetter(
environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
seen = set()
for item in value:
key = getter(item)
if key not in seen:
seen.add(key)
yield item
def _min_or_max(environment, value, func, case_sensitive, attribute):
it = iter(value)
try:
first = next(it)
except StopIteration:
return environment.undefined("No aggregated item, sequence was empty.")
key_func = make_attrgetter(
environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return func(chain([first], it), key=key_func)
@environmentfilter
def do_min(environment, value, case_sensitive=False, attribute=None):
"""Return the smallest item from the sequence.
.. sourcecode:: jinja
{{ [1, 2, 3]|min }}
-> 1
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Get the object with the min value of this attribute.
"""
return _min_or_max(environment, value, min, case_sensitive, attribute)
@environmentfilter
def do_max(environment, value, case_sensitive=False, attribute=None):
"""Return the largest item from the sequence.
.. sourcecode:: jinja
{{ [1, 2, 3]|max }}
-> 3
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Get the object with the max value of this attribute.
"""
return _min_or_max(environment, value, max, case_sensitive, attribute)
def do_default(value, default_value="", boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
.. versionchanged:: 2.11
It's now possible to configure the :class:`~jinja2.Environment` with
:class:`~jinja2.ChainableUndefined` to make the `default` filter work
on nested elements and attributes that may contain undefined values
in the chain without getting an :exc:`~jinja2.UndefinedError`.
"""
if isinstance(value, Undefined) or (boolean and not value):
return default_value
return value
@evalcontextfilter
def do_join(eval_ctx, value, d="", attribute=None):
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string by
default; you can define it with the optional parameter:
.. sourcecode:: jinja
{{ [1, 2, 3]|join('|') }}
-> 1|2|3
{{ [1, 2, 3]|join }}
-> 123
It is also possible to join certain attributes of an object:
.. sourcecode:: jinja
{{ users|join(', ', attribute='username') }}
.. versionadded:: 2.6
The `attribute` parameter was added.
"""
if attribute is not None:
value = map(make_attrgetter(eval_ctx.environment, attribute), value)
# no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
return str(d).join(map(str, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
if not hasattr(d, "__html__"):
value = list(value)
do_escape = False
for idx, item in enumerate(value):
if hasattr(item, "__html__"):
do_escape = True
else:
value[idx] = str(item)
if do_escape:
d = escape(d)
else:
d = str(d)
return d.join(value)
# no html involved, do normal joining
return soft_str(d).join(map(soft_str, value))
def do_center(value, width=80):
"""Centers the value in a field of a given width."""
return str(value).center(width)
@environmentfilter
def do_first(environment, seq):
"""Return the first item of a sequence."""
try:
return next(iter(seq))
except StopIteration:
return environment.undefined("No first item, sequence was empty.")
@environmentfilter
def do_last(environment, seq):
"""
Return the last item of a sequence.
Note: Does not work with generators. You may want to explicitly
convert it to a list:
.. sourcecode:: jinja
{{ data | selectattr('name', '==', 'Jinja') | list | last }}
"""
try:
return next(iter(reversed(seq)))
except StopIteration:
return environment.undefined("No last item, sequence was empty.")
@contextfilter
def do_random(context, seq):
"""Return a random item from the sequence."""
try:
return random.choice(seq)
except IndexError:
return context.environment.undefined("No random item, sequence was empty.")
def do_filesizeformat(value, binary=False):
"""Format the value like a 'human-readable' file size (i.e. 13 kB,
4.1 MB, 102 Bytes, etc.). Decimal prefixes are used by default (Mega,
Giga, etc.); if the second parameter is set to `True` the binary
prefixes are used (Mebi, Gibi).
"""
bytes = float(value)
base = 1024 if binary else 1000
prefixes = [
("KiB" if binary else "kB"),
("MiB" if binary else "MB"),
("GiB" if binary else "GB"),
("TiB" if binary else "TB"),
("PiB" if binary else "PB"),
("EiB" if binary else "EB"),
("ZiB" if binary else "ZB"),
("YiB" if binary else "YB"),
]
if bytes == 1:
return "1 Byte"
elif bytes < base:
return f"{int(bytes)} Bytes"
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if bytes < unit:
return f"{base * bytes / unit:.1f} {prefix}"
return f"{base * bytes / unit:.1f} {prefix}"
def do_pprint(value):
"""Pretty print a variable. Useful for debugging."""
return pformat(value)
@evalcontextfilter
def do_urlize(
eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None
):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
to that number. Also a third argument exists that makes the urls
"nofollow":
.. sourcecode:: jinja
{{ mytext|urlize(40, true) }}
links are shortened to 40 chars and defined with rel="nofollow"
If *target* is specified, the ``target`` attribute will be added to the
``<a>`` tag:
.. sourcecode:: jinja
{{ mytext|urlize(40, target='_blank') }}
.. versionchanged:: 2.8
The ``target`` parameter was added.
"""
policies = eval_ctx.environment.policies
rel = set((rel or "").split() or [])
if nofollow:
rel.add("nofollow")
rel.update((policies["urlize.rel"] or "").split())
if target is None:
target = policies["urlize.target"]
rel = " ".join(sorted(rel)) or None
rv = urlize(value, trim_url_limit, rel=rel, target=target)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_indent(s, width=4, first=False, blank=False):
"""Return a copy of the string with each line indented by 4 spaces. The
first line and blank lines are not indented by default.
:param width: Number of spaces to indent by.
:param first: Don't skip indenting the first line.
:param blank: Don't skip indenting empty lines.
.. versionchanged:: 2.10
Blank lines are not indented by default.
Rename the ``indentfirst`` argument to ``first``.
"""
indention = " " * width
newline = "\n"
if isinstance(s, Markup):
indention = Markup(indention)
newline = Markup(newline)
s += newline # this quirk is necessary for splitlines method
if blank:
rv = (newline + indention).join(s.splitlines())
else:
lines = s.splitlines()
rv = lines.pop(0)
if lines:
rv += newline + newline.join(
indention + line if line else line for line in lines
)
if first:
rv = indention + rv
return rv
@environmentfilter
def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
it will discard the last word. If the text was in fact
truncated it will append an ellipsis sign (``"..."``). If you want a
different ellipsis sign than ``"..."`` you can specify it using the
third parameter. Strings that only exceed the length by the tolerance
margin given in the fourth parameter will not be truncated.
.. sourcecode:: jinja
{{ "foo bar baz qux"|truncate(9) }}
-> "foo..."
{{ "foo bar baz qux"|truncate(9, True) }}
-> "foo ba..."
{{ "foo bar baz qux"|truncate(11) }}
-> "foo bar baz qux"
{{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
-> "foo bar..."
The default leeway on newer Jinja versions is 5 and was 0 before but
can be reconfigured globally.
"""
if leeway is None:
leeway = env.policies["truncate.leeway"]
assert length >= len(end), f"expected length >= {len(end)}, got {length}"
assert leeway >= 0, f"expected leeway >= 0, got {leeway}"
if len(s) <= length + leeway:
return s
if killwords:
return s[: length - len(end)] + end
result = s[: length - len(end)].rsplit(" ", 1)[0]
return result + end
@environmentfilter
def do_wordwrap(
environment,
s,
width=79,
break_long_words=True,
wrapstring=None,
break_on_hyphens=True,
):
"""Wrap a string to the given width. Existing newlines are treated
as paragraphs to be wrapped separately.
:param s: Original text to wrap.
:param width: Maximum length of wrapped lines.
:param break_long_words: If a word is longer than ``width``, break
it across lines.
:param break_on_hyphens: If a word contains hyphens, it may be split
across lines.
:param wrapstring: String to join each wrapped line. Defaults to
:attr:`Environment.newline_sequence`.
.. versionchanged:: 2.11
Existing newlines are treated as paragraphs wrapped separately.
.. versionchanged:: 2.11
Added the ``break_on_hyphens`` parameter.
.. versionchanged:: 2.7
Added the ``wrapstring`` parameter.
"""
import textwrap
if not wrapstring:
wrapstring = environment.newline_sequence
# textwrap.wrap doesn't consider existing newlines when wrapping.
# If the string has a newline before width, wrap will still insert
# a newline at width, resulting in a short line. Instead, split and
# wrap each paragraph individually.
return wrapstring.join(
[
wrapstring.join(
textwrap.wrap(
line,
width=width,
expand_tabs=False,
replace_whitespace=False,
break_long_words=break_long_words,
break_on_hyphens=break_on_hyphens,
)
)
for line in s.splitlines()
]
)
def do_wordcount(s):
"""Count the words in that string."""
return len(_word_re.findall(soft_str(s)))
def do_int(value, default=0, base=10):
"""Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter. You
can also override the default base (10) in the second
parameter, which handles input with prefixes such as
0b, 0o and 0x for bases 2, 8 and 16 respectively.
The base is ignored for decimal numbers and non-string values.
"""
try:
if isinstance(value, str):
return int(value, base)
return int(value)
except (TypeError, ValueError):
# this quirk is necessary so that "42.23"|int gives 42.
try:
return int(float(value))
except (TypeError, ValueError):
return default
def do_float(value, default=0.0):
"""Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter.
"""
try:
return float(value)
except (TypeError, ValueError):
return default
def do_format(value, *args, **kwargs):
"""Apply the given values to a `printf-style`_ format string, like
``string % values``.
.. sourcecode:: jinja
{{ "%s, %s!"|format(greeting, name) }}
Hello, World!
In most cases it should be more convenient and efficient to use the
``%`` operator or :meth:`str.format`.
.. code-block:: text
{{ "%s, %s!" % (greeting, name) }}
{{ "{}, {}!".format(greeting, name) }}
.. _printf-style: https://docs.python.org/library/stdtypes.html
#printf-style-string-formatting
"""
if args and kwargs:
raise FilterArgumentError(
"can't handle positional and keyword arguments at the same time"
)
return soft_str(value) % (kwargs or args)
def do_trim(value, chars=None):
"""Strip leading and trailing characters, by default whitespace."""
return soft_str(value).strip(chars)
def do_striptags(value):
"""Strip SGML/XML tags and replace adjacent whitespace by one space."""
if hasattr(value, "__html__"):
value = value.__html__()
return Markup(str(value)).striptags()
def do_slice(value, slices, fill_with=None):
"""Slice an iterator and return a list of lists containing
those items. Useful if you want to create a div containing
three ul tags that represent columns:
.. sourcecode:: html+jinja
<div class="columnwrapper">
{%- for column in items|slice(3) %}
<ul class="column-{{ loop.index }}">
{%- for item in column %}
<li>{{ item }}</li>
{%- endfor %}
</ul>
{%- endfor %}
</div>
If you pass it a second argument it's used to fill missing
values on the last iteration.
"""
seq = list(value)
length = len(seq)
items_per_slice = length // slices
slices_with_extra = length % slices
offset = 0
for slice_number in range(slices):
start = offset + slice_number * items_per_slice
if slice_number < slices_with_extra:
offset += 1
end = offset + (slice_number + 1) * items_per_slice
tmp = seq[start:end]
if fill_with is not None and slice_number >= slices_with_extra:
tmp.append(fill_with)
yield tmp
def do_batch(value, linecount, fill_with=None):
"""
A filter that batches items. It works pretty much like `slice`
just the other way round. It returns a list of lists with the
given number of items. If you provide a second parameter this
is used to fill up missing items. See this example:
.. sourcecode:: html+jinja
<table>
{%- for row in items|batch(3, ' ') %}
<tr>
{%- for column in row %}
<td>{{ column }}</td>
{%- endfor %}
</tr>
{%- endfor %}
</table>
"""
tmp = []
for item in value:
if len(tmp) == linecount:
yield tmp
tmp = []
tmp.append(item)
if tmp:
if fill_with is not None and len(tmp) < linecount:
tmp += [fill_with] * (linecount - len(tmp))
yield tmp
def do_round(value, precision=0, method="common"):
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
- ``'common'`` rounds either up or down
- ``'ceil'`` always rounds up
- ``'floor'`` always rounds down
If you don't specify a method ``'common'`` is used.
.. sourcecode:: jinja
{{ 42.55|round }}
-> 43.0
{{ 42.55|round(1, 'floor') }}
-> 42.5
Note that even if rounded to 0 precision, a float is returned. If
you need a real integer, pipe it through `int`:
.. sourcecode:: jinja
{{ 42.55|round|int }}
-> 43
"""
if method not in {"common", "ceil", "floor"}:
raise FilterArgumentError("method must be common, ceil or floor")
if method == "common":
return round(value, precision)
func = getattr(math, method)
return func(value * (10 ** precision)) / (10 ** precision)
# Use a regular tuple repr here. This is what we did in the past and we
# really want to hide this custom type as much as possible. In particular
# we do not want to accidentally expose an auto generated repr in case
# people start to print this out in comments or something similar for
# debugging.
_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"])
_GroupTuple.__repr__ = tuple.__repr__ # type: ignore
_GroupTuple.__str__ = tuple.__str__ # type: ignore
@environmentfilter
def do_groupby(environment, value, attribute):
"""Group a sequence of objects by an attribute using Python's
:func:`itertools.groupby`. The attribute can use dot notation for
nested access, like ``"address.city"``. Unlike Python's ``groupby``,
the values are sorted first so only one group is returned for each
unique value.
For example, a list of ``User`` objects with a ``city`` attribute
can be rendered in groups. In this example, ``grouper`` refers to
the ``city`` value of the group.
.. sourcecode:: html+jinja
<ul>{% for city, items in users|groupby("city") %}
<li>{{ city }}
<ul>{% for user in items %}
<li>{{ user.name }}
{% endfor %}</ul>
</li>
{% endfor %}</ul>
``groupby`` yields namedtuples of ``(grouper, list)``, which
can be used instead of the tuple unpacking above. ``grouper`` is the
value of the attribute, and ``list`` is the items with that value.
.. sourcecode:: html+jinja
<ul>{% for group in users|groupby("city") %}
<li>{{ group.grouper }}: {{ group.list|join(", ") }}
{% endfor %}</ul>
.. versionchanged:: 2.6
The attribute supports dot notation for nested access.
"""
expr = make_attrgetter(environment, attribute)
return [
_GroupTuple(key, list(values))
for key, values in groupby(sorted(value, key=expr), expr)
]
@environmentfilter
def do_sum(environment, iterable, attribute=None, start=0):
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
It is also possible to sum up only certain attributes:
.. sourcecode:: jinja
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
The `attribute` parameter was added to allow summing up over
attributes. Also the `start` parameter was moved on to the right.
"""
if attribute is not None:
iterable = map(make_attrgetter(environment, attribute), iterable)
return sum(iterable, start)
def do_list(value):
"""Convert the value into a list. If it was a string the returned list
will be a list of characters.
"""
return list(value)
def do_mark_safe(value):
"""Mark the value as safe which means that in an environment with automatic
escaping enabled this variable will not be escaped.
"""
return Markup(value)
def do_mark_unsafe(value):
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
return str(value)
def do_reverse(value):
"""Reverse the object or return an iterator that iterates over it the other
way round.
"""
if isinstance(value, str):
return value[::-1]
try:
return reversed(value)
except TypeError:
try:
rv = list(value)
rv.reverse()
return rv
except TypeError:
raise FilterArgumentError("argument must be iterable")
@environmentfilter
def do_attr(environment, obj, name):
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo.bar`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
try:
name = str(name)
except UnicodeError:
pass
else:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
if environment.sandboxed and not environment.is_safe_attribute(
obj, name, value
):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
@contextfilter
def do_map(*args, **kwargs):
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
You can specify a ``default`` value to use if an object in the list
does not have the given attribute.
.. sourcecode:: jinja
{{ users|map(attribute="username", default="Anonymous")|join(", ") }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
Similar to a generator comprehension such as:
.. code-block:: python
(u.username for u in users)
(u.username or "Anonymous" for u in users)
(do_lower(x) for x in titles)
.. versionchanged:: 2.11.0
Added the ``default`` parameter.
.. versionadded:: 2.7
"""
seq, func = prepare_map(args, kwargs)
if seq:
for item in seq:
yield func(item)
@contextfilter
def do_select(*args, **kwargs):
"""Filters a sequence of objects by applying a test to each object,
and only selecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|select("odd") }}
{{ numbers|select("odd") }}
{{ numbers|select("divisibleby", 3) }}
{{ numbers|select("lessthan", 42) }}
{{ strings|select("equalto", "mystring") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if test_odd(n))
(n for n in numbers if test_divisibleby(n, 3))
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, False)
@contextfilter
def do_reject(*args, **kwargs):
"""Filters a sequence of objects by applying a test to each object,
and rejecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|reject("odd") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if not test_odd(n))
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, False)
@contextfilter
def do_selectattr(*args, **kwargs):
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and only selecting the objects with the
test succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
Example usage:
.. sourcecode:: jinja
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
(u for u in users if u.is_active)
(u for u in users if test_none(u.email))
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, True)
@contextfilter
def do_rejectattr(*args, **kwargs):
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and rejecting the objects with the test
succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
.. sourcecode:: jinja
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
(u for u in users if not u.is_active)
(u for u in users if not test_none(u.email))
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, True)
@evalcontextfilter
def do_tojson(eval_ctx, value, indent=None):
"""Dumps a structure to JSON so that it's safe to use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
The indent parameter can be used to enable pretty printing. Set it to
the number of spaces that the structures should be indented with.
Note that this filter is for use in HTML contexts only.
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
dumper = policies["json.dumps_function"]
options = policies["json.dumps_kwargs"]
if indent is not None:
options = dict(options)
options["indent"] = indent
return htmlsafe_json_dumps(value, dumper=dumper, **options)
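# Hedged usage sketch (not part of the original module): how ``|tojson`` behaves
# when rendered from a template. Assumes a plain Environment with autoescape
# enabled; the payload below is illustrative only.
def _tojson_usage_example():
    from jinja2 import Environment
    env = Environment(autoescape=True)
    template = env.from_string(
        "<script>var data = {{ payload|tojson(indent=2) }};</script>"
    )
    # "<", ">", "&" and "'" in the payload are escaped, so the rendered JSON is
    # safe to embed directly inside a <script> tag.
    print(template.render(payload={"msg": "<b>O'Reilly & Sons</b>"}))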
def prepare_map(args, kwargs):
context = args[0]
seq = args[1]
if len(args) == 2 and "attribute" in kwargs:
attribute = kwargs.pop("attribute")
default = kwargs.pop("default", None)
if kwargs:
raise FilterArgumentError(
f"Unexpected keyword argument {next(iter(kwargs))!r}"
)
func = make_attrgetter(context.environment, attribute, default=default)
else:
try:
name = args[2]
args = args[3:]
except LookupError:
raise FilterArgumentError("map requires a filter argument")
def func(item):
return context.environment.call_filter(
name, item, args, kwargs, context=context
)
return seq, func
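# Hedged usage sketch (not part of the original module): the two calling
# conventions prepare_map() dispatches on, seen from a template. The data and
# variable names below are illustrative assumptions.
def _map_filter_usage_example():
    from jinja2 import Environment
    env = Environment()
    # Attribute form: pulls ``username`` from each item, falling back to the
    # ``default`` value when the attribute is missing (added in 2.11).
    users = [{"username": "alice"}, {}]
    print(
        env.from_string(
            "{{ users|map(attribute='username', default='Anonymous')|join(', ') }}"
        ).render(users=users)
    )  # -> alice, Anonymous
    # Filter form: applies the named filter to every item, like do_lower() above.
    print(
        env.from_string("{{ titles|map('lower')|join(', ') }}").render(
            titles=["Foo", "BAR"]
        )
    )  # -> foo, bar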
def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
context = args[0]
seq = args[1]
if lookup_attr:
try:
attr = args[2]
except LookupError:
raise FilterArgumentError("Missing parameter for attribute name")
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
def transfunc(x):
return x
try:
name = args[2 + off]
args = args[3 + off :]
def func(item):
return context.environment.call_test(name, item, args, kwargs)
except LookupError:
func = bool
return seq, lambda item: modfunc(func(transfunc(item)))
def select_or_reject(args, kwargs, modfunc, lookup_attr):
seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
if seq:
for item in seq:
if func(item):
yield item
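# Hedged usage sketch (not part of the original module): how the select/reject
# family corresponds to the generator expressions quoted in the docstrings
# above. Assumes a plain Environment; the sample data is illustrative.
def _select_reject_usage_example():
    from jinja2 import Environment
    env = Environment()
    numbers = list(range(10))
    users = [
        {"is_active": True, "email": None},
        {"is_active": False, "email": "user@example.com"},
    ]
    # select/reject apply a test to each item.
    print(env.from_string("{{ numbers|select('odd')|list }}").render(numbers=numbers))
    print(env.from_string("{{ numbers|reject('odd')|list }}").render(numbers=numbers))
    # selectattr/rejectattr apply the test to an attribute of each item.
    print(env.from_string("{{ users|selectattr('is_active')|list }}").render(users=users))
    print(env.from_string("{{ users|rejectattr('email', 'none')|list }}").render(users=users))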
FILTERS = {
"abs": abs,
"attr": do_attr,
"batch": do_batch,
"capitalize": do_capitalize,
"center": do_center,
"count": len,
"d": do_default,
"default": do_default,
"dictsort": do_dictsort,
"e": escape,
"escape": escape,
"filesizeformat": do_filesizeformat,
"first": do_first,
"float": do_float,
"forceescape": do_forceescape,
"format": do_format,
"groupby": do_groupby,
"indent": do_indent,
"int": do_int,
"join": do_join,
"last": do_last,
"length": len,
"list": do_list,
"lower": do_lower,
"map": do_map,
"min": do_min,
"max": do_max,
"pprint": do_pprint,
"random": do_random,
"reject": do_reject,
"rejectattr": do_rejectattr,
"replace": do_replace,
"reverse": do_reverse,
"round": do_round,
"safe": do_mark_safe,
"select": do_select,
"selectattr": do_selectattr,
"slice": do_slice,
"sort": do_sort,
"string": soft_str,
"striptags": do_striptags,
"sum": do_sum,
"title": do_title,
"trim": do_trim,
"truncate": do_truncate,
"unique": do_unique,
"upper": do_upper,
"urlencode": do_urlencode,
"urlize": do_urlize,
"wordcount": do_wordcount,
"wordwrap": do_wordwrap,
"xmlattr": do_xmlattr,
"tojson": do_tojson,
}
|
|
"""Tests for transfer.py."""
import string
import mock
import six
from six.moves import http_client
import unittest2
from apitools.base.py import base_api
from apitools.base.py import http_wrapper
from apitools.base.py import transfer
class TransferTest(unittest2.TestCase):
def assertRangeAndContentRangeCompatible(self, request, response):
request_prefix = 'bytes='
self.assertIn('range', request.headers)
self.assertTrue(request.headers['range'].startswith(request_prefix))
request_range = request.headers['range'][len(request_prefix):]
response_prefix = 'bytes '
self.assertIn('content-range', response.info)
response_header = response.info['content-range']
self.assertTrue(response_header.startswith(response_prefix))
response_range = (
response_header[len(response_prefix):].partition('/')[0])
msg = ('Request range ({0}) not a prefix of '
'response_range ({1})').format(
request_range, response_range)
self.assertTrue(response_range.startswith(request_range), msg=msg)
def testComputeEndByte(self):
total_size = 100
chunksize = 10
download = transfer.Download.FromStream(
six.StringIO(), chunksize=chunksize, total_size=total_size)
self.assertEqual(chunksize - 1,
download._Download__ComputeEndByte(0, end=50))
def testComputeEndByteReturnNone(self):
download = transfer.Download.FromStream(six.StringIO())
self.assertIsNone(
download._Download__ComputeEndByte(0, use_chunks=False))
def testComputeEndByteNoChunks(self):
total_size = 100
download = transfer.Download.FromStream(
six.StringIO(), chunksize=10, total_size=total_size)
for end in (None, 1000):
self.assertEqual(
total_size - 1,
download._Download__ComputeEndByte(0, end=end,
use_chunks=False),
msg='Failed on end={0}'.format(end))
def testComputeEndByteNoTotal(self):
download = transfer.Download.FromStream(six.StringIO())
default_chunksize = download.chunksize
for chunksize in (100, default_chunksize):
download.chunksize = chunksize
for start in (0, 10):
self.assertEqual(
download.chunksize + start - 1,
download._Download__ComputeEndByte(start),
msg='Failed on start={0}, chunksize={1}'.format(
start, chunksize))
def testComputeEndByteSmallTotal(self):
total_size = 100
download = transfer.Download.FromStream(six.StringIO(),
total_size=total_size)
for start in (0, 10):
self.assertEqual(total_size - 1,
download._Download__ComputeEndByte(start),
msg='Failed on start={0}'.format(start))
def testNonChunkedDownload(self):
bytes_http = object()
http = object()
download_stream = six.StringIO()
download = transfer.Download.FromStream(download_stream, total_size=52)
download.bytes_http = bytes_http
base_url = 'https://part.one/'
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as make_request:
make_request.return_value = http_wrapper.Response(
info={
'content-range': 'bytes 0-51/52',
'status': http_client.OK,
},
content=string.ascii_lowercase * 2,
request_url=base_url,
)
request = http_wrapper.Request(url='https://part.one/')
download.InitializeDownload(request, http=http)
self.assertEqual(1, make_request.call_count)
received_request = make_request.call_args[0][1]
self.assertEqual(base_url, received_request.url)
self.assertRangeAndContentRangeCompatible(
received_request, make_request.return_value)
download_stream.seek(0)
self.assertEqual(string.ascii_lowercase * 2,
download_stream.getvalue())
def testChunkedDownload(self):
bytes_http = object()
http = object()
download_stream = six.StringIO()
download = transfer.Download.FromStream(
download_stream, chunksize=26, total_size=52)
download.bytes_http = bytes_http
# Setting autospec on a mock with an iterable side_effect is
# currently broken (http://bugs.python.org/issue17826), so
# instead we write a little function.
def _ReturnBytes(unused_http, http_request,
*unused_args, **unused_kwds):
url = http_request.url
if url == 'https://part.one/':
return http_wrapper.Response(
info={
'content-location': 'https://part.two/',
'content-range': 'bytes 0-25/52',
'status': http_client.PARTIAL_CONTENT,
},
content=string.ascii_lowercase,
request_url='https://part.one/',
)
elif url == 'https://part.two/':
return http_wrapper.Response(
info={
'content-range': 'bytes 26-51/52',
'status': http_client.OK,
},
content=string.ascii_uppercase,
request_url='https://part.two/',
)
else:
self.fail('Unknown URL requested: %s' % url)
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as make_request:
make_request.side_effect = _ReturnBytes
request = http_wrapper.Request(url='https://part.one/')
download.InitializeDownload(request, http=http)
self.assertEqual(2, make_request.call_count)
for call in make_request.call_args_list:
self.assertRangeAndContentRangeCompatible(
call[0][1], _ReturnBytes(*call[0]))
download_stream.seek(0)
self.assertEqual(string.ascii_lowercase + string.ascii_uppercase,
download_stream.getvalue())
def testFromEncoding(self):
# Test a specific corner case in multipart encoding.
# Python's mime module by default encodes lines that start with
# "From " as ">From ", which we need to make sure we don't run afoul
# of when sending content that isn't intended to be so encoded. This
# test calls out that we get this right. We test for both the
# multipart and non-multipart case.
multipart_body = '{"body_field_one": 7}'
upload_contents = 'line one\nFrom \nline two'
upload_config = base_api.ApiUploadInfo(
accept=['*/*'],
max_size=None,
resumable_multipart=True,
resumable_path=u'/resumable/upload',
simple_multipart=True,
simple_path=u'/upload',
)
url_builder = base_api._UrlBuilder('http://www.uploads.com')
# Test multipart: having a body argument in http_request forces
# multipart here.
upload = transfer.Upload.FromStream(
six.StringIO(upload_contents),
'text/plain',
total_size=len(upload_contents))
http_request = http_wrapper.Request(
'http://www.uploads.com',
headers={'content-type': 'text/plain'},
body=multipart_body)
upload.ConfigureRequest(upload_config, http_request, url_builder)
self.assertEqual(url_builder.query_params['uploadType'], 'multipart')
rewritten_upload_contents = '\n'.join(
http_request.body.split('--')[2].splitlines()[1:])
self.assertTrue(rewritten_upload_contents.endswith(upload_contents))
# Test non-multipart (aka media): no body argument means this is
# sent as media.
upload = transfer.Upload.FromStream(
six.StringIO(upload_contents),
'text/plain',
total_size=len(upload_contents))
http_request = http_wrapper.Request(
'http://www.uploads.com',
headers={'content-type': 'text/plain'})
upload.ConfigureRequest(upload_config, http_request, url_builder)
self.assertEqual(url_builder.query_params['uploadType'], 'media')
rewritten_upload_contents = http_request.body
self.assertTrue(rewritten_upload_contents.endswith(upload_contents))
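# Hedged sketch (an assumption, not apitools' actual implementation): the
# end-byte arithmetic exercised by the ComputeEndByte tests above. The end byte
# is the smallest applicable bound among the caller-supplied end, the last byte
# of the current chunk, and the last byte of the object; None means unbounded.
def _compute_end_byte_sketch(start, end=None, chunksize=None, total_size=None,
                             use_chunks=True):
    candidates = []
    if end is not None:
        candidates.append(end)
    if use_chunks and chunksize is not None:
        candidates.append(start + chunksize - 1)
    if total_size is not None:
        candidates.append(total_size - 1)
    return min(candidates) if candidates else None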
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SharedCostList(ListResource):
def __init__(self, version, account_sid, country_code):
"""
Initialize the SharedCostList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param country_code: The ISO-3166-1 country code of the country.
:returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostList
:rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostList
"""
super(SharedCostList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'country_code': country_code, }
self._uri = '/Accounts/{account_sid}/AvailablePhoneNumbers/{country_code}/SharedCost.json'.format(**self._solution)
def stream(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset,
exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, limit=None, page_size=None):
"""
Streams SharedCostInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode area_code: The area code of the phone numbers to read
:param unicode contains: The pattern on which to match phone numbers
:param bool sms_enabled: Whether the phone numbers can receive text messages
:param bool mms_enabled: Whether the phone numbers can receive MMS messages
:param bool voice_enabled: Whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
:param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
:param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
:param bool beta: Whether to read phone numbers new to the Twilio platform
:param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
:param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality
:param bool fax_enabled: Whether the phone numbers can receive faxes
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
area_code=area_code,
contains=contains,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
in_locality=in_locality,
fax_enabled=fax_enabled,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, limit=None, page_size=None):
"""
Lists SharedCostInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode area_code: The area code of the phone numbers to read
:param unicode contains: The pattern on which to match phone numbers
:param bool sms_enabled: Whether the phone numbers can receive text messages
:param bool mms_enabled: Whether the phone numbers can receive MMS messages
:param bool voice_enabled: Whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
:param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
:param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
:param bool beta: Whether to read phone numbers new to the Twilio platform
:param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
:param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality
:param bool fax_enabled: Whether the phone numbers can receive faxes
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: list that will contain up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance]
"""
return list(self.stream(
area_code=area_code,
contains=contains,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
in_locality=in_locality,
fax_enabled=fax_enabled,
limit=limit,
page_size=page_size,
))
def page(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, in_locality=values.unset,
fax_enabled=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of SharedCostInstance records from the API.
Request is executed immediately
:param unicode area_code: The area code of the phone numbers to read
:param unicode contains: The pattern on which to match phone numbers
:param bool sms_enabled: Whether the phone numbers can receive text messages
:param bool mms_enabled: Whether the phone numbers can receive MMS messages
:param bool voice_enabled: Whether the phone numbers can receive calls.
:param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address
:param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address
:param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address
:param bool beta: Whether to read phone numbers new to the Twilio platform
:param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only)
:param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only)
:param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only)
:param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only)
:param unicode in_region: Limit results to a particular region. (US/Canada only)
:param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only)
:param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only)
:param unicode in_locality: Limit results to a particular locality
:param bool fax_enabled: Whether the phone numbers can receive faxes
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SharedCostInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
"""
data = values.of({
'AreaCode': area_code,
'Contains': contains,
'SmsEnabled': sms_enabled,
'MmsEnabled': mms_enabled,
'VoiceEnabled': voice_enabled,
'ExcludeAllAddressRequired': exclude_all_address_required,
'ExcludeLocalAddressRequired': exclude_local_address_required,
'ExcludeForeignAddressRequired': exclude_foreign_address_required,
'Beta': beta,
'NearNumber': near_number,
'NearLatLong': near_lat_long,
'Distance': distance,
'InPostalCode': in_postal_code,
'InRegion': in_region,
'InRateCenter': in_rate_center,
'InLata': in_lata,
'InLocality': in_locality,
'FaxEnabled': fax_enabled,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return SharedCostPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SharedCostInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SharedCostInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SharedCostPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.SharedCostList>'
class SharedCostPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the SharedCostPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param country_code: The ISO-3166-1 country code of the country.
:returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
:rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostPage
"""
super(SharedCostPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SharedCostInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
"""
return SharedCostInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
country_code=self._solution['country_code'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.SharedCostPage>'
class SharedCostInstance(InstanceResource):
def __init__(self, version, payload, account_sid, country_code):
"""
Initialize the SharedCostInstance
:returns: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.shared_cost.SharedCostInstance
"""
super(SharedCostInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'friendly_name': payload.get('friendly_name'),
'phone_number': payload.get('phone_number'),
'lata': payload.get('lata'),
'locality': payload.get('locality'),
'rate_center': payload.get('rate_center'),
'latitude': deserialize.decimal(payload.get('latitude')),
'longitude': deserialize.decimal(payload.get('longitude')),
'region': payload.get('region'),
'postal_code': payload.get('postal_code'),
'iso_country': payload.get('iso_country'),
'address_requirements': payload.get('address_requirements'),
'beta': payload.get('beta'),
'capabilities': payload.get('capabilities'),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'country_code': country_code, }
@property
def friendly_name(self):
"""
:returns: A formatted version of the phone number
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def phone_number(self):
"""
:returns: The phone number in E.164 format
:rtype: unicode
"""
return self._properties['phone_number']
@property
def lata(self):
"""
:returns: The LATA of this phone number
:rtype: unicode
"""
return self._properties['lata']
@property
def locality(self):
"""
:returns: The locality or city of this phone number's location
:rtype: unicode
"""
return self._properties['locality']
@property
def rate_center(self):
"""
:returns: The rate center of this phone number
:rtype: unicode
"""
return self._properties['rate_center']
@property
def latitude(self):
"""
:returns: The latitude of this phone number's location
:rtype: unicode
"""
return self._properties['latitude']
@property
def longitude(self):
"""
:returns: The longitude of this phone number's location
:rtype: unicode
"""
return self._properties['longitude']
@property
def region(self):
"""
:returns: The two-letter state or province abbreviation of this phone number's location
:rtype: unicode
"""
return self._properties['region']
@property
def postal_code(self):
"""
:returns: The postal or ZIP code of this phone number's location
:rtype: unicode
"""
return self._properties['postal_code']
@property
def iso_country(self):
"""
:returns: The ISO country code of this phone number
:rtype: unicode
"""
return self._properties['iso_country']
@property
def address_requirements(self):
"""
:returns: The type of Address resource the phone number requires
:rtype: unicode
"""
return self._properties['address_requirements']
@property
def beta(self):
"""
:returns: Whether the phone number is new to the Twilio platform
:rtype: bool
"""
return self._properties['beta']
@property
def capabilities(self):
"""
:returns: Whether a phone number can receive calls or messages
:rtype: unicode
"""
return self._properties['capabilities']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.SharedCostInstance>'
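# Hedged usage sketch (not part of the generated bindings): listing shared-cost
# numbers through the REST client. The credentials, country code and limit are
# placeholders/assumptions; run only with a real account SID and auth token.
def _shared_cost_usage_example():
    from twilio.rest import Client
    client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
    for number in client.available_phone_numbers("GB").shared_cost.list(limit=5):
        print(number.phone_number, number.friendly_name)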
|
|
import warnings
import six
import sys
import qrcode
import qrcode.util
import qrcode.image.svg
try:
import qrcode.image.pure
import pymaging_png # ensure that PNG support is installed
except ImportError: # pragma: no cover
pymaging_png = None
from qrcode.image.base import BaseImage
from qrcode.exceptions import DataOverflowError
from qrcode.util import (
QRData, MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE)
from qrcode.tests.svg import SvgImageWhite
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
UNICODE_TEXT = u'\u03b1\u03b2\u03b3'
class QRCodeTests(unittest.TestCase):
def test_basic(self):
qr = qrcode.QRCode(version=1)
qr.add_data('a')
qr.make(fit=False)
def test_large(self):
qr = qrcode.QRCode(version=27)
qr.add_data('a')
qr.make(fit=False)
def test_invalid_version(self):
qr = qrcode.QRCode(version=41)
self.assertRaises(ValueError, qr.make, fit=False)
def test_overflow(self):
qr = qrcode.QRCode(version=1)
qr.add_data('abcdefghijklmno')
self.assertRaises(DataOverflowError, qr.make, fit=False)
def test_add_qrdata(self):
qr = qrcode.QRCode(version=1)
data = QRData('a')
qr.add_data(data)
qr.make(fit=False)
def test_fit(self):
qr = qrcode.QRCode()
qr.add_data('a')
qr.make()
self.assertEqual(qr.version, 1)
qr.add_data('bcdefghijklmno')
qr.make()
self.assertEqual(qr.version, 2)
def test_mode_number(self):
qr = qrcode.QRCode()
qr.add_data('1234567890123456789012345678901234', optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_NUMBER)
def test_mode_alpha(self):
qr = qrcode.QRCode()
qr.add_data('ABCDEFGHIJ1234567890', optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_ALPHA_NUM)
def test_regression_mode_comma(self):
qr = qrcode.QRCode()
qr.add_data(',', optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit(self):
qr = qrcode.QRCode()
qr.add_data(u'abcABC' + UNICODE_TEXT, optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit_newline(self):
qr = qrcode.QRCode()
qr.add_data('ABCDEFGHIJ1234567890\n', optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_render_pil(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image()
img.save(six.BytesIO())
def test_qrcode_bad_factory(self):
self.assertRaises(
TypeError, qrcode.QRCode, image_factory='not_BaseImage')
self.assertRaises(
AssertionError, qrcode.QRCode, image_factory=dict)
def test_qrcode_factory(self):
class MockFactory(BaseImage):
drawrect = mock.Mock()
qr = qrcode.QRCode(image_factory=MockFactory)
qr.add_data(UNICODE_TEXT)
qr.make_image()
self.assertTrue(MockFactory.drawrect.called)
def test_render_svg(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgImage)
img.save(six.BytesIO())
def test_render_svg_path(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage)
img.save(six.BytesIO())
def test_render_svg_fragment(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgFragmentImage)
img.save(six.BytesIO())
def test_render_svg_with_background(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=SvgImageWhite)
img.save(six.BytesIO())
@unittest.skipIf(not pymaging_png, "Requires pymaging with PNG support")
def test_render_pymaging_png(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.pure.PymagingImage)
with warnings.catch_warnings():
if six.PY3:
warnings.simplefilter('ignore', DeprecationWarning)
img.save(six.BytesIO())
@unittest.skipIf(not pymaging_png, "Requires pymaging")
def test_render_pymaging_png_bad_kind(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.pure.PymagingImage)
self.assertRaises(ValueError, img.save, six.BytesIO(), kind='FISH')
def test_optimize(self):
qr = qrcode.QRCode()
text = 'A1abc12345def1HELLOa'
qr.add_data(text, optimize=4)
qr.make()
self.assertEqual(len(qr.data_list), 5)
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
self.assertEqual(qr.data_list[1].mode, MODE_NUMBER)
self.assertEqual(qr.data_list[2].mode, MODE_8BIT_BYTE)
self.assertEqual(qr.data_list[3].mode, MODE_ALPHA_NUM)
self.assertEqual(qr.data_list[4].mode, MODE_8BIT_BYTE)
self.assertEqual(qr.version, 2)
def test_optimize_size(self):
text = 'A1abc12345123451234512345def1HELLOHELLOHELLOHELLOa' * 5
qr = qrcode.QRCode()
qr.add_data(text)
qr.make()
self.assertEqual(qr.version, 10)
qr = qrcode.QRCode()
qr.add_data(text, optimize=0)
qr.make()
self.assertEqual(qr.version, 11)
def test_qrdata_repr(self):
data = b'hello'
data_obj = qrcode.util.QRData(data)
self.assertEqual(repr(data_obj), repr(data))
def test_print_ascii_stdout(self):
qr = qrcode.QRCode()
stdout_encoding = sys.stdout.encoding
with mock.patch('sys.stdout') as fake_stdout:
# Python 2.6 needs sys.stdout.encoding to be a real string.
sys.stdout.encoding = stdout_encoding
fake_stdout.isatty.return_value = None
self.assertRaises(OSError, qr.print_ascii, tty=True)
self.assertTrue(fake_stdout.isatty.called)
def test_print_ascii(self):
qr = qrcode.QRCode(border=0)
f = six.StringIO()
qr.print_ascii(out=f)
printed = f.getvalue()
f.close()
expected = u'\u2588\u2580\u2580\u2580\u2580\u2580\u2588'
self.assertEqual(printed[:len(expected)], expected)
f = six.StringIO()
f.isatty = lambda: True
qr.print_ascii(out=f, tty=True)
printed = f.getvalue()
f.close()
expected = (
u'\x1b[48;5;232m\x1b[38;5;255m' +
u'\xa0\u2584\u2584\u2584\u2584\u2584\xa0')
self.assertEqual(printed[:len(expected)], expected)
def test_print_tty_stdout(self):
qr = qrcode.QRCode()
with mock.patch('sys.stdout') as fake_stdout:
fake_stdout.isatty.return_value = None
self.assertRaises(OSError, qr.print_tty)
self.assertTrue(fake_stdout.isatty.called)
def test_print_tty(self):
qr = qrcode.QRCode()
f = six.StringIO()
f.isatty = lambda: True
qr.print_tty(out=f)
printed = f.getvalue()
f.close()
BOLD_WHITE_BG = '\x1b[1;47m'
BLACK_BG = '\x1b[40m'
WHITE_BLOCK = BOLD_WHITE_BG + ' ' + BLACK_BG
EOL = '\x1b[0m\n'
expected = (
BOLD_WHITE_BG + ' '*23 + EOL +
WHITE_BLOCK + ' '*7 + WHITE_BLOCK)
self.assertEqual(printed[:len(expected)], expected)
def test_get_matrix(self):
qr = qrcode.QRCode(border=0)
qr.add_data('1')
self.assertEqual(qr.get_matrix(), qr.modules)
def test_get_matrix_border(self):
qr = qrcode.QRCode(border=1)
qr.add_data('1')
matrix = [row[1:-1] for row in qr.get_matrix()[1:-1]]
self.assertEqual(matrix, qr.modules)
def test_negative_size_at_construction(self):
self.assertRaises(ValueError, qrcode.QRCode, box_size=-1)
def test_negative_size_at_usage(self):
qr = qrcode.QRCode()
qr.box_size = -1
self.assertRaises(ValueError, qr.make_image)
class ShortcutTest(unittest.TestCase):
def runTest(self):
qrcode.make('image')
|
|
import datetime
import math
import urlparse
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import connection
from django.utils.encoding import smart_str
import jinja2
from jingo import env, register
from tower import ugettext as _
from tower import ugettext_lazy as _lazy
import mkt
from mkt.access import acl
from mkt.reviewers.models import EscalationQueue, QUEUE_TARAKO, ReviewerScore
from mkt.reviewers.utils import (AppsReviewing, clean_sort_param,
create_sort_link)
from mkt.search.serializers import es_to_datetime
from mkt.site.helpers import mkt_breadcrumbs, page_title
from mkt.versions.models import Version
from mkt.webapps.helpers import new_context
@register.function
@jinja2.contextfunction
def reviewers_breadcrumbs(context, queue=None, items=None):
"""
    Wrapper function for ``breadcrumbs``. Prepends 'Reviewer Tools'
    breadcrumbs.
**queue**
Explicit queue type to set.
**items**
list of [(url, label)] to be inserted after Add-on.
"""
crumbs = [(reverse('reviewers.home'), _('Reviewer Tools'))]
if queue:
queues = {'pending': _('Apps'),
'rereview': _('Re-reviews'),
'updates': _('Updates'),
'escalated': _('Escalations'),
'device': _('Device'),
'moderated': _('Moderated Reviews'),
'abuse': _('Abuse Reports'),
'reviewing': _('Reviewing'),
'region': _('Regional Queues')}
if items:
url = reverse('reviewers.apps.queue_%s' % queue)
else:
# The Addon is the end of the trail.
url = None
crumbs.append((url, queues[queue]))
if items:
crumbs.extend(items)
return mkt_breadcrumbs(context, items=crumbs, add_default=True)
@register.function
@jinja2.contextfunction
def reviewers_page_title(context, title=None):
section = _lazy('Reviewer Tools')
title = u'%s | %s' % (title, section) if title else section
return page_title(context, title)
@register.function
@jinja2.contextfunction
def queue_tabnav(context):
"""
Returns tuple of tab navigation for the queue pages.
Each tuple contains three elements: (url, tab_code, tab_text)
"""
request = context['request']
counts = context['queue_counts']
apps_reviewing = AppsReviewing(request).get_apps()
# Apps.
if acl.action_allowed(request, 'Apps', 'Review'):
rv = [
(reverse('reviewers.apps.queue_pending'), 'pending',
_('Apps ({0})', counts['pending']).format(counts['pending'])),
(reverse('reviewers.apps.queue_rereview'), 'rereview',
_('Re-reviews ({0})', counts['rereview']).format(
counts['rereview'])),
(reverse('reviewers.apps.queue_updates'), 'updates',
_('Updates ({0})', counts['updates']).format(counts['updates'])),
]
if acl.action_allowed(request, 'Apps', 'ReviewEscalated'):
rv.append((reverse('reviewers.apps.queue_escalated'), 'escalated',
_('Escalations ({0})', counts['escalated']).format(
counts['escalated'])))
rv.append(
(reverse('reviewers.apps.apps_reviewing'), 'reviewing',
_('Reviewing ({0})').format(len(apps_reviewing))),
)
if acl.action_allowed(request, 'Apps', 'ReviewRegionCN'):
url_ = reverse('reviewers.apps.queue_region',
args=[mkt.regions.CHN.slug])
rv.append((url_, 'region',
_('China ({0})').format(counts['region_cn'])))
if acl.action_allowed(request, 'Apps', 'ReviewTarako'):
url_ = reverse('reviewers.apps.additional_review',
args=[QUEUE_TARAKO])
rv.append((url_, 'additional',
_('Tarako ({0})').format(counts['additional_tarako'])))
else:
rv = []
if acl.action_allowed(request, 'Apps', 'ModerateReview'):
rv.append(
(reverse('reviewers.apps.queue_moderated'), 'moderated',
_('Moderated Reviews ({0})', counts['moderated'])
.format(counts['moderated'])),
)
if acl.action_allowed(request, 'Apps', 'ReadAbuse'):
rv.append(
(reverse('reviewers.apps.queue_abuse'), 'abuse',
_('Abuse Reports ({0})', counts['abuse'])
.format(counts['abuse'])),
)
return rv
@register.function
@jinja2.contextfunction
def logs_tabnav(context):
"""
Returns tuple of tab navigation for the log pages.
Each tuple contains three elements: (named url, tab_code, tab_text)
"""
request = context['request']
if acl.action_allowed(request, 'Apps', 'Review'):
rv = [('reviewers.apps.logs', 'logs', _('Reviews'))]
else:
rv = []
if acl.action_allowed(request, 'Apps', 'ModerateReview'):
rv.append(('reviewers.apps.moderatelog',
'moderatelog', _('Moderated Reviews')))
return rv
@register.function
@jinja2.contextfunction
def sort_link(context, pretty_name, sort_field):
"""Get table header sort links.
pretty_name -- name displayed on table header
sort_field -- name of get parameter, referenced to in views
"""
request = context['request']
sort, order = clean_sort_param(request)
# Copy search/filter GET parameters.
get_params = [(k, v) for k, v in
urlparse.parse_qsl(smart_str(request.META['QUERY_STRING']))
if k not in ('sort', 'order')]
return create_sort_link(pretty_name, sort_field, get_params,
sort, order)
@register.function
def file_compare(file_obj, version):
return version.files.all()[0]
@register.function
def file_review_status(addon, file):
if file.status in [mkt.STATUS_DISABLED, mkt.STATUS_REJECTED]:
if file.reviewed is not None:
return _(u'Rejected')
        # Can't assume that if the reviewed date is missing it's
        # unreviewed. Especially for versions.
else:
return _(u'Rejected or Unreviewed')
return mkt.STATUS_CHOICES[file.status]
@register.function
def version_status(addon, version):
return ','.join(unicode(s) for s in version.status)
@register.inclusion_tag('reviewers/includes/reviewers_score_bar.html')
@jinja2.contextfunction
def reviewers_score_bar(context, types=None):
user = context.get('user')
return new_context(dict(
request=context.get('request'),
mkt=mkt, settings=settings,
points=ReviewerScore.get_recent(user),
total=ReviewerScore.get_total(user),
**ReviewerScore.get_leaderboards(user, types=types)))
@register.filter
def mobile_reviewers_paginator(pager):
# Paginator for non-responsive version of Reviewer Tools.
t = env.get_template('reviewers/includes/reviewers_paginator.html')
return jinja2.Markup(t.render({'pager': pager}))
def get_avg_app_waiting_time():
"""
Returns the rolling average from the past 30 days of the time taken for a
pending app to become public.
"""
cursor = connection.cursor()
cursor.execute('''
SELECT AVG(DATEDIFF(reviewed, nomination)) FROM versions
RIGHT JOIN addons ON versions.addon_id = addons.id
WHERE status = %s AND reviewed >= DATE_SUB(NOW(), INTERVAL 30 DAY)
''', (mkt.STATUS_PUBLIC, ))
row = cursor.fetchone()
days = 0
if row:
try:
days = math.ceil(float(row[0]))
except TypeError:
pass
return days
@register.function
def get_position(addon):
excluded_ids = EscalationQueue.objects.values_list('addon', flat=True)
# Look at all regular versions of webapps which have pending files.
# This includes both new apps and updates to existing apps, to combine
# both the regular and updates queue in one big list (In theory, it
# should take the same time for reviewers to process an app in either
# queue). Escalated apps are excluded just like in reviewer tools.
qs = (Version.objects.filter(addon__disabled_by_user=False,
files__status=mkt.STATUS_PENDING,
deleted=False)
.exclude(addon__status__in=(mkt.STATUS_DISABLED,
mkt.STATUS_DELETED, mkt.STATUS_NULL))
.exclude(addon__id__in=excluded_ids)
.order_by('nomination', 'created').select_related('addon')
.no_transforms().values_list('addon_id', 'nomination'))
position = 0
nomination_date = None
for idx, (addon_id, nomination) in enumerate(qs, start=1):
if addon_id == addon.id:
position = idx
nomination_date = nomination
break
total = qs.count()
days = 1
days_in_queue = 0
if nomination_date:
# Estimated waiting time is calculated from the rolling average of
# the queue waiting time in the past 30 days but subtracting from
# it the number of days this app has already spent in the queue.
days_in_queue = (datetime.datetime.now() - nomination_date).days
days = max(get_avg_app_waiting_time() - days_in_queue, days)
return {'days': int(days), 'days_in_queue': int(days_in_queue),
'pos': position, 'total': total}
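# Hedged sketch (not part of the original helpers): the waiting-time estimate
# used by get_position() above, isolated from the ORM. ``avg_wait_days`` stands
# in for get_avg_app_waiting_time() and ``days_in_queue`` for the age of the
# app's nomination; both names are assumptions for illustration.
def _estimate_days_remaining(avg_wait_days, days_in_queue):
    # Subtract the time already spent in the queue from the 30-day rolling
    # average, but never report less than one day.
    return int(max(avg_wait_days - days_in_queue, 1))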
@register.filter
def es2datetime(s):
"""
Returns a datetime given an Elasticsearch date/datetime field.
"""
return es_to_datetime(s)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains integration with Azure Data Lake.
AzureDataLakeHook communicates via a REST API compatible with WebHDFS. Make sure that an
Airflow connection of type `azure_data_lake` exists. Authorization can be done by supplying a
login (=Client ID), password (=Client Secret) and extra fields tenant (Tenant) and account_name (Account Name)
(see connection `azure_data_lake_default` for an example).
"""
from typing import Any, Dict, Optional
from azure.datalake.store import core, lib, multithread
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class AzureDataLakeHook(BaseHook):
"""
Interacts with Azure Data Lake.
Client ID and client secret should be in user and password parameters.
    Tenant and account name should be in the extra field as
{"tenant": "<TENANT>", "account_name": "ACCOUNT_NAME"}.
:param azure_data_lake_conn_id: Reference to the :ref:`Azure Data Lake connection<howto/connection:adl>`.
:type azure_data_lake_conn_id: str
"""
conn_name_attr = 'azure_data_lake_conn_id'
default_conn_name = 'azure_data_lake_default'
conn_type = 'azure_data_lake'
hook_name = 'Azure Data Lake'
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"extra__azure_data_lake__tenant": StringField(
lazy_gettext('Azure Tenant ID'), widget=BS3TextFieldWidget()
),
"extra__azure_data_lake__account_name": StringField(
lazy_gettext('Azure DataLake Store Name'), widget=BS3TextFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> Dict:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['schema', 'port', 'host', 'extra'],
"relabeling": {
'login': 'Azure Client ID',
'password': 'Azure Client Secret',
},
"placeholders": {
'login': 'client id',
'password': 'secret',
'extra__azure_data_lake__tenant': 'tenant id',
'extra__azure_data_lake__account_name': 'datalake store',
},
}
def __init__(self, azure_data_lake_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn_id = azure_data_lake_conn_id
self._conn: Optional[core.AzureDLFileSystem] = None
self.account_name: Optional[str] = None
def get_conn(self) -> core.AzureDLFileSystem:
"""Return a AzureDLFileSystem object."""
if not self._conn:
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
self.account_name = service_options.get('account_name') or service_options.get(
'extra__azure_data_lake__account_name'
)
tenant = service_options.get('tenant') or service_options.get('extra__azure_data_lake__tenant')
adl_creds = lib.auth(tenant_id=tenant, client_secret=conn.password, client_id=conn.login)
self._conn = core.AzureDLFileSystem(adl_creds, store_name=self.account_name)
self._conn.connect()
return self._conn
def check_for_file(self, file_path: str) -> bool:
"""
Check if a file exists on Azure Data Lake.
:param file_path: Path and name of the file.
:type file_path: str
:return: True if the file exists, False otherwise.
:rtype: bool
"""
try:
files = self.get_conn().glob(file_path, details=False, invalidate_cache=True)
return len(files) == 1
except FileNotFoundError:
return False
def upload_file(
self,
local_path: str,
remote_path: str,
nthreads: int = 64,
overwrite: bool = True,
buffersize: int = 4194304,
blocksize: int = 4194304,
**kwargs,
) -> None:
"""
Upload a file to Azure Data Lake.
:param local_path: local path. Can be single file, directory (in which case,
upload recursively) or glob pattern. Recursive glob patterns using `**`
are not supported.
:type local_path: str
:param remote_path: Remote path to upload to; if multiple files, this is the
directory root to write within.
:type remote_path: str
:param nthreads: Number of threads to use. If None, uses the number of cores.
:type nthreads: int
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten.
:type overwrite: bool
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:type buffersize: int
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk.
:type blocksize: int
"""
multithread.ADLUploader(
self.get_conn(),
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize,
**kwargs,
)
def download_file(
self,
local_path: str,
remote_path: str,
nthreads: int = 64,
overwrite: bool = True,
buffersize: int = 4194304,
blocksize: int = 4194304,
**kwargs,
) -> None:
"""
        Download a file from Azure Data Lake Storage.
:param local_path: local path. If downloading a single file, will write to this
specific file, unless it is an existing directory, in which case a file is
created within it. If downloading multiple files, this is the root
directory to write within. Will create directories as required.
:type local_path: str
:param remote_path: remote path/globstring to use to find remote files.
Recursive glob patterns using `**` are not supported.
:type remote_path: str
:param nthreads: Number of threads to use. If None, uses the number of cores.
:type nthreads: int
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten.
:type overwrite: bool
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:type buffersize: int
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk.
:type blocksize: int
"""
multithread.ADLDownloader(
self.get_conn(),
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize,
**kwargs,
)
def list(self, path: str) -> list:
"""
List files in Azure Data Lake Storage
:param path: full path/globstring to use to list files in ADLS
:type path: str
"""
if "*" in path:
return self.get_conn().glob(path)
else:
return self.get_conn().walk(path)
def remove(self, path: str, recursive: bool = False, ignore_not_found: bool = True) -> None:
"""
Remove files in Azure Data Lake Storage
:param path: A directory or file to remove in ADLS
:type path: str
:param recursive: Whether to loop into directories in the location and remove the files
:type recursive: bool
:param ignore_not_found: Whether to raise error if file to delete is not found
:type ignore_not_found: bool
"""
try:
self.get_conn().remove(path=path, recursive=recursive)
except FileNotFoundError:
if ignore_not_found:
self.log.info("File %s not found", path)
else:
raise AirflowException(f"File {path} not found")
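# Hedged usage sketch (not part of the hook): exercising AzureDataLakeHook
# against a connection named ``azure_data_lake_default``. The connection id,
# local paths and store layout below are assumptions for illustration only.
def _azure_data_lake_hook_usage_example():
    hook = AzureDataLakeHook(azure_data_lake_conn_id='azure_data_lake_default')
    hook.upload_file(local_path='/tmp/report.csv', remote_path='raw/report.csv')
    if hook.check_for_file('raw/report.csv'):
        print(hook.list('raw/*.csv'))
        hook.download_file(local_path='/tmp/report_copy.csv',
                           remote_path='raw/report.csv')
        hook.remove('raw/report.csv')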
|
|
from unittest import TestCase
import time
import datetime
import os
import glob
from shutil import rmtree
class FunctionalityTest(TestCase):
"""
Test Suite:
----Single File----
1.1. Single file.
1.2. Single file and an empty folder.
1.3. Single file in a folder.
1.4. Single file in a folder and an empty folder.
1.5. Single file in a folder in a folder.
----Two Files----
2.1. Two files with different modification time.
2.2. Two files with same modification time.
2.3. Two files with different modification time and a folder.
2.4. Two files with same modification time and a folder.
2.5. Two files in a folder.
2.6. Two files in a folder in a folder.
2.7. A file and a file in a folder.
2.8. A file in a folder and a file in a folder in a folder.
2.9. A file and a file in a folder in a folder.
"""
def __init__(self, *args, **kwargs):
self.dst_path = 'simpholib/tests/files_for_tests/dst'
super(FunctionalityTest, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
# Create dirs for testing.
os.mkdir('simpholib/tests/files_for_tests')
os.mkdir('simpholib/tests/files_for_tests/src')
os.mkdir('simpholib/tests/files_for_tests/dst')
# Install library.
os.system('pip install simpholib')
super(FunctionalityTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# Remove dirs for testing.
rmtree('simpholib/tests/files_for_tests')
super(FunctionalityTest, cls).tearDownClass()
def setUp(self):
# Check that ./files_for_tests is empty.
self.assertFalse(glob.glob('{path}/*'.format(path=self.dst_path)))
super(FunctionalityTest, self).setUp()
def tearDown(self):
# Clean up dst_path.
for child in os.listdir('{path}'.format(path=self.dst_path)):
if os.path.isdir('{path}/{child}'.format(path=self.dst_path, child=child)):
rmtree('{path}/{child}'.format(path=self.dst_path, child=child))
else:
os.remove('{path}/{child}'.format(path=self.dst_path, child=child))
super(FunctionalityTest, self).tearDown()
# ----Single File----
def test_single_file(self):
"""
1.1. Single file.
1.1.1. Create an image file.
1.1.2. Check that image file is created.
1.1.3. Set 'modified time' for that image file.
1.1.4. Run 'simpholib'.
1.1.5. Check new folder structure.
"""
# 1.1.1. Create an image file.
picture = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 1.1.2. Check that image file is created.
self.assertItemsEqual(os.listdir(self.dst_path), ['1.jpg'])
# 1.1.3. Set 'modified time' for that image file.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 1.1.4. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 1.1.5. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg'])
def test_single_file_and_an_empty_folder(self):
"""
1.2. Single file and an empty folder.
1.2.1. Create an image file.
1.2.2. Create an empty folder.
1.2.3. Check that image file and folder are created.
1.2.4. Set 'modified time' for that image file.
1.2.5. Run 'simpholib'.
1.2.6. Check new folder structure.
"""
# 1.2.1. Create an image file.
picture = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 1.2.2. Create an empty folder.
os.mkdir('{path}/empty_folder'.format(path=self.dst_path))
# 1.2.3. Check that image file and folder are created.
self.assertItemsEqual(os.listdir(self.dst_path), ['1.jpg', 'empty_folder'])
# 1.2.4. Set 'modified time' for that image file.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 1.2.5. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 1.2.6. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg'])
def test_single_file_in_a_folder(self):
"""
1.3. Single file in a folder.
1.3.1. Create an empty folder.
1.3.2. Create an image file in a folder.
1.3.3. Check that image file is created.
1.3.4. Set 'modified time' for that image file.
1.3.5. Run 'simpholib'.
1.3.6. Check new folder structure.
"""
# 1.3.1. Create an empty folder.
os.mkdir('{path}/folder'.format(path=self.dst_path))
# 1.3.2. Create an image file in a folder.
picture = open('{path}/folder/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 1.3.3. Check that image file is created.
self.assertItemsEqual(os.listdir(self.dst_path), ['folder'])
self.assertItemsEqual(os.listdir('{path}/folder'.format(path=self.dst_path)), ['1.jpg'])
# 1.3.4. Set 'modified time' for that image file.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/folder/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 1.3.5. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 1.3.6. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg'])
def test_single_file_in_a_folder_and_an_empty_folder(self):
"""
1.4. Single file in a folder and an empty folder.
1.4.1. Create 2 empty folders.
1.4.2. Check that folders are created.
1.4.3. Create an image file in one of the folders.
1.4.4. Check that image file is created.
1.4.5. Set 'modified time' for that image file.
1.4.6. Run 'simpholib'.
1.4.7. Check new folder structure.
"""
# 1.4.1. Create 2 empty folders.
os.mkdir('{path}/folder_with_file'.format(path=self.dst_path))
os.mkdir('{path}/empty_folder'.format(path=self.dst_path))
# 1.4.2. Check that folders are created.
self.assertItemsEqual(os.listdir(self.dst_path), ['empty_folder', 'folder_with_file'])
# 1.4.3. Create an image file in one of the folders.
picture = open('{path}/folder_with_file/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 1.4.4. Check that image file is created.
self.assertItemsEqual(os.listdir('{path}/folder_with_file'.format(path=self.dst_path)), ['1.jpg'])
# 1.4.5. Set 'modified time' for that image file.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/folder_with_file/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 1.4.6. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 1.4.7. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg'])
def test_single_file_in_a_folder_in_a_folder(self):
"""
1.5. Single file in a folder in a folder.
1.5.1. Create an empty folder in a folder.
1.5.2. Check that folders are created.
1.5.3. Create an image file in a folder.
1.5.4. Check that image file is created.
1.5.5. Set 'modified time' for that image file.
1.5.6. Run 'simpholib'.
1.5.7. Check new folder structure.
"""
# 1.5.1. Create an empty folder in a folder.
os.mkdir('{path}/folder_1/'.format(path=self.dst_path))
os.mkdir('{path}/folder_1/folder_2/'.format(path=self.dst_path))
# 1.5.2. Check that folders are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['folder_1'])
self.assertItemsEqual(os.listdir('{path}/folder_1/'.format(path=self.dst_path)), ['folder_2'])
# 1.5.3. Create an image file in a folder.
picture = open('{path}/folder_1/folder_2/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 1.5.4. Check that image file is created.
self.assertItemsEqual(os.listdir('{path}/folder_1/folder_2/'.format(path=self.dst_path)), ['1.jpg'])
# 1.5.5. Set 'modified time' for that image file.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/folder_1/folder_2/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 1.5.6. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 1.5.7. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg'])
# ----Two Files----
def test_two_files_with_different_mod_time(self):
"""
2.1. Two image files with different modification time.
2.1.1. Create two image files.
2.1.2. Check that image files are created.
2.1.3. Set different 'modified time' for image files.
2.1.4. Run 'simpholib'.
2.1.5. Check new folder structure.
"""
# 2.1.1. Create two image files.
picture_1 = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture_1.close()
picture_2 = open('{path}/2.jpg'.format(path=self.dst_path), 'w')
picture_2.close()
# 2.1.2. Check that files are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['1.jpg', '2.jpg'])
# 2.1.3. Set different 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
modified_time = time.mktime(datetime.datetime(2013, 07, 06, 12, 00).timetuple())
os.utime('{path}/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.1.4. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.1.5. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012', '2013'])
# Check folder structure for first picture file.
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg'])
# Check folder structure for second picture file.
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2013')), ['07'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2013',
month='07')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2013',
month='07',
day='06')), ['2013-07-06_1.jpg'])
def test_two_files_with_same_mod_time(self):
"""
2.2. Two image files with same modification time.
2.2.1. Create two image files.
2.2.2. Check that folders and files are created.
2.2.3. Set same 'modified time' for image files.
2.2.4. Run 'simpholib'.
2.2.5. Check new folder structure.
"""
# 2.2.1. Create two image files.
picture_1 = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture_1.close()
picture_2 = open('{path}/2.jpg'.format(path=self.dst_path), 'w')
picture_2.close()
# 2.2.2. Check that files are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['1.jpg', '2.jpg'])
# 2.2.3. Set same 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
os.utime('{path}/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.2.4. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.2.5. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
# Check folder structure for first picture file.
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg',
'2012-06-14_1.jpg'])
def test_two_files_with_different_mod_time_and_empty_folders(self):
"""
2.3. Two image files with different modification time and empty folders.
2.3.1. Create empty folders - just to mess up a little bit.
2.3.2. Create two image files.
2.3.3. Check that folders and files are created.
2.3.4. Set different 'modified time' for image files.
2.3.5. Run 'simpholib'.
2.3.6. Check new folder structure.
"""
# 2.3.1. Create empty folders - just to mess up a little bit.
os.mkdir('{path}/folder_1/'.format(path=self.dst_path))
os.mkdir('{path}/folder_1/folder_2/'.format(path=self.dst_path))
# 2.3.2. Create two image files.
picture_1 = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture_1.close()
picture_2 = open('{path}/2.jpg'.format(path=self.dst_path), 'w')
picture_2.close()
# 2.3.3. Check that folders and files are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['1.jpg', '2.jpg', 'folder_1'])
self.assertItemsEqual(os.listdir('{path}/folder_1/'.format(path=self.dst_path)), ['folder_2'])
# 2.3.4. Set different 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
modified_time = time.mktime(datetime.datetime(2013, 07, 06, 12, 00).timetuple())
os.utime('{path}/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.3.5. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.3.6. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012', '2013'])
# Check folder structure for first picture file.
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg'])
# Check folder structure for second picture file.
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2013')), ['07'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2013',
month='07')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2013',
month='07',
day='06')), ['2013-07-06_1.jpg'])
def test_two_files_with_same_mod_time_and_empty_folders(self):
"""
2.4. Two image files with same modification time and empty folders.
2.4.1. Create empty folders - just to mess up a little bit.
2.4.2. Create two image files.
2.4.3. Check that folders and files are created.
2.4.4. Set same 'modified time' for image files.
2.4.5. Run 'simpholib'.
2.4.6. Check new folder structure.
"""
# 2.4.1. Create empty folders - just to mess up a little bit.
os.mkdir('{path}/folder_1/'.format(path=self.dst_path))
os.mkdir('{path}/folder_1/folder_2/'.format(path=self.dst_path))
# 2.4.2. Create two image files.
picture_1 = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture_1.close()
picture_2 = open('{path}/2.jpg'.format(path=self.dst_path), 'w')
picture_2.close()
# 2.4.3. Check that folders and files are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['1.jpg', '2.jpg', 'folder_1'])
self.assertItemsEqual(os.listdir('{path}/folder_1/'.format(path=self.dst_path)), ['folder_2'])
# 2.4.4. Set same 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
os.utime('{path}/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.4.5. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.4.6. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
# Check folder structure for first picture file.
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg',
'2012-06-14_1.jpg'])
def test_two_files_in_a_folder(self):
"""
2.5. Two files in a folder.
2.5.1. Create an empty folder.
2.5.2. Create two image files in a folder.
2.5.3. Check that image files are created.
2.5.4. Set 'modified time' for those image files.
2.5.5. Run 'simpholib'.
2.5.6. Check new folder structure.
"""
# 2.5.1. Create an empty folder.
os.mkdir('{path}/folder'.format(path=self.dst_path))
# 2.5.2. Create two image files in a folder.
picture = open('{path}/folder/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
picture = open('{path}/folder/2.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 2.5.3. Check that image files and folder are created.
self.assertItemsEqual(os.listdir(self.dst_path), ['folder'])
self.assertItemsEqual(os.listdir('{path}/folder'.format(path=self.dst_path)), ['1.jpg', '2.jpg'])
# 2.5.4. Set 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/folder/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
os.utime('{path}/folder/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.5.5. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.5.6. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg',
'2012-06-14_1.jpg'])
def test_two_files_in_a_folder_in_a_folder(self):
"""
2.6. Two files in a folder in a folder.
2.6.1. Create an empty folder in a folder.
2.6.2. Check that folders are created.
2.6.3. Create two image files in a folder in a folder.
2.6.4. Check that image files are created.
2.6.5. Set 'modified time' for those image files.
2.6.6. Run 'simpholib'.
2.6.7. Check new folder structure.
"""
# 2.6.1. Create an empty folder in a folder.
os.mkdir('{path}/folder_1/'.format(path=self.dst_path))
os.mkdir('{path}/folder_1/folder_2/'.format(path=self.dst_path))
# 2.6.2. Check that folders are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['folder_1'])
self.assertItemsEqual(os.listdir('{path}/folder_1/'.format(path=self.dst_path)), ['folder_2'])
# 2.6.3. Create two image files in a folder in a folder.
picture = open('{path}/folder_1/folder_2/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
picture = open('{path}/folder_1/folder_2/2.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 2.6.4. Check that image files are created.
self.assertItemsEqual(os.listdir('{path}/folder_1/folder_2/'.format(path=self.dst_path)), ['1.jpg', '2.jpg'])
# 2.6.5. Set 'modified time' for those image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/folder_1/folder_2/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
os.utime('{path}/folder_1/folder_2/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.6.6. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.6.7. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg',
'2012-06-14_1.jpg'])
def test_a_file_and_a_file_in_a_folder(self):
"""
2.7. A file and a file in a folder.
2.7.1. Create an empty folder.
2.7.2. Check that folder is created.
2.7.3. Create an image file and an image file in a folder.
2.7.4. Check that image files are created.
2.7.5. Set 'modified time' for image files.
2.7.6. Run 'simpholib'.
2.7.7. Check new folder structure.
"""
# 2.7.1. Create an empty folder.
os.mkdir('{path}/folder'.format(path=self.dst_path))
# 2.7.2. Check that folder is created.
self.assertItemsEqual(os.listdir(self.dst_path), ['folder'])
# 2.7.3. Create two image files in a folder.
picture = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
picture = open('{path}/folder/2.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 2.7.4. Check that image files and folder are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['1.jpg', 'folder'])
self.assertItemsEqual(os.listdir('{path}/folder'.format(path=self.dst_path)), ['2.jpg'])
# 2.7.5. Set 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
os.utime('{path}/folder/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.7.6. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.7.7. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg',
'2012-06-14_1.jpg'])
def test_a_file_in_a_folder_and_a_file_in_a_folder_in_a_folder(self):
"""
2.8. A file in a folder and a file in a folder in a folder.
2.8.1. Create an empty folder in a folder.
2.8.2. Check that folder is created.
2.8.3. Create an image file in a folder and an image file in a folder in a folder.
2.8.4. Check that image files are created.
2.8.5. Set 'modified time' for image files.
2.8.6. Run 'simpholib'.
2.8.7. Check new folder structure.
"""
# 2.8.1. Create an empty folder in a folder.
os.mkdir('{path}/folder_1'.format(path=self.dst_path))
os.mkdir('{path}/folder_1/folder_2'.format(path=self.dst_path))
# 2.8.2. Check that folder is created.
self.assertItemsEqual(os.listdir(self.dst_path), ['folder_1'])
self.assertItemsEqual(os.listdir('{path}/folder_1'.format(path=self.dst_path)), ['folder_2'])
# 2.8.3. Create an image file in a folder and another in a folder in a folder.
picture = open('{path}/folder_1/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
picture = open('{path}/folder_1/folder_2/2.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 2.8.4. Check that image files and folder are created.
self.assertItemsEqual(os.listdir('{path}/folder_1'.format(path=self.dst_path)), ['1.jpg', 'folder_2'])
self.assertItemsEqual(os.listdir('{path}/folder_1/folder_2'.format(path=self.dst_path)), ['2.jpg'])
# 2.8.5. Set 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/folder_1/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
os.utime('{path}/folder_1/folder_2/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.8.6. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.8.7. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg',
'2012-06-14_1.jpg'])
def test_a_file_and_a_file_in_a_folder_in_a_folder(self):
"""
2.9. A file and a file in a folder in a folder.
2.9.1. Create an empty folder in a folder.
2.9.2. Check that folder is created.
2.9.3. Create an image file and an image file in a folder in a folder.
2.9.4. Check that image files are created.
2.9.5. Set 'modified time' for image files.
2.9.6. Run 'simpholib'.
2.9.7. Check new folder structure.
"""
# 2.9.1. Create an empty folder in a folder.
os.mkdir('{path}/folder_1'.format(path=self.dst_path))
os.mkdir('{path}/folder_1/folder_2'.format(path=self.dst_path))
# 2.9.2. Check that folder is created.
self.assertItemsEqual(os.listdir(self.dst_path), ['folder_1'])
self.assertItemsEqual(os.listdir('{path}/folder_1'.format(path=self.dst_path)), ['folder_2'])
# 2.9.3. Create an image file in a folder and another in a folder in a folder.
picture = open('{path}/1.jpg'.format(path=self.dst_path), 'w')
picture.close()
picture = open('{path}/folder_1/folder_2/2.jpg'.format(path=self.dst_path), 'w')
picture.close()
# 2.9.4. Check that image files and folder are created.
self.assertItemsEqual(os.listdir('{path}/'.format(path=self.dst_path)), ['1.jpg', 'folder_1'])
self.assertItemsEqual(os.listdir('{path}/folder_1/folder_2'.format(path=self.dst_path)), ['2.jpg'])
# 2.9.5. Set 'modified time' for image files.
modified_time = time.mktime(datetime.datetime(2012, 06, 14, 12, 00).timetuple())
os.utime('{path}/1.jpg'.format(path=self.dst_path), (modified_time, modified_time))
os.utime('{path}/folder_1/folder_2/2.jpg'.format(path=self.dst_path), (modified_time, modified_time))
# 2.9.6. Run 'simpholib'.
os.system('simpho ./{dst_path}/'.format(dst_path=self.dst_path))
# 2.9.7. Check new folder structure.
self.assertItemsEqual(os.listdir(self.dst_path), ['albums'])
self.assertItemsEqual(os.listdir('{path}/albums'.format(path=self.dst_path)), ['2012'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}'.format(path=self.dst_path, year='2012')), ['06'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}'.format(path=self.dst_path,
year='2012',
month='06')), ['14'])
self.assertItemsEqual(os.listdir('{path}/albums/{year}/{month}/{day}'.format(path=self.dst_path,
year='2012',
month='06',
day='14')), ['2012-06-14_0.jpg',
'2012-06-14_1.jpg'])
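# --- Illustrative sketch, not part of the original test suite ---
# Every test above repeats the same walk down albums/<year>/<month>/<day>.
# A module-level helper along these lines could express that walk once. It is a
# hypothetical addition: it assumes the Python 2 unittest API (assertItemsEqual)
# and the `os` import already used by this module, and nothing above calls it.
def assert_day_contents(test_case, base_path, year, month, day, filenames):
    """Assert that base_path/albums/<year>/<month>/<day> holds exactly `filenames`."""
    albums = os.path.join(base_path, 'albums')
    test_case.assertItemsEqual(os.listdir(os.path.join(albums, year)), [month])
    test_case.assertItemsEqual(os.listdir(os.path.join(albums, year, month)), [day])
    test_case.assertItemsEqual(os.listdir(os.path.join(albums, year, month, day)), filenames)
# Example (hypothetical) call from one of the tests above:
#     assert_day_contents(self, self.dst_path, '2012', '06', '14', ['2012-06-14_0.jpg'])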
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ChatRoom'
db.create_table('foundry_chatroom', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=32, db_index=True)),
))
db.send_create_signal('foundry', ['ChatRoom'])
# Deleting field 'Listing.display_category'
db.delete_column('foundry_listing', 'display_category')
def backwards(self, orm):
# Deleting model 'ChatRoom'
db.delete_table('foundry_chatroom')
# Adding field 'Listing.display_category'
db.add_column('foundry_listing', 'display_category', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foundry.chatroom': {
'Meta': {'object_name': 'ChatRoom'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.column': {
'Meta': {'object_name': 'Column'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Row']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '8'})
},
'foundry.country': {
'Meta': {'ordering': "('title',)", 'object_name': 'Country'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minimum_age': ('django.db.models.fields.PositiveIntegerField', [], {'default': '18'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'foundry.defaultavatar': {
'Meta': {'object_name': 'DefaultAvatar'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultavatar_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.foundrycomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'FoundryComment', '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.FoundryComment']", 'null': 'True', 'blank': 'True'})
},
'foundry.link': {
'Meta': {'object_name': 'Link'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'foundry.listing': {
'Meta': {'object_name': 'Listing'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']", 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {}),
'display_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'foundry.member': {
'Meta': {'object_name': 'Member', '_ormbases': ['auth.User']},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.menu': {
'Meta': {'object_name': 'Menu'},
'display_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.menulinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'MenuLinkPosition'},
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Menu']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.navbar': {
'Meta': {'object_name': 'Navbar'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.navbarlinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'NavbarLinkPosition'},
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'navbar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Navbar']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.row': {
'Meta': {'object_name': 'Row'},
'block_name': ('django.db.models.fields.CharField', [], {'default': "'content'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"})
},
'foundry.tile': {
'Meta': {'object_name': 'Tile'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Column']"}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_ajax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tile_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.59999999999999998'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foundry']
|
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, ClassVar, Iterable, List, Optional, Tuple, Type, cast
from pants.base.build_environment import get_buildroot
from pants.base.build_root import BuildRoot
from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE
from pants.base.specs import Specs
from pants.build_graph.build_configuration import BuildConfiguration
from pants.engine import desktop, environment, fs, platform, process
from pants.engine.console import Console
from pants.engine.environment import CompleteEnvironment
from pants.engine.fs import PathGlobs, Snapshot, Workspace
from pants.engine.goal import Goal
from pants.engine.internals import build_files, graph, options_parsing
from pants.engine.internals.native_engine import PyExecutor, PySessionCancellationLatch
from pants.engine.internals.parser import Parser
from pants.engine.internals.scheduler import Scheduler, SchedulerSession
from pants.engine.internals.selectors import Params
from pants.engine.internals.session import SessionValues
from pants.engine.process import InteractiveRunner
from pants.engine.rules import QueryRule, collect_rules, rule
from pants.engine.streaming_workunit_handler import rules as streaming_workunit_handler_rules
from pants.engine.target import RegisteredTargetTypes
from pants.engine.unions import UnionMembership
from pants.init import specs_calculator
from pants.option.global_options import DEFAULT_EXECUTION_OPTIONS, ExecutionOptions, GlobalOptions
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.subsystem import Subsystem
from pants.util.ordered_set import FrozenOrderedSet
from pants.vcs.changed import rules as changed_rules
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class GraphScheduler:
"""A thin wrapper around a Scheduler configured with @rules."""
scheduler: Scheduler
goal_map: Any
def new_session(
self,
build_id,
dynamic_ui: bool = False,
use_colors=True,
session_values: Optional[SessionValues] = None,
cancellation_latch: Optional[PySessionCancellationLatch] = None,
) -> GraphSession:
session = self.scheduler.new_session(
build_id,
dynamic_ui,
session_values=session_values,
cancellation_latch=cancellation_latch,
)
console = Console(use_colors=use_colors, session=session if dynamic_ui else None)
return GraphSession(session, console, self.goal_map)
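# Illustrative, hypothetical usage (not part of the original source): callers
# typically build the scheduler once and open one GraphSession per run, e.g.
#
#     graph_scheduler = EngineInitializer.setup_graph(options_bootstrapper, build_config, env)
#     graph_session = graph_scheduler.new_session("example-build-id", dynamic_ui=True)
#     exit_code = graph_session.run_goal_rules(
#         union_membership=union_membership, goals=["example-goal"], specs=specs,
#     )
#
# The identifiers above are placeholders; the real entry points construct these
# values from parsed options.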
@dataclass(frozen=True)
class GraphSession:
"""A thin wrapper around a SchedulerSession configured with @rules."""
scheduler_session: SchedulerSession
console: Console
goal_map: Any
# NB: Keep this in sync with the method `run_goal_rules`.
goal_param_types: ClassVar[Tuple[Type, ...]] = (Specs, Console, InteractiveRunner, Workspace)
def goal_consumed_subsystem_scopes(self, goal_name: str) -> Tuple[str, ...]:
"""Return the scopes of subsystems that could be consumed while running the given goal."""
goal_product = self.goal_map.get(goal_name)
if not goal_product:
return tuple()
consumed_types = self.goal_consumed_types(goal_product)
return tuple(
sorted({typ.options_scope for typ in consumed_types if issubclass(typ, Subsystem)}) # type: ignore[misc]
)
def goal_consumed_types(self, goal_product: type) -> set[type]:
"""Return the set of types that could possibly be consumed while running the given goal."""
return set(
self.scheduler_session.scheduler.rule_graph_consumed_types(
self.goal_param_types, goal_product
)
)
def run_goal_rules(
self,
*,
union_membership: UnionMembership,
goals: Iterable[str],
specs: Specs,
poll: bool = False,
poll_delay: Optional[float] = None,
) -> int:
"""Runs @goal_rules sequentially and interactively by requesting their implicit Goal
products.
For retryable failures, raises scheduler.ExecutionError.
:returns: An exit code.
"""
workspace = Workspace(self.scheduler_session)
interactive_runner = InteractiveRunner(self.scheduler_session)
for goal in goals:
goal_product = self.goal_map[goal]
# NB: We no-op for goals that have no implementation because no relevant backends are
# registered. We might want to reconsider the behavior to instead warn or error when
# trying to run something like `./pants run` without any backends registered.
is_implemented = union_membership.has_members_for_all(
goal_product.subsystem_cls.required_union_implementations
)
if not is_implemented:
continue
# NB: Keep this in sync with the property `goal_param_types`.
params = Params(specs, self.console, workspace, interactive_runner)
logger.debug(f"requesting {goal_product} to satisfy execution of `{goal}` goal")
try:
exit_code = self.scheduler_session.run_goal_rule(
goal_product, params, poll=poll, poll_delay=poll_delay
)
finally:
self.console.flush()
if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
return exit_code
return PANTS_SUCCEEDED_EXIT_CODE
class EngineInitializer:
"""Constructs the components necessary to run the engine."""
class GoalMappingError(Exception):
"""Raised when a goal cannot be mapped to an @rule."""
@staticmethod
def _make_goal_map_from_rules(rules):
goal_map = {}
for r in rules:
output_type = getattr(r, "output_type", None)
if not output_type or not issubclass(output_type, Goal):
continue
goal = r.output_type.name
if goal in goal_map:
raise EngineInitializer.GoalMappingError(
f"could not map goal `{goal}` to rule `{r}`: already claimed by product "
f"`{goal_map[goal]}`"
)
goal_map[goal] = r.output_type
return goal_map
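# Illustrative note (not part of the original source): for a rule whose
# output_type is a hypothetical Goal subclass `Lint` with `name = "lint"`, the
# mapping built above would contain {"lint": Lint}; registering a second rule
# that produces a Goal also named "lint" would raise GoalMappingError.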
@staticmethod
def setup_graph(
options_bootstrapper: OptionsBootstrapper,
build_configuration: BuildConfiguration,
env: CompleteEnvironment,
executor: Optional[PyExecutor] = None,
local_only: bool = False,
) -> GraphScheduler:
build_root = get_buildroot()
bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
options = options_bootstrapper.full_options(build_configuration)
assert bootstrap_options is not None
executor = executor or GlobalOptions.create_py_executor(bootstrap_options)
execution_options = ExecutionOptions.from_options(options, env, local_only=local_only)
return EngineInitializer.setup_graph_extended(
build_configuration,
execution_options,
executor=executor,
pants_ignore_patterns=GlobalOptions.compute_pants_ignore(build_root, bootstrap_options),
use_gitignore=bootstrap_options.pants_ignore_use_gitignore,
local_store_dir=bootstrap_options.local_store_dir,
local_execution_root_dir=bootstrap_options.local_execution_root_dir,
named_caches_dir=bootstrap_options.named_caches_dir,
ca_certs_path=bootstrap_options.ca_certs_path,
build_root=build_root,
include_trace_on_error=bootstrap_options.print_stacktrace,
native_engine_visualize_to=bootstrap_options.native_engine_visualize_to,
)
@staticmethod
def setup_graph_extended(
build_configuration: BuildConfiguration,
execution_options: ExecutionOptions,
*,
executor: PyExecutor,
pants_ignore_patterns: List[str],
use_gitignore: bool,
local_store_dir: str,
local_execution_root_dir: str,
named_caches_dir: str,
ca_certs_path: Optional[str] = None,
build_root: Optional[str] = None,
include_trace_on_error: bool = True,
native_engine_visualize_to: Optional[str] = None,
) -> GraphScheduler:
build_root = build_root or get_buildroot()
rules = build_configuration.rules
union_membership = UnionMembership.from_rules(build_configuration.union_rules)
registered_target_types = RegisteredTargetTypes.create(build_configuration.target_types)
execution_options = execution_options or DEFAULT_EXECUTION_OPTIONS
@rule
def parser_singleton() -> Parser:
return Parser(
target_type_aliases=registered_target_types.aliases,
object_aliases=build_configuration.registered_aliases,
)
@rule
def build_configuration_singleton() -> BuildConfiguration:
return build_configuration
@rule
def registered_target_types_singleton() -> RegisteredTargetTypes:
return registered_target_types
@rule
def union_membership_singleton() -> UnionMembership:
return union_membership
@rule
def build_root_singleton() -> BuildRoot:
return cast(BuildRoot, BuildRoot.instance)
# Create a Scheduler containing graph and filesystem rules, with no installed goals.
rules = FrozenOrderedSet(
(
*collect_rules(locals()),
*build_files.rules(),
*fs.rules(),
*environment.rules(),
*desktop.rules(),
*graph.rules(),
*options_parsing.rules(),
*process.rules(),
*platform.rules(),
*changed_rules(),
*streaming_workunit_handler_rules(),
*specs_calculator.rules(),
*rules,
)
)
goal_map = EngineInitializer._make_goal_map_from_rules(rules)
rules = FrozenOrderedSet(
(
*rules,
# Install queries for each Goal.
*(
QueryRule(goal_type, GraphSession.goal_param_types)
for goal_type in goal_map.values()
),
QueryRule(Snapshot, [PathGlobs]), # Used by the SchedulerService.
)
)
def ensure_absolute_path(v: str) -> str:
return Path(v).resolve().as_posix()
def ensure_optional_absolute_path(v: Optional[str]) -> Optional[str]:
if v is None:
return None
return ensure_absolute_path(v)
scheduler = Scheduler(
ignore_patterns=pants_ignore_patterns,
use_gitignore=use_gitignore,
build_root=build_root,
local_store_dir=ensure_absolute_path(local_store_dir),
local_execution_root_dir=ensure_absolute_path(local_execution_root_dir),
named_caches_dir=ensure_absolute_path(named_caches_dir),
ca_certs_path=ensure_optional_absolute_path(ca_certs_path),
rules=rules,
union_membership=union_membership,
executor=executor,
execution_options=execution_options,
include_trace_on_error=include_trace_on_error,
visualize_to_dir=native_engine_visualize_to,
)
return GraphScheduler(scheduler, goal_map)
|
|
import asyncio
import platform
import sys
from datetime import datetime
import discord
from bot import Language, Manager, constants
from bot.lib.guild_configuration import GuildConfiguration
from bot.database import BotDatabase
from bot.lib.configuration import BotConfiguration
from bot.logger import new_logger
from bot.utils import auto_int
log = new_logger('Core')
class AlexisBot(discord.Client):
__author__ = 'makzk (github.com/jkcgs)'
__license__ = 'MIT'
__version__ = constants.BOT_VERSION
name = 'AlexisBot'
def __init__(self, **options):
"""
Initializes configuration, logging, an aiohttp session and class attributes.
:param options: The discord.Client options
"""
intents = discord.Intents.default()
intents.members = True
super().__init__(**options, intents=intents)
self.db = None
self.initialized = False
self.start_time = datetime.now()
self.connect_delta = None
self.lang = {}
self.deleted_messages = []
self.deleted_messages_nolog = []
self.manager = Manager(self)
self.config = None
self.loop = asyncio.get_event_loop()
# Dynamically create and override event handler methods
from bot.constants import EVENT_HANDLERS
for method, margs in EVENT_HANDLERS.items():
def make_handler(event_name, event_args):
async def dispatch(*args):
kwargs = dict(zip(event_args, args))
await self.manager.dispatch(event_name=event_name, **kwargs)
return dispatch
event = 'on_' + method
setattr(self, event, make_handler(event, margs.copy()))
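# Illustrative note (not part of the original code): for a hypothetical entry
# EVENT_HANDLERS = {'message': ['message']}, the loop above is roughly
# equivalent to writing by hand:
#
#     async def on_message(message):
#         await self.manager.dispatch(event_name='on_message', message=message)
#     self.on_message = on_message
#
# make_handler exists so each generated coroutine captures its own event name
# and argument list rather than the loop variables.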
def init(self):
"""
Loads configuration, connects to database, and then connects to Discord.
"""
log.info('%s v%s, discord.py v%s', AlexisBot.name, AlexisBot.__version__, discord.__version__)
log.info('Python %s in %s.', sys.version.replace('\n', ''), sys.platform)
log.info(platform.uname())
log.info('Bot root path: %s', constants.bot_root)
log.info('------')
# Load configuration
self.load_config()
if self.config.get('token', '') == '':
raise RuntimeError('Discord bot token not defined. It should be set in the config.yml file.')
# Load languages
self.load_language()
# Load database
log.info('Connecting to the database...')
self.db = BotDatabase.initialize()
log.info('Successfully connected to the database using %s', self.db.__class__.__name__)
# Load command classes and instances from bots.modules
log.info('Loading commands...')
self.manager.load_instances()
self.manager.dispatch_sync('on_loaded', force=True)
# Connect to Discord
try:
self.start_time = datetime.now()
log.info('Connecting to Discord...')
self.loop.run_until_complete(self.start(self.config['token']))
except discord.errors.LoginFailure:
log.error('Invalid Discord token!')
raise
except KeyboardInterrupt:
self.loop.run_until_complete(self.close())
log.warning('Keyboard interrupt!')
finally:
self.loop.close()
async def on_ready(self):
""" This is executed once the bot has successfully connected to Discord. """
self.connect_delta = (datetime.now() - self.start_time).total_seconds()
log.info('Connected as "%s" (%s)', self.user.name, self.user.id)
log.info('It took %.3f seconds to connect.', self.connect_delta)
log.info('------')
self.initialized = True
self.manager.create_tasks()
await self.manager.dispatch('on_ready')
def load_config(self):
"""
Loads the static configuration.
:return: True if the configuration was loaded successfully, False otherwise.
"""
try:
log.info('Loading configuration...')
self.config = BotConfiguration.get_instance()
log.info('Configuration loaded')
return True
except Exception as ex:
log.exception(ex)
return False
def load_language(self):
"""
Loads language content.
:return: True if the language content was loaded successfully, False otherwise.
"""
try:
log.info('Loading language stuff...')
self.lang = Language('lang', default=self.config['default_lang'], autoload=True)
log.info('Loaded languages: %s, default: %s', list(self.lang.lib.keys()), self.config['default_lang'])
return True
except Exception as ex:
log.exception(ex)
return False
async def close(self):
"""
Stops tasks, closes connections and logs out from Discord.
"""
log.debug('Closing stuff...')
await super().close()
# Close everything http related
self.manager.close_http()
# Stop tasks
self.manager.cancel_tasks()
async def send_modlog(self, guild: discord.Guild, message=None, embed: discord.Embed = None,
locales=None, logtype=None):
"""
Sends a message to the modlog channel of a guild, if the modlog channel is set and if the
logtype is enabled.
:param guild: The guild to send the modlog message.
:param message: The message content.
:param embed: An embed for the message.
:param locales: Locale variables for language messages.
:param logtype: The modlog type of the message. Guilds can disable individual modlog types.
"""
config = GuildConfiguration.get_instance(guild)
chanid = config.get('join_send_channel')
if chanid == '':
return
if logtype and logtype in config.get_list('logtype_disabled'):
return
chan = self.get_channel(auto_int(chanid))
if chan is None:
return
await self.send_message(chan, content=message, embed=embed, locales=locales)
async def send_message(self, destination, content='', **kwargs):
"""
Proxies every message the bot sends to Discord so that event handlers, message
filters and bot logging can run first. Accepts the wrapped send() method's keyword parameters.
:param destination: Where to send the message, must be a discord.abc.Messageable compatible instance.
:param content: The content of the message to send.
:return: The message sent
"""
kwargs['content'] = content
if not isinstance(destination, discord.abc.Messageable):
raise RuntimeError('destination must be a discord.abc.Messageable compatible instance')
# Call pre_send_message handlers, append destination
self.manager.dispatch_ref('pre_send_message', kwargs)
# Log the message
if isinstance(destination, discord.TextChannel):
destination_repr = '{}#{} (IDS {}#{})'.format(
destination.guild, str(destination), destination.id, destination.guild.id)
else:
destination_repr = str(destination)
msg = 'Sending message "{}" to {} '.format(kwargs['content'], destination_repr)
if isinstance(kwargs.get('embed', None), discord.Embed):
msg += ' (with embed: {})'.format(kwargs.get('embed').to_dict())
log.debug(msg)
# Send the actual message
if 'locales' in kwargs:
del kwargs['locales']
if 'event' in kwargs:
del kwargs['event']
return await destination.send(**kwargs)
async def delete_message(self, message, silent=False):
"""
Deletes a message and keeps track of the IDs of the last 50 deleted messages.
:param message: The message to delete
:param silent: Add the message to the no-log list
"""
if not isinstance(message, discord.Message):
raise RuntimeError('message must be a discord.Message instance')
self.deleted_messages.append(message.id)
if silent:
self.deleted_messages_nolog.append(message.id)
try:
await message.delete()
except discord.Forbidden as e:
del self.deleted_messages[-1]
if silent:
del self.deleted_messages_nolog[-1]
raise e
if len(self.deleted_messages) > 50:
del self.deleted_messages[0]
if len(self.deleted_messages_nolog) > 50:
del self.deleted_messages_nolog[0]
@property
def uptime(self):
return datetime.now() - self.start_time
|
|
def load_multiclass_scores(self):
# convert simple scores to multiclass scores
import vtool_ibeis as vt
self.multiclass_scores = {}
for key in self.samples.simple_scores.keys():
scores = self.samples.simple_scores[key].values
# Hack scores into the range 0 to 1
normer = vt.ScoreNormalizer(adjust=8, monotonize=True)
normer.fit(scores, y=self.samples.is_same())
normed_scores = normer.normalize_scores(scores)
# Create a dimension for each class
# but only populate two of the dimensions
class_idxs = ut.take(self.samples.text_to_class, ['nomatch', 'match'])
pred = np.zeros((len(scores), len(self.samples.class_names)))
pred[:, class_idxs[0]] = 1 - normed_scores
pred[:, class_idxs[1]] = normed_scores
self.multiclass_scores[key] = pred
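# Illustrative sketch (not part of the original script): the loop above expands a
# vector of normalized match scores in [0, 1] into a two-column class-probability
# array. The names below are made up for the example.
def _demo_two_class_scores():
    import numpy as np
    normed_scores = np.array([0.1, 0.8, 0.5])
    pred = np.zeros((len(normed_scores), 2))
    pred[:, 0] = 1 - normed_scores   # probability of 'nomatch'
    pred[:, 1] = normed_scores       # probability of 'match'
    return pred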
def photobombing_subset():
"""
CommandLine:
python -m ibeis.scripts.script_vsone photobombing_subset
"""
import ibeis
# pair_sample = ut.odict([
# ('top_gt', 4), ('mid_gt', 2), ('bot_gt', 2), ('rand_gt', 2),
# ('top_gf', 3), ('mid_gf', 2), ('bot_gf', 1), ('rand_gf', 2),
# ])
qreq_ = ibeis.testdata_qreq_(
defaultdb='PZ_Master1',
a=':mingt=2,species=primary',
# t='default:K=4,Knorm=1,score_method=csum,prescore_method=csum',
t='default:K=4,Knorm=1,score_method=csum,prescore_method=csum,QRH=True',
)
ibs = qreq_.ibs
# cm_list = qreq_.execute()
# infr = ibeis.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)
# aid_pairs_ = infr._cm_training_pairs(rng=np.random.RandomState(42),
# **pair_sample)
# # ut.dict_hist(ut.flatten(am_tags))
# am_rowids = ibs._get_all_annotmatch_rowids()
# am_tags = ibs.get_annotmatch_case_tags(am_rowids)
# am_flags = ut.filterflags_general_tags(am_tags, has_any=['photobomb'])
# am_rowids_ = ut.compress(am_rowids, am_flags)
# aids1 = ibs.get_annotmatch_aid1(am_rowids_)
# aids2 = ibs.get_annotmatch_aid2(am_rowids_)
# pb_aids_pairs = list(zip(aids1, aids2))
# # aids = unique_pb_aids = ut.unique(ut.flatten(pb_aids_pairs))
# # ut.compress(unique_pb_aids, ibs.is_aid_unknown(unique_pb_aids))
# assert len(pb_aids_pairs) > 0
# # Keep only a random subset
# subset_idxs = list(range(len(aid_pairs_)))
# rng = np.random.RandomState(3104855634)
# num_max = len(pb_aids_pairs)
# if num_max < len(subset_idxs):
# subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)
# subset_idxs = sorted(subset_idxs)
# aid_pairs_ = ut.take(aid_pairs_, subset_idxs)
# aid_pairs_ += pb_aids_pairs
# unique_aids = ut.unique(ut.flatten(aid_pairs_))
# a1 = ibs.filter_annots_general(unique_aids, is_known=True, verbose=True, min_pername=2, has_none=['photobomb'])
# a2 = ibs.filter_annots_general(unique_aids, has_any=['photobomb'], verbose=True, is_known=True)
# a = sorted(set(a1 + a2))
# ibs.print_annot_stats(a)
# len(a)
a = [8, 27, 30, 86, 87, 90, 92, 94, 99, 103, 104, 106, 111, 217, 218, 242,
298, 424, 425, 456, 464, 465, 472, 482, 529, 559, 574, 585, 588, 592,
598, 599, 601, 617, 630, 645, 661, 664, 667, 694, 723, 724, 759, 768,
843, 846, 861, 862, 866, 933, 934, 980, 987, 1000, 1003, 1005, 1011,
1017, 1020, 1027, 1059, 1074, 1076, 1080, 1095, 1096, 1107, 1108,
1192, 1203, 1206, 1208, 1220, 1222, 1223, 1224, 1256, 1278, 1293,
1294, 1295, 1296, 1454, 1456, 1474, 1484, 1498, 1520, 1521, 1548,
1563, 1576, 1593, 1669, 1675, 1680, 1699, 1748, 1751, 1811, 1813,
1821, 1839, 1927, 1934, 1938, 1952, 1992, 2003, 2038, 2054, 2066,
2080, 2103, 2111, 2170, 2171, 2175, 2192, 2216, 2227, 2240, 2250,
2253, 2266, 2272, 2288, 2292, 2314, 2329, 2341, 2344, 2378, 2397,
2417, 2429, 2444, 2451, 2507, 2551, 2552, 2553, 2581, 2628, 2640,
2642, 2646, 2654, 2667, 2686, 2733, 2743, 2750, 2759, 2803, 2927,
3008, 3054, 3077, 3082, 3185, 3205, 3284, 3306, 3334, 3370, 3386,
3390, 3393, 3401, 3448, 3508, 3542, 3597, 3614, 3680, 3684, 3695,
3707, 3727, 3758, 3765, 3790, 3812, 3813, 3818, 3858, 3860, 3874,
3875, 3887, 3892, 3915, 3918, 3924, 3927, 3929, 3933, 3941, 3952,
3955, 3956, 3959, 4004, 4059, 4073, 4076, 4089, 4094, 4124, 4126,
4128, 4182, 4189, 4217, 4222, 4229, 4257, 4266, 4268, 4288, 4289,
4296, 4306, 4339, 4353, 4376, 4403, 4428, 4455, 4487, 4494, 4515,
4517, 4524, 4541, 4544, 4556, 4580, 4585, 4597, 4604, 4629, 4639,
4668, 4671, 4672, 4675, 4686, 4688, 4693, 4716, 4730, 4731, 4749,
4772, 4803, 4820, 4823, 4832, 4833, 4836, 4900, 4902, 4909, 4924,
4936, 4938, 4939, 4944, 5004, 5006, 5034, 5043, 5044, 5055, 5064,
5072, 5115, 5131, 5150, 5159, 5165, 5167, 5168, 5174, 5218, 5235,
5245, 5249, 5309, 5319, 5334, 5339, 5344, 5347, 5378, 5379, 5384,
5430, 5447, 5466, 5509, 5546, 5587, 5588, 5621, 5640, 5663, 5676,
5682, 5685, 5687, 5690, 5707, 5717, 5726, 5732, 5733, 5791, 5830,
5863, 5864, 5869, 5870, 5877, 5879, 5905, 5950, 6008, 6110, 6134,
6160, 6167, 6234, 6238, 6265, 6344, 6345, 6367, 6384, 6386, 6437,
6495, 6533, 6538, 6569, 6587, 6626, 6634, 6643, 6659, 6661, 6689,
6714, 6725, 6739, 6754, 6757, 6759, 6763, 6781, 6830, 6841, 6843,
6893, 6897, 6913, 6930, 6932, 6936, 6944, 6976, 7003, 7022, 7037,
7052, 7058, 7074, 7103, 7107, 7108, 7113, 7143, 7183, 7185, 7187,
7198, 7200, 7202, 7207, 7222, 7275, 7285, 7388, 7413, 7421, 7425,
7429, 7445, 7487, 7507, 7508, 7528, 7615, 7655, 7696, 7762, 7786,
7787, 7796, 7797, 7801, 7807, 7808, 7809, 7826, 7834, 7835, 7852,
7861, 7874, 7881, 7901, 7902, 7905, 7913, 7918, 7941, 7945, 7990,
7999, 8007, 8009, 8017, 8018, 8019, 8034, 8041, 8057, 8058, 8079,
8080, 8086, 8089, 8092, 8094, 8100, 8105, 8109, 8147, 8149, 8153,
8221, 8264, 8302, 8303, 8331, 8366, 8367, 8370, 8376, 8474, 8501,
8504, 8506, 8507, 8514, 8531, 8532, 8534, 8538, 8563, 8564, 8587,
8604, 8608, 8751, 8771, 8792, 9175, 9204, 9589, 9726, 9841, 10674,
12122, 12305, 12796, 12944, 12947, 12963, 12966, 13098, 13099, 13101,
13103, 13109, 13147, 13157, 13168, 13194, 13236, 13253, 13255, 13410,
13450, 13474, 13477, 13481, 13508, 13630, 13670, 13727, 13741, 13819,
13820, 13908, 13912, 13968, 13979, 14007, 14009, 14010, 14019, 14066,
14067, 14072, 14074, 14148, 14153, 14224, 14230, 14237, 14239, 14241,
14274, 14277, 14290, 14293, 14308, 14309, 14313, 14319, 14668, 14670,
14776, 14918, 14920, 14924, 15135, 15157, 15318, 15319, 15490, 15518,
15531, 15777, 15903, 15913, 16004, 16012, 16013, 16014, 16020, 16215,
16221, 16235, 16240, 16259, 16273, 16279, 16284, 16289, 16316, 16322,
16329, 16336, 16364, 16389, 16706, 16897, 16898, 16903, 16949, 17094,
17101, 17137, 17200, 17222, 17290, 17327, 17336]
from ibeis.dbio import export_subset
export_subset.export_annots(ibs, a, 'PZ_PB_RF_TRAIN')
# closed_aids = ibs.annots(unique_aids).get_name_image_closure()
# annots = ibs.annots(unique_aids)
# closed_gt_aids = ut.unique(ut.flatten(ibs.get_annot_groundtruth(unique_aids)))
# closed_gt_aids = ut.unique(ut.flatten(ibs.get_annot_groundtruth(unique_aids)))
# closed_img_aids = ut.unique(ut.flatten(ibs.get_annot_otherimage_aids(unique_aids)))
# ibs.print_annot_stats(unique_aids)
# all_annots = ibs.annots()
def bigcache_vsone(qreq_, hyper_params):
"""
Cached output of one-vs-one matches
>>> from ibeis.scripts.script_vsone import * # NOQA
>>> self = OneVsOneProblem()
>>> qreq_ = self.qreq_
>>> hyper_params = self.hyper_params
"""
import vtool_ibeis as vt
import ibeis
# Get a set of training pairs
ibs = qreq_.ibs
cm_list = qreq_.execute()
infr = ibeis.AnnotInference.from_qreq_(qreq_, cm_list, autoinit=True)
# Per query choose a set of correct, incorrect, and random training pairs
aid_pairs_ = infr._cm_training_pairs(rng=np.random.RandomState(42),
**hyper_params.pair_sample)
aid_pairs_ = vt.unique_rows(np.array(aid_pairs_), directed=False).tolist()
pb_aid_pairs_ = photobomb_samples(ibs)
# TODO: try to add in more non-comparable samples
aid_pairs_ = pb_aid_pairs_ + aid_pairs_
aid_pairs_ = vt.unique_rows(np.array(aid_pairs_))
# ======================================
# Compute one-vs-one scores and local_measures
# ======================================
# Prepare lazy attributes for annotations
qreq_ = infr.qreq_
ibs = qreq_.ibs
qconfig2_ = qreq_.extern_query_config2
dconfig2_ = qreq_.extern_data_config2
qannot_cfg = ibs.depc.stacked_config(None, 'featweight', qconfig2_)
dannot_cfg = ibs.depc.stacked_config(None, 'featweight', dconfig2_)
# Remove any pairs missing features
if dannot_cfg == qannot_cfg:
unique_annots = ibs.annots(np.unique(np.array(aid_pairs_)),
config=dannot_cfg)
bad_aids = unique_annots.compress(~(np.array(unique_annots.num_feats) > 0)).aids
bad_aids = set(bad_aids)
else:
annots1_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 0)),
config=qannot_cfg)
annots2_ = ibs.annots(ut.unique(ut.take_column(aid_pairs_, 1)),
config=dannot_cfg)
bad_aids1 = annots1_.compress(~(np.array(annots1_.num_feats) > 0)).aids
bad_aids2 = annots2_.compress(~(np.array(annots2_.num_feats) > 0)).aids
bad_aids = set(bad_aids1 + bad_aids2)
subset_idxs = np.where([not (a1 in bad_aids or a2 in bad_aids)
for a1, a2 in aid_pairs_])[0]
# Keep only a random subset
if hyper_params.subsample:
rng = np.random.RandomState(3104855634)
num_max = hyper_params.subsample
if num_max < len(subset_idxs):
subset_idxs = rng.choice(subset_idxs, size=num_max, replace=False)
subset_idxs = sorted(subset_idxs)
# Take the current selection
aid_pairs = ut.take(aid_pairs_, subset_idxs)
if True:
# NEW WAY
config = hyper_params.vsone_assign
# TODO: ensure annot props like chips and features can be appropriately
# set via qreq_ config or whatever
matches = infr.exec_vsone_subset(aid_pairs, config=config)
else:
query_aids = ut.take_column(aid_pairs, 0)
data_aids = ut.take_column(aid_pairs, 1)
# OLD WAY
# Determine a unique set of annots per config
configured_aids = ut.ddict(set)
configured_aids[qannot_cfg].update(query_aids)
configured_aids[dannot_cfg].update(data_aids)
# Make efficient annot-object representation
configured_obj_annots = {}
for config, aids in configured_aids.items():
annots = ibs.annots(sorted(list(aids)), config=config)
configured_obj_annots[config] = annots
annots1 = configured_obj_annots[qannot_cfg].loc(query_aids)
annots2 = configured_obj_annots[dannot_cfg].loc(data_aids)
# Get hash based on visual annotation appearance of each pair
# as well as algorithm configurations used to compute those properties
qvuuids = annots1.visual_uuids
dvuuids = annots2.visual_uuids
qcfgstr = annots1._config.get_cfgstr()
dcfgstr = annots2._config.get_cfgstr()
annots_cfgstr = ut.hashstr27(qcfgstr) + ut.hashstr27(dcfgstr)
vsone_uuids = [
ut.combine_uuids(uuids, salt=annots_cfgstr)
for uuids in ut.ProgIter(zip(qvuuids, dvuuids), length=len(qvuuids),
label='hashing ids')
]
# Combine into a big cache for the entire 1-v-1 matching run
big_uuid = ut.hashstr_arr27(vsone_uuids, '', pathsafe=True)
cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train')
cached_data = cacher.tryload()
if cached_data is not None:
# Caching doesn't work 100% for PairwiseMatch objects, so we need to do
# some postprocessing
configured_lazy_annots = ut.ddict(dict)
for config, annots in configured_obj_annots.items():
annot_dict = configured_lazy_annots[config]
for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):
annot_dict[_annot.aid] = _annot._make_lazy_dict()
# Extract pairs of annot objects (with shared caches)
lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)
lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)
# Create a set of PairwiseMatches with the correct annot properties
matches = [vt.PairwiseMatch(annot1, annot2)
for annot1, annot2 in zip(lazy_annots1, lazy_annots2)]
# Updating a new matches dictionary ensures the annot1/annot2 properties
# are set correctly
for key, cached_matches in list(cached_data.items()):
fixed_matches = [match.copy() for match in matches]
for fixed, internal in zip(fixed_matches, cached_matches):
dict_ = internal.__dict__
ut.delete_dict_keys(dict_, ['annot1', 'annot2'])
fixed.__dict__.update(dict_)
cached_data[key] = fixed_matches
else:
cached_data = vsone_(qreq_, query_aids, data_aids, qannot_cfg,
dannot_cfg, configured_obj_annots, hyper_params)
cacher.save(cached_data)
# key_ = 'SV_LNBNN'
key_ = 'RAT_SV'
# for key in list(cached_data.keys()):
# if key != 'SV_LNBNN':
# del cached_data[key]
matches = cached_data[key_]
return matches, infr
def vsone_(qreq_, query_aids, data_aids, qannot_cfg, dannot_cfg,
configured_obj_annots, hyper_params):
# Do vectorized preload before constructing lazy dicts
# Then make sure the lazy dicts point to this subset
unique_obj_annots = list(configured_obj_annots.values())
for annots in ut.ProgIter(unique_obj_annots, 'vectorized preload'):
annots.set_caching(True)
annots.chip_size
annots.vecs
annots.kpts
annots.yaw
annots.qual
annots.gps
annots.time
if qreq_.qparams.featweight_enabled:
annots.fgweights
# annots._internal_attrs.clear()
# Make convenient lazy dict representations (after preloading info)
configured_lazy_annots = ut.ddict(dict)
for config, annots in configured_obj_annots.items():
annot_dict = configured_lazy_annots[config]
for _annot in ut.ProgIter(annots.scalars(), label='make lazy dict'):
annot = _annot._make_lazy_dict()
annot_dict[_annot.aid] = annot
unique_lazy_annots = ut.flatten(
[x.values() for x in configured_lazy_annots.values()])
flann_params = {'algorithm': 'kdtree', 'trees': 4}
for annot in ut.ProgIter(unique_lazy_annots, label='lazy flann'):
vt.matching.ensure_metadata_flann(annot, flann_params)
vt.matching.ensure_metadata_normxy(annot)
for annot in ut.ProgIter(unique_lazy_annots, 'preload kpts'):
annot['kpts']
for annot in ut.ProgIter(unique_lazy_annots, 'preload normxy'):
annot['norm_xys']
for annot in ut.ProgIter(unique_lazy_annots, 'preload vecs'):
annot['vecs']
# Extract pairs of annot objects (with shared caches)
lazy_annots1 = ut.take(configured_lazy_annots[qannot_cfg], query_aids)
lazy_annots2 = ut.take(configured_lazy_annots[dannot_cfg], data_aids)
# TODO: param search over grid
# 'use_sv': [0, 1],
# 'use_fg': [0, 1],
# 'use_ratio_test': [0, 1],
matches_RAT = [vt.PairwiseMatch(annot1, annot2)
for annot1, annot2 in zip(lazy_annots1, lazy_annots2)]
# Construct global measurements
global_keys = ['yaw', 'qual', 'gps', 'time']
for match in ut.ProgIter(matches_RAT, label='setup globals'):
match.add_global_measures(global_keys)
# Preload flann for only specific annots
for match in ut.ProgIter(matches_RAT, label='preload FLANN'):
match.annot1['flann']
cfgdict = hyper_params.vsone_assign
# Find one-vs-one matches
# cfgdict = {'checks': 20, 'symmetric': False}
for match in ut.ProgIter(matches_RAT, label='assign vsone'):
match.assign(cfgdict=cfgdict)
# gridsearch_ratio_thresh()
# vt.matching.gridsearch_match_operation(matches_RAT, 'apply_ratio_test', {
# 'ratio_thresh': np.linspace(.6, .7, 50)
# })
for match in ut.ProgIter(matches_RAT, label='apply ratio thresh'):
match.apply_ratio_test({'ratio_thresh': .638}, inplace=True)
# TODO gridsearch over sv params
# vt.matching.gridsearch_match_operation(matches_RAT, 'apply_sver', {
# 'xy_thresh': np.linspace(0, 1, 3)
# })
matches_RAT_SV = [
match.apply_sver(inplace=True)
for match in ut.ProgIter(matches_RAT, label='sver')
]
# Add keypoint spatial information to local features
for match in matches_RAT_SV:
match.add_local_measures()
# key_ = 'norm_xys'
# norm_xy1 = match.annot1[key_].take(match.fm.T[0], axis=1)
# norm_xy2 = match.annot2[key_].take(match.fm.T[1], axis=1)
# match.local_measures['norm_x1'] = norm_xy1[0]
# match.local_measures['norm_y1'] = norm_xy1[1]
# match.local_measures['norm_x2'] = norm_xy2[0]
# match.local_measures['norm_y2'] = norm_xy2[1]
# match.local_measures['scale1'] = vt.get_scales(
# match.annot1['kpts'].take(match.fm.T[0], axis=0))
# match.local_measures['scale2'] = vt.get_scales(
# match.annot2['kpts'].take(match.fm.T[1], axis=0))
# Create another version where we find global normalizers for the data
# qreq_.load_indexer()
# matches_SV_LNBNN = batch_apply_lnbnn(matches_RAT_SV, qreq_, inplace=True)
# if 'weight' in cfgdict:
# for match in matches_SV_LNBNN[::-1]:
# lnbnn_dist = match.local_measures['lnbnn']
# ndist = match.local_measures['lnbnn_norm_dist']
# weights = match.local_measures[cfgdict['weight']]
# match.local_measures['weighted_lnbnn'] = weights * lnbnn_dist
# match.local_measures['weighted_lnbnn_norm_dist'] = weights * ndist
# match.fs = match.local_measures['weighted_lnbnn']
cached_data = {
# 'RAT': matches_RAT,
'RAT_SV': matches_RAT_SV,
# 'SV_LNBNN': matches_SV_LNBNN,
}
return cached_data
from scipy.sparse import coo_matrix
def quick_cm(y_true, y_pred, labels, sample_weight):
n_labels = len(labels)
C = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)).toarray()
return C
def quick_mcc(C):
""" assumes y_true and y_pred are in index/encoded format """
t_sum = C.sum(axis=1)
p_sum = C.sum(axis=0)
n_correct = np.diag(C).sum()
n_samples = p_sum.sum()
cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)
cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)
mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
return mcc
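def _demo_quick_mcc():
    # Minimal sketch (not part of the original script): build a toy 2x2 confusion
    # matrix with quick_cm and compute its MCC with quick_mcc. Assumes numpy is
    # available at module level as np, as elsewhere in this file.
    import numpy as np
    y_true = np.array([0, 0, 1, 1, 1])
    y_pred = np.array([0, 1, 1, 1, 0])
    sample_weight = np.ones(len(y_true))
    C = quick_cm(y_true, y_pred, labels=[0, 1], sample_weight=sample_weight)
    # C == [[1, 1], [1, 2]]; the resulting MCC is 1/6, which matches
    # sklearn.metrics.matthews_corrcoef(y_true, y_pred).
    return quick_mcc(C)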
def mcc_hack():
sample_weight = np.ones(len(self.samples), dtype=int)
task_mccs = ut.ddict(dict)
# Determine threshold levels per score type
score_to_order = {}
for scoretype in score_dict.keys():
y_score = score_dict[scoretype].values
sortx = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[sortx]
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_score.size - 1]
thresh = y_score[threshold_idxs]
score_to_order[scoretype] = (sortx, y_score, thresh)
classes_ = np.array([0, 1], dtype=int)
for task in task_list:
labels = self.samples.subtasks[task]
for sublabels in labels.gen_one_vs_rest_labels():
for scoretype in score_dict.keys():
sortx, y_score, thresh = score_to_order[scoretype]
y_true = sublabels.y_enc[sortx]
mcc = -np.inf
for t in thresh:
y_pred = (y_score > t).astype(int)
C1 = quick_cm(y_true, y_pred, classes_, sample_weight)
mcc1 = quick_mcc(C1)
if mcc1 < 0:
C2 = quick_cm(y_true, 1 - y_pred, classes_, sample_weight)
mcc1 = quick_mcc(C2)
mcc = max(mcc1, mcc)
# print('mcc = %r' % (mcc,))
task_mccs[sublabels.task_name][scoretype] = mcc
return task_mccs
if 0:
with ut.Timer('mcc'):
task_mccs = mcc_hack()
print('\nMCC of simple scoring measures:')
df = pd.DataFrame.from_dict(task_mccs, orient='index')
from utool.experimental.pandas_highlight import to_string_monkey
print(to_string_monkey(
df, highlight_cols=np.arange(len(df.columns))))
# _all_dfs.append(df_rf)
# df_all = pd.concat(_all_dfs, axis=1)
# # Add in the simple scores
# from utool.experimental.pandas_highlight import to_string_monkey
# print(to_string_monkey(df_all, highlight_cols=np.arange(len(df_all.columns))))
# best_name = df_all.columns[df_all.values.argmax()]
# pt.show_if_requested()
# import utool
# utool.embed()
# print('rat_sver_rf_auc = %r' % (rat_sver_rf_auc,))
# columns = ['Method', 'AUC']
# data = [
# ['1vM-LNBNN', vsmany_lnbnn_auc],
# ['1v1-LNBNN', vsone_sver_lnbnn_auc],
# ['1v1-RAT', rat_auc],
# ['1v1-RAT+SVER', rat_sver_auc],
# ['1v1-RAT+SVER+RF', rat_sver_rf_auc],
# ]
# table = pd.DataFrame(data, columns=columns)
# error = 1 - table['AUC']
# orig = 1 - vsmany_lnbnn_auc
# import tabulate
# table = table.assign(percent_error_decrease=(orig - error) / orig * 100)
# col_to_nice = {
# 'percent_error_decrease': '% error decrease',
# }
# header = [col_to_nice.get(c, c) for c in table.columns]
# print(tabulate.tabulate(table.values, header, tablefmt='orgtbl'))
|
|
"""Tests for Monomial, Posynomial, and Signomial classes"""
import math
import unittest
from gpkit import Variable, Monomial, Posynomial, Signomial, SignomialsEnabled
from gpkit import VectorVariable, NomialArray
from gpkit.nomials import NomialMap
from gpkit.small_classes import HashVector
import gpkit
class TestMonomial(unittest.TestCase):
"""TestCase for the Monomial class"""
def test_init(self):
"Test multiple ways to create a Monomial"
m = Monomial({'x': 2, 'y': -1}, 5)
m2 = Monomial({'x': 2, 'y': -1}, 5)
x, = m.varkeys["x"]
y, = m.varkeys["y"]
self.assertEqual(m.varlocs, {x: [0], y: [0]})
self.assertEqual(m.exp, {x: 2, y: -1})
self.assertEqual(m.c, 5)
self.assertEqual(m, m2)
# default c and a
m = Monomial('x')
x, = m.varkeys["x"]
self.assertEqual(m.varlocs, {x: [0]})
self.assertEqual(m.exp, {x: 1})
self.assertEqual(m.c, 1)
# single (string) var with non-default c
m = 0.1*Variable('tau')
tau, = m.varkeys["tau"]
self.assertEqual(m.varlocs, {tau: [0]})
self.assertEqual(m.exp, {tau: 1}) # pylint: disable=no-member
self.assertEqual(m.c, .1) # pylint: disable=no-member
# variable names not compatible with python namespaces
crazy_varstr = 'what the !!!/$**?'
m = Monomial({'x': 1, crazy_varstr: .5}, 25)
crazy_varkey, = m.varkeys[crazy_varstr]
self.assertTrue(crazy_varkey in m.exp)
# non-positive c raises
self.assertRaises(ValueError, Monomial, -2)
self.assertRaises(ValueError, Monomial, -1.)
self.assertRaises(ValueError, Monomial, 0)
self.assertRaises(ValueError, Monomial, 0.0)
# can create nameless Monomials
x1 = Monomial()
x2 = Monomial()
V = Monomial('V')
vel = Monomial('V')
self.assertNotEqual(x1, x2)
self.assertEqual(V, vel)
# test label kwarg
x = Monomial('x', label='dummy variable')
self.assertEqual(list(x.exp)[0].descr['label'], 'dummy variable')
def test_repr(self):
"Simple tests for __repr__, which prints more than str"
m = Monomial({'x': 2, 'y': -1}, 5)
r = m.__repr__()
self.assertEqual(type(r), str)
self.assertEqual(Monomial('x').__repr__(), 'gpkit.Monomial(x)')
def test_latex(self):
"Test latex string creation"
x = Variable("x")
m = Monomial({'x': 2, 'y': -1}, 5).latex()
self.assertEqual(type(m), str)
self.assertEqual((5*x).latex(), '5x')
def test_str_with_units(self):
"Make sure __str__() works when units are involved"
S = Variable('S', units='m^2')
rho = Variable('rho', units='kg/m^3')
x = rho*S
xstr = str(x)
self.assertEqual(type(xstr), str)
self.assertTrue('S' in xstr and 'rho' in xstr)
def test_eq_ne(self):
"Test equality and inequality comparators"
# simple one
x = Monomial('x')
y = Monomial('y')
self.assertNotEqual(x, y)
self.assertFalse(x == y)
xx = Monomial('x')
self.assertEqual(x, xx)
self.assertFalse(x != xx)
self.assertEqual(x, x)
self.assertFalse(x != x)
x = Monomial({}, 1)
self.assertEqual(x, 1)
self.assertEqual(x, Monomial({}))
# several vars
m1 = Monomial({'a': 3, 'b': 2, 'c': 1}, 5)
m2 = Monomial({'a': 3, 'b': 2, 'c': 1}, 5)
m3 = Monomial({'a': 3, 'b': 2, 'c': 1}, 6)
m4 = Monomial({'a': 3, 'b': 2}, 5)
self.assertEqual(m1, m2)
self.assertNotEqual(m1, m3)
self.assertNotEqual(m1, m4)
# numeric
self.assertEqual(Monomial(3), 3)
self.assertEqual(Monomial(3), Monomial(3))
self.assertNotEqual(Monomial(3), 2)
self.assertNotEqual(Monomial('x'), 3)
self.assertNotEqual(Monomial(3), Monomial('x'))
def test_div(self):
"Test Monomial division"
x = Monomial('x')
y = Monomial('y')
z = Monomial('z')
t = Monomial('t')
a = 36*x/y
# sanity check
self.assertEqual(a, Monomial({'x': 1, 'y': -1}, 36))
# divide by scalar
self.assertEqual(a/9, 4*x/y)
# divide by Monomial
b = a / z
self.assertEqual(b, 36*x/y/z)
# make sure x unchanged
self.assertEqual(a, Monomial({'x': 1, 'y': -1}, 36))
# mixed new and old vars
c = a / (0.5*t**2/x)
self.assertEqual(c, Monomial({'x': 2, 'y': -1, 't': -2}, 72))
def test_mul(self):
"Test monomial multiplication"
x = Monomial({'x': 1, 'y': -1}, 4)
# test integer division
self.assertEqual(x/5, Monomial({'x': 1, 'y': -1}, 0.8))
# multiply by scalar
self.assertEqual(x*9, Monomial({'x': 1, 'y': -1}, 36))
# multiply by Monomial
y = x * Monomial('z')
self.assertEqual(y, Monomial({'x': 1, 'y': -1, 'z': 1}, 4))
# make sure x unchanged
self.assertEqual(x, Monomial({'x': 1, 'y': -1}, 4))
# mixed new and old vars
z = x * Monomial({'x': -1, 't': 2}, .5)
self.assertEqual(z, Monomial({'x': 0, 'y': -1, 't': 2}, 2))
x0 = Monomial('x0')
self.assertEqual(0.0, 0.0*x0)
x1 = Monomial('x1')
n_hat = [1, 0]
p = n_hat[0]*x0 + n_hat[1]*x1
self.assertEqual(p, x0)
def test_pow(self):
"Test Monomial exponentiation"
x = Monomial({'x': 1, 'y': -1}, 4)
self.assertEqual(x, Monomial({'x': 1, 'y': -1}, 4))
# identity
self.assertEqual(x/x, Monomial({}, 1))
# square
self.assertEqual(x*x, x**2)
# divide
y = Monomial({'x': 2, 'y': 3}, 5)
self.assertEqual(x/y, x*y**-1)
# make sure x unchanged
self.assertEqual(x, Monomial({'x': 1, 'y': -1}, 4))
def test_numerical_precision(self):
"not sure what to test here, placeholder for now"
c1, c2 = 1/700., 123e8
m1 = Monomial({'x': 2, 'y': 1}, c1)
m2 = Monomial({'y': -1, 'z': 3/2.}, c2)
self.assertEqual(math.log((m1**4 * m2**3).c), # pylint: disable=no-member
4*math.log(c1) + 3*math.log(c2))
def test_units(self):
"make sure multiplication with units works (issue 492)"
# have had issues where Quantity.__mul__ causes wrong return type
m = 1.2 * gpkit.units.ft * Variable("x", "m")**2
self.assertTrue(isinstance(m, Monomial))
if m.units:
self.assertEqual(m.units, 1*gpkit.ureg.ft*gpkit.ureg.m**2)
# also multiply at the end, though this has not been a problem
m = 0.5 * Variable("x", "m")**2 * gpkit.units.kg
self.assertTrue(isinstance(m, Monomial))
if m.units:
self.assertEqual(m.units, 1*gpkit.ureg.kg*gpkit.ureg.m**2)
# and with vectors...
v = 0.5 * VectorVariable(3, "x", "m")**2 * gpkit.units.kg
self.assertTrue(isinstance(v, NomialArray))
if v.units:
self.assertEqual(v.units, 1*gpkit.ureg.kg*gpkit.ureg.m**2)
v = 0.5 * gpkit.units.kg * VectorVariable(3, "x", "m")**2
self.assertTrue(isinstance(v, NomialArray))
if v.units:
self.assertEqual(v.units, 1*gpkit.ureg.kg*gpkit.ureg.m**2)
class TestSignomial(unittest.TestCase):
"""TestCase for the Signomial class"""
def test_init(self):
"Test Signomial construction"
x = Monomial('x')
y = Monomial('y')
with SignomialsEnabled():
self.assertEqual(str(1 - x - y**2 - 1), "-x + -y**2")
self.assertEqual((1 - x/y**2).latex(), "-\\frac{x}{y^{2}} + 1")
self.assertRaises(TypeError, lambda: x-y)
def test_eq_ne(self):
"Test Signomial equality and inequality operators"
x = Variable('x')
xu = Variable('x', units="ft")
with SignomialsEnabled():
self.assertEqual(x - x**2, -x**2 + x)
if gpkit.units:
self.assertNotEqual(-x, -xu)
else: # units don't create inequality if they're disabled
self.assertEqual(-x, -xu)
# numeric
self.assertEqual(Signomial(0), 0)
self.assertNotEqual(Signomial(0), 1)
self.assertEqual(Signomial(-3), -3)
self.assertNotEqual(Signomial(-3), 3)
class TestPosynomial(unittest.TestCase):
"""TestCase for the Posynomial class"""
def test_init(self):
"Test Posynomial construction"
x = Monomial('x')
y = Monomial('y')
ms = [Monomial({'x': 1, 'y': 2}, 3.14),
0.5*Monomial('y'),
Monomial({'x': 3, 'y': 1}, 6),
Monomial(2)]
exps, cs = [], []
for m in ms:
cs += m.cs.tolist()
exps += m.exps
hmap = NomialMap(zip(exps, cs))
hmap.units_of_product(None)
p = Posynomial(hmap)
# check arithmetic
p2 = 3.14*x*y**2 + y/2 + x**3*6*y + 2
self.assertEqual(p, p2)
hmap = NomialMap({HashVector({'m': 1, 'v': 2}): 0.5,
HashVector({'m': 1, 'g': 1, 'h': 1}): 1})
hmap.units_of_product(None)
p = Posynomial(hmap)
m, = p.varkeys["m"]
g, = p.varkeys["g"]
h, = p.varkeys["h"]
v, = p.varkeys["v"]
self.assertTrue(all(isinstance(x, float) for x in p.cs))
self.assertEqual(len(p.exps), 2)
self.assertEqual(set(p.varlocs), set([m, g, h, v]))
self.assertEqual(p.varlocs[g], p.varlocs[h])
self.assertNotEqual(p.varlocs[g], p.varlocs[v])
self.assertEqual(len(p.varlocs[m]), 2)
self.assertTrue(all(len(p.varlocs[key]) == 1 for key in [g, h, v]))
def test_eq(self):
"""Test Posynomial __eq__"""
x = Variable('x')
y = Variable('y')
self.assertTrue((1 + x) == (1 + x))
self.assertFalse((1 + x) == 2*(1 + x))
self.assertFalse((1 + x) == 0.5*(1 + x))
self.assertFalse((1 + x) == (1 + y))
x = Variable('x', value=3)
y = Variable('y', value=2)
self.assertEqual((1 + x**2).value, (4 + y + y**2).value)
def test_eq_units(self):
p1 = Variable('x') + Variable('y')
p2 = Variable('x') + Variable('y')
p1u = Variable('x', units="m") + Variable('y', units="m")
p2u = Variable('x', units="m") + Variable('y', units="m")
self.assertEqual(p1, p2)
self.assertEqual(p1u, p2u)
if gpkit.units:
self.assertFalse(p1 == p1u)
self.assertNotEqual(p1, p1u)
else: # units don't distinguish variables when they're disabled
self.assertTrue(p1 == p1u)
self.assertEqual(p1, p1u)
def test_simplification(self):
"Make sure like monomial terms get automatically combined"
x = Monomial('x')
y = Monomial('y')
p1 = x + y + y + (x+y) + (y+x**2) + 3*x
p2 = 4*y + x**2 + 5*x
# ps1 = [list(exp.keys())for exp in p1.exps]
# ps2 = [list(exp.keys())for exp in p2.exps]
# print("%s, %s" % (ps1, ps2)) # python 3 dict reordering
self.assertEqual(p1, p2)
def test_posyposy_mult(self):
"Test multiplication of Posynomial with Posynomial"
x = Monomial('x')
y = Monomial('y')
p1 = x**2 + 2*y*x + y**2
p2 = (x+y)**2
# ps1 = [list(exp.keys())for exp in p1.exps]
# ps2 = [list(exp.keys())for exp in p2.exps]
# print("%s, %s" % (ps1, ps2)) # python 3 dict reordering
self.assertEqual(p1, p2)
p1 = (x+y)*(2*x+y**2)
p2 = 2*x**2 + 2*y*x + y**2*x + y**3
# ps1 = [list(exp.keys())for exp in p1.exps]
# ps2 = [list(exp.keys())for exp in p2.exps]
# print("%s, %s" % (ps1, ps2)) # python 3 dict reordering
self.assertEqual(p1, p2)
def test_constraint_gen(self):
"Test creation of Constraints via operator overloading"
x = Monomial('x')
y = Monomial('y')
p = x**2 + 2*y*x + y**2
self.assertEqual((p <= 1).as_posyslt1(), [p])
self.assertEqual((p <= x).as_posyslt1(), [p/x])
def test_integer_division(self):
"Make sure division by integer doesn't use Python integer division"
x = Monomial('x')
y = Monomial('y')
p = 4*x + y
self.assertEqual(p/3, p/3.)
equiv1 = all((p/3).cs == [1./3., 4./3.])
equiv2 = all((p/3).cs == [4./3., 1./3.])
self.assertTrue(equiv1 or equiv2)
def test_diff(self):
"Test differentiation (!!)"
x = Variable('x')
y = Variable('y')
self.assertEqual(x.diff(x), 1)
self.assertEqual(x.diff(y), 0)
self.assertEqual((y**2).diff(y), 2*y)
self.assertEqual((x + y**2).diff(y), 2*y)
self.assertEqual((x + y**2).diff('x'), 1)
self.assertEqual((x + x*y**2).diff(y), 2*x*y)
self.assertEqual((2*y).diff(y), 2)
# test with units
x = Variable('x', units='ft')
d = (3*x**2).diff(x)
self.assertEqual(d, 6*x)
# test negative exponent
d = (1 + 1/y).diff(y)
with SignomialsEnabled():
expected = -y**-2
self.assertEqual(d, expected)
def test_mono_lower_bound(self):
"Test monomial approximation"
x = Variable('x')
y = Variable('y')
p = y**2 + 1
self.assertEqual(y.mono_lower_bound({y: 1}), y)
# TODO: remove pylint warning below after Nomials refactor
# pylint is confused because it thinks p is a Signomial
# pylint: disable=no-member
self.assertEqual(p.mono_lower_bound({y: 1}), 2*y)
self.assertEqual(p.mono_lower_bound({y: 0}), 1)
self.assertEqual((x*y**2 + 1).mono_lower_bound({y: 1, x: 1}),
2*y*x**0.5)
# test with units
d = Variable('d', units='ft')
h = Variable('h', units='ft')
p = (d*h**2 + h*d**2)
m = p.mono_lower_bound({d: 1, h: 1})
self.assertEqual(m, 2*(d*h)**1.5)
# test substitution
TESTS = [TestPosynomial, TestMonomial, TestSignomial]
if __name__ == '__main__':
# pylint: disable=wrong-import-position
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
|
|
import logging
import warnings
import inspect
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm import joinedload, aliased
from sqlalchemy.sql.expression import desc, ColumnElement
from sqlalchemy import Boolean, Table, func, or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import cast
from sqlalchemy import Unicode
from flask import flash
from flask_admin._compat import string_types, text_type
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin.actions import action
from flask_admin._backwards import ObsoleteAttr
from flask_admin.contrib.sqla import form, filters as sqla_filters, tools
from .typefmt import DEFAULT_FORMATTERS
from .tools import get_query_for_ids
from .ajax import create_ajax_loader
# Set up logger
log = logging.getLogger("flask-admin.sqla")
class ModelView(BaseModelView):
"""
SQLAlchemy model view
Usage sample::
admin = Admin()
admin.add_view(ModelView(User, db.session))
"""
column_auto_select_related = ObsoleteAttr('column_auto_select_related',
'auto_select_related',
True)
"""
Enable automatic detection of displayed foreign keys in this view
and perform automatic joined loading for related models to improve
query performance.
Please note that detection is not recursive: if the `__unicode__` method
of a related model uses another model to generate its string representation, it
will still make a separate database call.
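For example, to turn the automatic joined loading off for a particular view
(a sketch with a hypothetical ``PostAdmin`` view)::
    class PostAdmin(ModelView):
        column_auto_select_related = False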
"""
column_select_related_list = ObsoleteAttr('column_select_related',
'list_select_related',
None)
"""
List of parameters for SQLAlchemy `subqueryload`. Overrides `column_auto_select_related`
property.
For example::
class PostAdmin(ModelView):
column_select_related_list = ('user', 'city')
You can also use properties::
class PostAdmin(ModelView):
column_select_related_list = (Post.user, Post.city)
Please refer to the SQLAlchemy `subqueryload` documentation for the list of possible values.
"""
column_display_all_relations = ObsoleteAttr('column_display_all_relations',
'list_display_all_relations',
False)
"""
Controls whether the list view should display all relations, not only many-to-one.
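For example, to also list relations other than many-to-one::
    class MyModelView(ModelView):
        column_display_all_relations = True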
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
Collection of the searchable columns.
Example::
class MyModelView(ModelView):
column_searchable_list = ('name', 'email')
You can also pass columns::
class MyModelView(ModelView):
column_searchable_list = (User.name, User.email)
The following search rules apply:
- If you enter ``ZZZ`` in the UI search field, it will generate ``ILIKE '%ZZZ%'``
statement against searchable columns.
- If you enter multiple words, each word will be searched separately, but
only rows that contain all words will be displayed. For example, searching
for ``abc def`` will find all rows that contain ``abc`` and ``def`` in one or
more columns.
- If you prefix your search term with ``^``, it will find all rows
that start with that term. So, if you entered ``^ZZZ``, then ``ILIKE 'ZZZ%'`` will be used.
- If you prefix your search term with ``=``, it will perform an exact match.
For example, if you entered ``=ZZZ``, the statement ``ILIKE 'ZZZ'`` will be used.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.sqla.filters.BaseSQLAFilter` classes.
Filters will be grouped by name when displayed in the drop-down.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
from flask_admin.contrib.sqla.filters import BooleanEqualFilter
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(column=User.name, name='Name'),)
or::
from flask_admin.contrib.sqla.filters import BaseSQLAFilter
class FilterLastNameBrown(BaseSQLAFilter):
def apply(self, query, value, alias=None):
if value == '1':
return query.filter(self.column == "Brown")
else:
return query.filter(self.column != "Brown")
def operation(self):
return 'is Brown'
class MyModelView(BaseModelView):
column_filters = [
FilterLastNameBrown(
User.last_name, 'Last Name', options=(('1', 'Yes'), ('0', 'No'))
)
]
"""
model_form_converter = form.AdminModelConverter
"""
Model form conversion class. Use this to implement custom field conversion logic.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
inline_model_form_converter = form.InlineModelConverter
"""
Inline model conversion class. If you need some kind of post-processing for inline
forms, you can customize behavior by doing something like this::
class MyInlineModelConverter(InlineModelConverter):
def post_process(self, form_class, info):
form_class.value = wtf.StringField('value')
return form_class
class MyAdminView(ModelView):
inline_model_form_converter = MyInlineModelConverter
"""
filter_converter = sqla_filters.FilterConverter()
"""
Field to filter converter.
Override this attribute to use non-default converter.
"""
fast_mass_delete = False
"""
If set to `False` and a user deletes more than one model using the built-in action,
all models will be read from the database and then deleted one by one,
giving SQLAlchemy a chance to manually clean up any dependencies (many-to-many
relationships, etc).
If set to `True`, will run a ``DELETE`` statement which is somewhat faster,
but may leave corrupted data if you forget to configure ``DELETE
CASCADE`` for your model.
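For example, if your schema already defines ``DELETE CASCADE`` and you want the
faster behaviour::
    class MyModelView(ModelView):
        fast_mass_delete = True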
"""
inline_models = None
"""
Inline related-model editing for models with parent-child relations.
Accepts enumerable with one of the following possible values:
1. Child model class::
class MyModelView(ModelView):
inline_models = (Post,)
2. Child model class and additional options::
class MyModelView(ModelView):
inline_models = [(Post, dict(form_columns=['title']))]
3. Django-like ``InlineFormAdmin`` class instance::
class MyInlineModelForm(InlineFormAdmin):
form_columns = ('title', 'date')
class MyModelView(ModelView):
inline_models = (MyInlineModelForm(MyInlineModel),)
You can customize the generated field name by:
1. Using the `form_label` property as a key to the options dictionary::
class MyModelView(ModelView):
inline_models = [(Post, dict(form_label='Hello'))]
2. Using forward relation name and `column_labels` property::
class Model1(Base):
pass
class Model2(Base):
# ...
model1 = relation(Model1, backref='models')
class MyModel1View(ModelView):
inline_models = (Model2,)
column_labels = {'models': 'Hello'}
"""
column_type_formatters = DEFAULT_FORMATTERS
form_choices = None
"""
Map choices to form fields
Example::
class MyModelView(BaseModelView):
form_choices = {'my_form_field': [
('db_value', 'display_value'),
]}
"""
form_optional_types = (Boolean,)
"""
List of field types that should be optional if column is not nullable.
Example::
class MyModelView(BaseModelView):
form_optional_types = (Boolean, Unicode)
"""
def __init__(self, model, session,
name=None, category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param model:
Model class
:param session:
SQLAlchemy session
:param name:
View name. If not set, defaults to the model name
:param category:
Category name
:param endpoint:
Endpoint name. If not set, defaults to the model name
:param url:
Base URL. If not set, defaults to '/admin/' + endpoint
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self.session = session
self._search_fields = None
self._filter_joins = dict()
self._sortable_joins = dict()
if self.form_choices is None:
self.form_choices = {}
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
# Primary key
self._primary_key = self.scaffold_pk()
if self._primary_key is None:
raise Exception('Model %s does not have a primary key.' % self.model.__name__)
# Configuration
if not self.column_select_related_list:
self._auto_joins = self.scaffold_auto_joins()
else:
self._auto_joins = self.column_select_related_list
# Internal API
def _get_model_iterator(self, model=None):
"""
Return property iterator for the model
"""
if model is None:
model = self.model
return model._sa_class_manager.mapper.iterate_properties
def _get_columns_for_field(self, field):
if (not field or
not hasattr(field, 'property') or
not hasattr(field.property, 'columns') or
not field.property.columns):
raise Exception('Invalid field %s: does not contain any columns.' % field)
return field.property.columns
def _get_field_with_path(self, name):
"""
Resolve property by name and figure out its join path.
Join path might contain both properties and tables.
"""
path = []
model = self.model
# For strings, resolve path
if isinstance(name, string_types):
for attribute in name.split('.'):
value = getattr(model, attribute)
if (hasattr(value, 'property') and
hasattr(value.property, 'direction')):
model = value.property.mapper.class_
table = model.__table__
if self._need_join(table):
path.append(value)
attr = value
else:
attr = name
# Determine joins if table.column (relation object) is provided
if isinstance(attr, InstrumentedAttribute):
columns = self._get_columns_for_field(attr)
if len(columns) > 1:
raise Exception('Can only handle one column for %s' % name)
column = columns[0]
# TODO: Use SQLAlchemy "path-finder" to find exact join path to the target property
if self._need_join(column.table):
path.append(column.table)
return attr, path
def _need_join(self, table):
"""
Check if join to a table is necessary.
"""
return table not in self.model._sa_class_manager.mapper.tables
def _apply_path_joins(self, query, joins, path, inner_join=True):
"""
Apply join path to the query.
:param query:
Query to add joins to
:param joins:
List of current joins. Used to avoid joining on the same relationship more than once
:param path:
Path to be joined
:param inner_join:
Use an inner join if `True`, otherwise use a left outer join
"""
last = None
if path:
for item in path:
key = (inner_join, item)
alias = joins.get(key)
if key not in joins:
if not isinstance(item, Table):
alias = aliased(item.property.mapper.class_)
fn = query.join if inner_join else query.outerjoin
if last is None:
query = fn(item) if alias is None else fn(alias, item)
else:
prop = getattr(last, item.key)
query = fn(prop) if alias is None else fn(alias, prop)
joins[key] = alias
last = alias
return query, joins, last
# Scaffolding
def scaffold_pk(self):
"""
Return the primary key name(s) from a model.
If the model has a single primary key, a string is returned; otherwise a tuple is returned.
"""
return tools.get_primary_key(self.model)
def get_pk_value(self, model):
"""
Return the primary key value from a model object.
If there are multiple primary keys, they're encoded into string representation.
"""
if isinstance(self._primary_key, tuple):
return tools.iterencode(getattr(model, attr) for attr in self._primary_key)
else:
return tools.escape(getattr(model, self._primary_key))
def scaffold_list_columns(self):
"""
Return a list of columns from the model.
"""
columns = []
for p in self._get_model_iterator():
if hasattr(p, 'direction'):
if self.column_display_all_relations or p.direction.name == 'MANYTOONE':
columns.append(p.key)
elif hasattr(p, 'columns'):
if len(p.columns) > 1:
filtered = tools.filter_foreign_columns(self.model.__table__, p.columns)
if len(filtered) > 1:
warnings.warn('Can not convert multiple-column properties (%s.%s)' % (self.model, p.key))
continue
column = filtered[0]
else:
column = p.columns[0]
if column.foreign_keys:
continue
if not self.column_display_pk and column.primary_key:
continue
columns.append(p.key)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns.
Key is column name, value is sort column/field.
"""
columns = dict()
for p in self._get_model_iterator():
if hasattr(p, 'columns'):
# Sanity check
if len(p.columns) > 1:
# Multi-column properties are not supported
continue
column = p.columns[0]
# Can't sort on primary or foreign keys by default
if column.foreign_keys:
continue
if not self.column_display_pk and column.primary_key:
continue
columns[p.key] = column
return columns
def get_sortable_columns(self):
"""
Returns a dictionary of the sortable columns. Key is a model
field name and value is a sort column (for example, an attribute).
If `column_sortable_list` is set, will use it. Otherwise, will call
`scaffold_sortable_columns` to get them from the model.
"""
self._sortable_joins = dict()
if self.column_sortable_list is None:
return self.scaffold_sortable_columns()
else:
result = dict()
for c in self.column_sortable_list:
if isinstance(c, tuple):
column, path = self._get_field_with_path(c[1])
column_name = c[0]
elif isinstance(c, InstrumentedAttribute):
column, path = self._get_field_with_path(c)
column_name = str(c)
else:
column, path = self._get_field_with_path(c)
column_name = c
result[column_name] = column
if path:
self._sortable_joins[column_name] = path
return result
def init_search(self):
"""
Initialize search. Returns `True` if search is supported for this
view.
For SQLAlchemy, this will initialize internal fields: list of
column objects used for filtering, etc.
"""
if self.column_searchable_list:
self._search_fields = []
for p in self.column_searchable_list:
attr, joins = self._get_field_with_path(p)
if not attr:
raise Exception('Failed to find field for search field: %s' % p)
for column in self._get_columns_for_field(attr):
self._search_fields.append((column, joins))
return bool(self.column_searchable_list)
def scaffold_filters(self, name):
"""
Return list of enabled filters
"""
attr, joins = self._get_field_with_path(name)
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Figure out filters for related column, unless it's a hybrid_property
if isinstance(attr, ColumnElement):
warnings.warn(('Unable to scaffold the filter for %s, scaffolding '
'for hybrid_property is not supported yet.') % name)
elif hasattr(attr, 'property') and hasattr(attr.property, 'direction'):
filters = []
for p in self._get_model_iterator(attr.property.mapper.class_):
if hasattr(p, 'columns'):
# TODO: Check for multiple columns
column = p.columns[0]
if column.foreign_keys or column.primary_key:
continue
visible_name = '%s / %s' % (self.get_column_name(attr.prop.table.name),
self.get_column_name(p.key))
type_name = type(column.type).__name__
flt = self.filter_converter.convert(type_name,
column,
visible_name)
if flt:
table = column.table
if joins:
self._filter_joins[column] = joins
elif self._need_join(table):
self._filter_joins[column] = [table]
filters.extend(flt)
return filters
else:
columns = self._get_columns_for_field(attr)
if len(columns) > 1:
raise Exception('Can not filter on more than one column for %s' % name)
column = columns[0]
if self._need_join(column.table) and name not in self.column_labels:
visible_name = '%s / %s' % (
self.get_column_name(column.table.name),
self.get_column_name(column.name)
)
else:
if not isinstance(name, string_types):
visible_name = self.get_column_name(name.property.key)
else:
visible_name = self.get_column_name(name)
type_name = type(column.type).__name__
flt = self.filter_converter.convert(
type_name,
column,
visible_name,
options=self.column_choices.get(name),
)
if joins:
self._filter_joins[column] = joins
elif self._need_join(column.table):
self._filter_joins[column] = [column.table]
return flt
def handle_filter(self, filter):
if isinstance(filter, sqla_filters.BaseSQLAFilter):
column = filter.column
# hybrid_property joins are not supported yet
if (isinstance(column, InstrumentedAttribute) and
self._need_join(column.table)):
self._filter_joins[column] = [column.table]
return filter
def scaffold_form(self):
"""
Create form from the model.
"""
converter = self.model_form_converter(self.session, self)
form_class = form.get_form(self.model, converter,
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
if self.inline_models:
form_class = self.scaffold_inline_form_models(form_class)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
converter = self.model_form_converter(self.session, self)
form_class = form.get_form(self.model, converter,
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
def scaffold_inline_form_models(self, form_class):
"""
Contribute inline models to the form
:param form_class:
Form class
"""
inline_converter = self.inline_model_form_converter(self.session,
self,
self.model_form_converter)
for m in self.inline_models:
form_class = inline_converter.contribute(self.model, form_class, m)
return form_class
def scaffold_auto_joins(self):
"""
Return a list of joined tables by going through the
displayed columns.
"""
if not self.column_auto_select_related:
return []
relations = set()
for p in self._get_model_iterator():
if hasattr(p, 'direction'):
# Check if it is pointing to same model
if p.mapper.class_ == self.model:
continue
if p.direction.name in ['MANYTOONE', 'MANYTOMANY']:
relations.add(p.key)
joined = []
for prop, name in self._list_columns:
if prop in relations:
joined.append(getattr(self.model, prop))
return joined
# AJAX foreignkey support
def _create_ajax_loader(self, name, options):
return create_ajax_loader(self.model, self.session, name, name, options)
# Database-related API
def get_query(self):
"""
Return a query for the model type.
If you override this method, don't forget to override `get_count_query` as well.
This method can be used to set a "persistent filter" on an index_view.
Example::
class MyView(ModelView):
def get_query(self):
return super(MyView, self).get_query().filter(User.username == current_user.username)
"""
return self.session.query(self.model)
def get_count_query(self):
"""
Return the count query for the model type
A ``query(self.model).count()`` approach produces an excessive
subquery, so ``query(func.count('*'))`` should be used instead.
See commit ``#45a2723`` for details.
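For example, to mirror the persistent filter from the ``get_query`` example above::
    class MyView(ModelView):
        def get_count_query(self):
            return super(MyView, self).get_count_query().filter(User.username == current_user.username)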
"""
return self.session.query(func.count('*')).select_from(self.model)
def _order_by(self, query, joins, sort_joins, sort_field, sort_desc):
"""
Apply order_by to the query
:param query:
Query
:param joins:
Current joins
:param sort_joins:
Sort joins (properties or tables)
:param sort_field:
Sort field
:param sort_desc:
Sort in descending order if `True`
"""
if sort_field is not None:
# Handle joins
query, joins, alias = self._apply_path_joins(query, joins, sort_joins, inner_join=False)
column = sort_field if alias is None else getattr(alias, sort_field.key)
if sort_desc:
query = query.order_by(desc(column))
else:
query = query.order_by(column)
return query, joins
def _get_default_order(self):
order = super(ModelView, self)._get_default_order()
if order is not None:
field, direction = order
attr, joins = self._get_field_with_path(field)
return attr, joins, direction
return None
def _apply_sorting(self, query, joins, sort_column, sort_desc):
if sort_column is not None:
if sort_column in self._sortable_columns:
sort_field = self._sortable_columns[sort_column]
sort_joins = self._sortable_joins.get(sort_column)
query, joins = self._order_by(query, joins, sort_joins, sort_field, sort_desc)
else:
order = self._get_default_order()
if order:
sort_field, sort_joins, sort_desc = order
query, joins = self._order_by(query, joins, sort_joins, sort_field, sort_desc)
return query, joins
def _apply_search(self, query, count_query, joins, count_joins, search):
"""
Apply search to a query.
"""
terms = search.split(' ')
for term in terms:
if not term:
continue
stmt = tools.parse_like_term(term)
filter_stmt = []
count_filter_stmt = []
for field, path in self._search_fields:
query, joins, alias = self._apply_path_joins(query, joins, path, inner_join=False)
count_alias = None
if count_query is not None:
count_query, count_joins, count_alias = self._apply_path_joins(count_query,
count_joins,
path,
inner_join=False)
column = field if alias is None else getattr(alias, field.key)
filter_stmt.append(cast(column, Unicode).ilike(stmt))
if count_query is not None:
column = field if count_alias is None else getattr(count_alias, field.key)
count_filter_stmt.append(cast(column, Unicode).ilike(stmt))
query = query.filter(or_(*filter_stmt))
if count_query is not None:
count_query = count_query.filter(or_(*count_filter_stmt))
return query, count_query, joins, count_joins
def _apply_filters(self, query, count_query, joins, count_joins, filters):
for idx, flt_name, value in filters:
flt = self._filters[idx]
alias = None
count_alias = None
# Figure out joins
if isinstance(flt, sqla_filters.BaseSQLAFilter):
path = self._filter_joins.get(flt.column, [])
query, joins, alias = self._apply_path_joins(query, joins, path, inner_join=False)
if count_query is not None:
count_query, count_joins, count_alias = self._apply_path_joins(
count_query,
count_joins,
path,
inner_join=False)
# Clean the value with flt.clean() and apply the filter
clean_value = flt.clean(value)
try:
query = flt.apply(query, clean_value, alias)
except TypeError:
spec = inspect.getargspec(flt.apply)
if len(spec.args) == 3:
warnings.warn('Please update your custom filter %s to include additional `alias` parameter.' % repr(flt))
else:
raise
query = flt.apply(query, clean_value)
if count_query is not None:
try:
count_query = flt.apply(count_query, clean_value, count_alias)
except TypeError:
count_query = flt.apply(count_query, clean_value)
return query, count_query, joins, count_joins
def _apply_pagination(self, query, page, page_size):
if page_size is None:
page_size = self.page_size
if page_size:
query = query.limit(page_size)
if page and page_size:
query = query.offset(page * page_size)
return query
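# Worked example (illustrative only): pages are zero-based, so with
# page_size=25 a request for page=2 produces roughly
# ``query.limit(25).offset(50)``, i.e. rows 51-75; page=0 or page=None
# applies the limit with no offset.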
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True, page_size=None):
"""
Return records from the database.
:param page:
Page number
:param sort_column:
Sort column name
:param sort_desc:
Descending or ascending sort
:param search:
Search query
:param execute:
Execute query immediately? Default is `True`
:param filters:
List of filter tuples
:param page_size:
Number of results. Defaults to ModelView's page_size. Can be
overridden to change the page_size limit. Removing the page_size
limit requires setting page_size to 0 or False.
"""
# Will contain join paths with optional aliased object
joins = {}
count_joins = {}
query = self.get_query()
count_query = self.get_count_query() if not self.simple_list_pager else None
# Ignore eager-loaded relations (prevent unnecessary joins)
# TODO: Separate join detection for query and count query?
if hasattr(query, '_join_entities'):
for entity in query._join_entities:
for table in entity.tables:
joins[table] = None
# Apply search criteria
if self._search_supported and search:
query, count_query, joins, count_joins = self._apply_search(query,
count_query,
joins,
count_joins,
search)
# Apply filters
if filters and self._filters:
query, count_query, joins, count_joins = self._apply_filters(query,
count_query,
joins,
count_joins,
filters)
# Calculate number of rows if necessary
count = count_query.scalar() if count_query else None
# Auto join
for j in self._auto_joins:
query = query.options(joinedload(j))
# Sorting
query, joins = self._apply_sorting(query, joins, sort_column, sort_desc)
# Pagination
query = self._apply_pagination(query, page, page_size)
# Execute if needed
if execute:
query = query.all()
return count, query
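# Illustrative usage sketch (not part of the original code; this method is
# normally driven by Flask-Admin's index_view, and the argument values
# below are hypothetical):
#
#     count, items = view.get_list(page=0, sort_column=None, sort_desc=False,
#                                  search=None, filters=None)
#     # `count` is None when simple_list_pager is enabled; `items` is the
#     # list of model instances for the requested page.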
def get_one(self, id):
"""
Return a single model by its id.
:param id:
Model id
"""
return self.session.query(self.model).get(tools.iterdecode(id))
# Error handler
def handle_view_exception(self, exc):
if isinstance(exc, IntegrityError):
flash(gettext('Integrity error. %(message)s', message=text_type(exc)), 'error')
return True
return super(ModelView, self).handle_view_exception(exc)
# Model handlers
def create_model(self, form):
"""
Create model from form.
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self.session.add(model)
self._on_model_change(form, model, True)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to create record.')
self.session.rollback()
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model from form.
:param form:
Form instance
:param model:
Model instance
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to update record.')
self.session.rollback()
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model.
:param model:
Model to delete
"""
try:
self.on_model_delete(model)
self.session.flush()
self.session.delete(model)
self.session.commit()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s', error=str(ex)), 'error')
log.exception('Failed to delete record.')
self.session.rollback()
return False
else:
self.after_model_delete(model)
return True
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
query = get_query_for_ids(self.get_query(), self.model, ids)
if self.fast_mass_delete:
count = query.delete(synchronize_session=False)
else:
count = 0
for m in query.all():
if self.delete_model(m):
count += 1
self.session.commit()
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
raise
flash(gettext('Failed to delete records. %(error)s', error=str(ex)), 'error')
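# Illustrative wiring sketch (not part of the original module): registering a
# ModelView subclass with a Flask-Admin instance. `app`, `db` and `User` are
# hypothetical application objects.
#
#     from flask_admin import Admin
#
#     class UserView(ModelView):
#         column_editable_list = ['username']
#         page_size = 50
#
#     admin = Admin(app, name='example')
#     admin.add_view(UserView(User, db.session))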
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from keystoneclient.common import cms
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.contrib import federation
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import importutils
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
LOG = log.getLogger(__name__)
CONF = config.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try to load methods a single time.
return
# config.setup_authentication should be idempotent; call it to ensure we
# have set up all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in CONF.auth.methods:
if '.' in plugin:
# NOTE(morganfainberg): if '.' is in the plugin name, it should be
# imported rather than used as a plugin identifier.
plugin_class = plugin
driver = importutils.import_object(plugin)
if not hasattr(driver, 'method'):
raise ValueError(_('Cannot load an auth-plugin by class-name '
'without a "method" attribute defined: %s'),
plugin_class)
else:
plugin_class = CONF.auth.get(plugin)
driver = importutils.import_object(plugin_class)
if hasattr(driver, 'method'):
if driver.method != plugin:
raise ValueError(_('Driver requested method %(req)s does '
'not match plugin name %(plugin)s.') %
{'req': driver.method,
'plugin': plugin})
else:
LOG.warning(_('Auth Plugin %s does not have a "method" '
'attribute.'), plugin)
setattr(driver, 'method', plugin)
if driver.method in AUTH_METHODS:
raise ValueError(_('Auth plugin %(plugin)s is requesting '
'previously registered method %(method)s') %
{'plugin': plugin_class, 'method': driver.method})
AUTH_METHODS[driver.method] = driver
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
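# Illustrative sketch (not part of the original module) of the configuration
# that load_auth_methods() consumes. Method names listed in [auth]/methods
# are resolved either via a same-named option holding a class path or, if
# the name itself contains a '.', imported directly as a class. The class
# paths below are indicative of this era, not authoritative.
#
#     [auth]
#     methods = external,password,token
#     password = keystone.auth.plugins.password.Password
#     token = keystone.auth.plugins.token.Token
#     external = keystone.auth.plugins.external.DefaultDomain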
# TODO(blk-u): this class doesn't use identity_api directly, but makes it
# available for consumers. Consumers should probably not be getting
# identity_api from this since it's available in the global registry;
# identity_api should then be removed from this list.
@dependency.requires('assignment_api', 'identity_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
self._scope_data = (None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref)
# project scope: (None, project_id, None)
# domain scope: (domain_id, None, None)
# trust scope: (None, None, trust_ref)
# unscoped: (None, None, None)
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
if not project_ref.get('enabled', True):
msg = _('Project is disabled: %s') % project_ref['id']
LOG.warning(msg)
raise exception.Unauthorized(msg)
def _assert_domain_is_enabled(self, domain_ref):
if not domain_ref.get('enabled'):
msg = _('Domain is disabled: %s') % (domain_ref['id'])
LOG.warning(msg)
raise exception.Unauthorized(msg)
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.assignment_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.assignment_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.assignment_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.assignment_api.get_project(project_id)
except exception.ProjectNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, or OS-TRUST:trust',
target='scope')
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if 'project_id' in trust_ref:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref)
else:
self._scope_data = (None, None, trust_ref)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
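# Worked example (illustrative): duplicates are dropped while order is kept,
# so ['password', 'token', 'password'] becomes ['password', 'token'].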
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref).
If scope to a project, (None, project_id, None)
will be returned.
If scoped to a domain, (domain_id, None, None)
will be returned.
If scoped to a trust, (None, project_id, trust_ref)
will be returned, where the project_id comes from the
trust definition.
If unscoped, (None, None, None) will be returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust)
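# Illustrative sketch (not part of the original module): the shape of the
# "auth" request body that AuthInfo.create() validates, shown for a
# password-based, project-scoped request. All identifiers are hypothetical.
#
#     auth = {
#         'identity': {
#             'methods': ['password'],
#             'password': {
#                 'user': {'id': 'u-123', 'password': 'secret'},
#             },
#         },
#         'scope': {
#             'project': {'name': 'demo', 'domain': {'id': 'default'}},
#         },
#     }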
@dependency.requires('assignment_api', 'identity_api', 'token_api',
'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From the V3 auth controller code we are
# calling protection() wrappers, so we need to set up
# the member_name and collection_name attributes of
# the auth controller.
# In the absence of these attributes, the default 'entity'
# string will be used to represent the target, which is
# generic. Policy can be defined using 'entity', but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
try:
auth_info = AuthInfo.create(context, auth=auth)
auth_context = {'extras': {}, 'method_names': [], 'bind': {}}
self.authenticate(context, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust) = auth_info.get_scope()
if trust:
self.trust_api.consume_use(trust['id'])
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, trust, metadata_ref, include_catalog)
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, trust) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if federation.IDENTITY_PROVIDER in auth_context:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
# The user has no default project, so an unscoped token will be issued.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.assignment_api.get_project(
default_project_id)
default_project_domain_ref = self.assignment_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token will"
" be unscoped rather than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _("User %(user_id)s's default project %(project_id)s is"
" disabled. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# user has been authenticated externally
if 'REMOTE_USER' in context['environment']:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, context):
token_id = context.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected()
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = json.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
# FIXME(gyee): not sure if it belongs here or in keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
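# Illustrative sketch (not part of the original module) of the resulting HTTP
# exchange: the token ID travels in the X-Subject-Token header while the body
# carries the token data. Values below are hypothetical.
#
#     HTTP/1.1 201 Created        (200 OK when validating an existing token)
#     X-Subject-Token: 0123deadbeef...
#
#     {"token": {"methods": ["password"], "expires_at": "...", ...}}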
|
|
import math
import logging
from time import time
from collections import deque, namedtuple
# Based on a fork of Arduino PID AutoTune Library
# See https://github.com/t0mpr1c3/Arduino-PID-AutoTune-Library
class PIDAutotune(object):
"""Determines viable parameters for a PID controller.
Args:
setpoint (float): The target value.
out_step (float): The value by which the output will be
increased/decreased when stepping up/down.
sampletime (float): The interval between run() calls.
lookback (float): The reference period for local minima/maxima.
out_min (float): Lower output limit.
out_max (float): Upper output limit.
noiseband (float): Determines by how much the input value must
overshoot/undershoot the setpoint before the state changes.
time (function): A function which returns the current time in seconds.
"""
PIDParams = namedtuple('PIDParams', ['Kp', 'Ki', 'Kd'])
PEAK_AMPLITUDE_TOLERANCE = 0.05
STATE_OFF = 'off'
STATE_RELAY_STEP_UP = 'relay step up'
STATE_RELAY_STEP_DOWN = 'relay step down'
STATE_SUCCEEDED = 'succeeded'
STATE_FAILED = 'failed'
_tuning_rules = {
# rule: [Kp_divisor, Ki_divisor, Kd_divisor]
"ziegler-nichols": [34, 40, 160],
"tyreus-luyben": [44, 9, 126],
"ciancone-marlin": [66, 88, 162],
"pessen-integral": [28, 50, 133],
"some-overshoot": [60, 40, 60],
"no-overshoot": [100, 40, 60],
"brewing": [2.5, 6, 380]
}
def __init__(self, setpoint, out_step=10, sampletime=5, lookback=60,
out_min=float('-inf'), out_max=float('inf'), noiseband=0.5, time=time):
if setpoint is None:
raise ValueError('setpoint must be specified')
if out_step < 1:
raise ValueError('out_step must be greater than or equal to 1')
if sampletime < 1:
raise ValueError('sampletime must be greater than or equal to 1')
if lookback < sampletime:
raise ValueError('lookback must be greater than or equal to sampletime')
if out_min >= out_max:
raise ValueError('out_min must be less than out_max')
self._time = time
self._logger = logging.getLogger(type(self).__name__)
self._inputs = deque(maxlen=round(lookback / sampletime))
self._sampletime = sampletime * 1000
self._setpoint = setpoint
self._outputstep = out_step
self._noiseband = noiseband
self._out_min = out_min
self._out_max = out_max
self._state = PIDAutotune.STATE_OFF
self._peak_timestamps = deque(maxlen=5)
self._peaks = deque(maxlen=5)
self._output = 0
self._last_run_timestamp = 0
self._peak_type = 0
self._peak_count = 0
self._initial_output = 0
self._induced_amplitude = 0
self._Ku = 0
self._Pu = 0
@property
def state(self):
"""Get the current state."""
return self._state
@property
def output(self):
"""Get the last output value."""
return self._output
@property
def tuning_rules(self):
"""Get a list of all available tuning rules."""
return self._tuning_rules.keys()
def get_pid_parameters(self, tuning_rule='ziegler-nichols'):
"""Get PID parameters.
Args:
tuning_rule (str): Sets the rule which should be used to calculate
the parameters.
"""
divisors = self._tuning_rules[tuning_rule]
kp = self._Ku / divisors[0]
ki = kp / (self._Pu / divisors[1])
kd = kp * (self._Pu / divisors[2])
return PIDAutotune.PIDParams(kp, ki, kd)
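# Worked example (illustrative numbers): with an ultimate gain Ku = 3.4 and
# an ultimate period Pu = 80 s, the 'ziegler-nichols' divisors [34, 40, 160]
# give Kp = 3.4/34 = 0.1, Ki = 0.1/(80/40) = 0.05 and Kd = 0.1*(80/160) = 0.05.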
def run(self, input_val):
"""To autotune a system, this method must be called periodically.
Args:
input_val (float): The input value.
Returns:
True if tuning is finished, otherwise False.
"""
now = self._time() * 1000
if (self._state == PIDAutotune.STATE_OFF
or self._state == PIDAutotune.STATE_SUCCEEDED
or self._state == PIDAutotune.STATE_FAILED):
self._initTuner(input_val, now)
elif (now - self._last_run_timestamp) < self._sampletime:
return False
self._last_run_timestamp = now
# check input and change relay state if necessary
if (self._state == PIDAutotune.STATE_RELAY_STEP_UP
and input_val > self._setpoint + self._noiseband):
self._state = PIDAutotune.STATE_RELAY_STEP_DOWN
self._logger.debug('switched state: {0}'.format(self._state))
self._logger.debug('input: {0}'.format(input_val))
elif (self._state == PIDAutotune.STATE_RELAY_STEP_DOWN
and input_val < self._setpoint - self._noiseband):
self._state = PIDAutotune.STATE_RELAY_STEP_UP
self._logger.debug('switched state: {0}'.format(self._state))
self._logger.debug('input: {0}'.format(input_val))
# set output
if (self._state == PIDAutotune.STATE_RELAY_STEP_UP):
self._output = self._initial_output + self._outputstep
elif self._state == PIDAutotune.STATE_RELAY_STEP_DOWN:
self._output = self._initial_output - self._outputstep
# respect output limits
self._output = min(self._output, self._out_max)
self._output = max(self._output, self._out_min)
# identify peaks
is_max = True
is_min = True
for val in self._inputs:
is_max = is_max and (input_val >= val)
is_min = is_min and (input_val <= val)
self._inputs.append(input_val)
# we don't want to trust the maxes or mins until the input array is full
if len(self._inputs) < self._inputs.maxlen:
return False
# increment peak count and record peak time for maxima and minima
inflection = False
# peak types:
# -1: minimum
# +1: maximum
if is_max:
if self._peak_type == -1:
inflection = True
self._peak_type = 1
elif is_min:
if self._peak_type == 1:
inflection = True
self._peak_type = -1
# update peak times and values
if inflection:
self._peak_count += 1
self._peaks.append(input_val)
self._peak_timestamps.append(now)
self._logger.debug('found peak: {0}'.format(input_val))
self._logger.debug('peak count: {0}'.format(self._peak_count))
# check for convergence of induced oscillation
# convergence of amplitude assessed on last 4 peaks (1.5 cycles)
self._induced_amplitude = 0
if inflection and (self._peak_count > 4):
abs_max = self._peaks[-2]
abs_min = self._peaks[-2]
for i in range(0, len(self._peaks) - 2):
self._induced_amplitude += abs(self._peaks[i] - self._peaks[i+1])
abs_max = max(self._peaks[i], abs_max)
abs_min = min(self._peaks[i], abs_min)
self._induced_amplitude /= 6.0
# check convergence criterion for amplitude of induced oscillation
amplitude_dev = ((0.5 * (abs_max - abs_min) - self._induced_amplitude)
/ self._induced_amplitude)
self._logger.debug('amplitude: {0}'.format(self._induced_amplitude))
self._logger.debug('amplitude deviation: {0}'.format(amplitude_dev))
if amplitude_dev < PIDAutotune.PEAK_AMPLITUDE_TOLERANCE:
self._state = PIDAutotune.STATE_SUCCEEDED
# if the autotune has not already converged
# terminate after 10 cycles
if self._peak_count >= 20:
self._output = 0
self._state = PIDAutotune.STATE_FAILED
return True
if self._state == PIDAutotune.STATE_SUCCEEDED:
self._output = 0
# calculate ultimate gain
self._Ku = 4.0 * self._outputstep / (self._induced_amplitude * math.pi)
# calculate ultimate period in seconds
period1 = self._peak_timestamps[3] - self._peak_timestamps[1]
period2 = self._peak_timestamps[4] - self._peak_timestamps[2]
self._Pu = 0.5 * (period1 + period2) / 1000.0
return True
return False
def _initTuner(self, inputValue, timestamp):
self._peak_type = 0
self._peak_count = 0
self._output = 0
self._initial_output = 0
self._Ku = 0
self._Pu = 0
self._inputs.clear()
self._peaks.clear()
self._peak_timestamps.clear()
self._peak_timestamps.append(timestamp)
self._state = PIDAutotune.STATE_RELAY_STEP_UP
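# Illustrative usage sketch (not part of the original module): drive the
# autotuner against a crude first-order process model so the
# run()/get_pid_parameters() calling pattern is visible. The process model,
# setpoint and fake clock below are hypothetical.
if __name__ == '__main__':
    import itertools

    fake_clock = itertools.count(0, 5)  # pretend 5 seconds pass per run() call
    tuner = PIDAutotune(setpoint=25.0, out_step=10, sampletime=5, lookback=15,
                        out_min=0, out_max=100, noiseband=0.5,
                        time=lambda: next(fake_clock))
    temperature = 20.0
    for _ in range(2000):
        if tuner.run(temperature):
            break
        # toy first-order response of the process to the relay output
        temperature += 0.1 * (tuner.output - (temperature - 20.0))
    print('final state:', tuner.state)
    if tuner.state == PIDAutotune.STATE_SUCCEEDED:
        print(tuner.get_pid_parameters('ziegler-nichols'))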
|
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
from test_framework.siphash import siphash256
from test_framework.script import CScript, OP_TRUE
XT_TWEAK = True
if XT_TWEAK:
print("\nXT tweaks enabled:")
print("We do on purpose not implement version = 2 (segwit).\n"
+ "All mentions of segwit are not segwit.\n\n")
'''
CompactBlocksTest -- test compact blocks (BIP 152)
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
'''
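# Rough BIP 152 message flow exercised by this test (illustrative summary;
# see the BIP for the authoritative description):
#
#   peer --- sendcmpct(announce=1, version) ---> node   # opt in to cb announcements
#   node --- cmpctblock(header + shortids) ----> peer   # new block announced
#   peer --- getblocktxn(blockhash, indexes) --> node   # request missing txs
#   node --- blocktxn(blockhash, txs) ---------> peer   # peer reconstructs block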
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_sendcmpct = []
self.last_headers = None
self.last_inv = None
self.last_cmpctblock = None
self.block_announced = False
self.last_getdata = None
self.last_getheaders = None
self.last_getblocktxn = None
self.last_block = None
self.last_blocktxn = None
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.set_announced_blockhashes = set()
def on_sendcmpct(self, conn, message):
self.last_sendcmpct.append(message)
def on_block(self, conn, message):
self.last_block = message
def on_cmpctblock(self, conn, message):
self.last_cmpctblock = message
self.block_announced = True
self.last_cmpctblock.header_and_shortids.header.calc_sha256()
self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256)
def on_headers(self, conn, message):
self.last_headers = message
self.block_announced = True
for x in self.last_headers.headers:
x.calc_sha256()
self.set_announced_blockhashes.add(x.sha256)
def on_inv(self, conn, message):
self.last_inv = message
for x in self.last_inv.inv:
if x.type == 2:
self.block_announced = True
self.set_announced_blockhashes.add(x.hash)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_getblocktxn(self, conn, message):
self.last_getblocktxn = message
def on_blocktxn(self, conn, message):
self.last_blocktxn = message
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
self.last_cmpctblock = None
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
assert(wait_until(self.received_block_announcement, timeout=30))
assert(self.received_block_announcement())
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.set_announced_blockhashes)
return wait_until(received_hash, timeout=timeout)
class CompactBlocksTest(BitcoinTestFramework):
def __init__(self):
super(CompactBlocksTest, self).__init__()
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.utxos = []
def setup_network(self):
self.nodes = []
# Start up node0 to be a version 1, pre-segwit node.
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
[["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"],
["-debug", "-logtimemicros", "-txindex"]])
connect_nodes(self.nodes[0], 1)
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(absoluteHeight = height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
if XT_TWEAK:
# We never prefer segwit.
preferred_version = 1
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
got_message = wait_until(received_sendcmpct, timeout=30)
assert(received_sendcmpct())
assert(got_message)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
assert(got_message)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_cmpctblock, peer.last_inv))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
if XT_TWEAK:
sendcmpct.version = 1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
if XT_TWEAK:
version = 1
use_witness_address = False
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if XT_TWEAK:
continue
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
assert(test_node.wait_for_block_announcement(tip))
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
[tx.calc_sha256() for tx in block.vtx]
block.rehash()
# Don't care which type of announcement came back for this test; just
# request the compact block if we didn't get one yet.
wait_until(test_node.received_block_announcement, timeout=30)
assert(test_node.received_block_announcement())
with mininode_lock:
if test_node.last_cmpctblock is None:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30)
assert(test_node.received_block_announcement())
# Now we should have the compactblock
header_and_shortids = None
with mininode_lock:
assert(test_node.last_cmpctblock is not None)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert(len(header_and_shortids.prefilled_txn) >= 1)
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
if XT_TWEAK:
continue
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert(entry.tx.wit.is_null())
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
# Try announcing a block with an inv or header, expect a compactblock
# request
if XT_TWEAK:
version = 1
segwit = False
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_getdata = None
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
success = wait_until(lambda: test_node.last_getheaders is not None, timeout=30)
assert(success)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
success = wait_until(lambda: test_node.last_getdata is not None, timeout=30)
assert(success)
assert_equal(len(test_node.last_getdata.inv), 1)
assert_equal(test_node.last_getdata.inv[0].type, 4)
assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [
calculate_shortid(k0, k1, coinbase_hash) ]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert(test_node.last_getblocktxn is not None)
absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_witness_blocktxn()
else:
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
if XT_TWEAK:
version = 1
with_witness = (version==2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert(peer.last_getblocktxn is not None)
absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
if XT_TWEAK:
comp_block.initialize_from_block(block)
else:
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_blocktxn()
if with_witness:
msg_bt = msg_witness_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
if XT_TWEAK:
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5])
else:
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert(block.vtx[1].hash in node.getrawmempool())
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
if XT_TWEAK:
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4])
else:
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
# Clear out last request.
with mininode_lock:
test_node.last_getblocktxn = None
# Send compact block
if XT_TWEAK:
comp_block.initialize_from_block(block, prefill_list=[0])
else:
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert(test_node.last_getblocktxn is None)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
if XT_TWEAK:
version = 1
if (len(self.utxos) == 0):
self.make_utxos()
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert(tx.hash in mempool)
# Send compact block
comp_block = HeaderAndShortIDs()
if XT_TWEAK:
comp_block.initialize_from_block(block, prefill_list=[0])
else:
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert(test_node.last_getblocktxn is not None)
absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change were made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_blocktxn()
if version==2:
msg = msg_witness_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
success = wait_until(lambda: test_node.last_getdata is not None, timeout=10)
assert(success)
assert_equal(len(test_node.last_getdata.inv), 1)
assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2|MSG_WITNESS_FLAG)
assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
# Deliver the block
if version==2:
test_node.send_and_ping(msg_witness_block(block))
else:
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10)
assert(success)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_blocktxn.block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
# Skip the witness checks below (segwit is not exercised in this test build)
continue
if version == 1:
# Witnesses should have been stripped
assert(tx.wit.is_null())
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_blocktxn = None
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
if XT_TWEAK:
# XT: We don't have a special limit for blocktxn responses
return
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_block = None
test_node.last_blocktxn = None
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_block.block.calc_sha256()
assert_equal(test_node.last_block.block.sha256, int(block_hash, 16))
assert_equal(test_node.last_blocktxn, None)
def test_compactblocks_not_at_tip(self, node, test_node):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30)
assert(success)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_block = None
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
success = wait_until(lambda: test_node.last_block is not None, timeout=30)
assert(success)
with mininode_lock:
test_node.last_block.block.calc_sha256()
assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert(found)
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_blocktxn = None
test_node.send_and_ping(msg)
with mininode_lock:
assert(test_node.last_blocktxn is None)
def activate_segwit(self, node):
node.generate(144*3)
if not XT_TWEAK:
assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
def test_end_to_end_block_relay(self, node, listeners):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30)
with mininode_lock:
for l in listeners:
assert(l.last_cmpctblock is not None)
l.last_cmpctblock.header_and_shortids.header.calc_sha256()
assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
if XT_TWEAK:
use_segwit = False
assert(len(self.utxos))
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
if XT_TWEAK:
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4])
else:
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
assert(int(node.getbestblockhash(), 16) != block.sha256)
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
if XT_TWEAK:
version = 1
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode()
self.segwit_node = TestNode()
self.old_node = TestNode() # version 1 peer <--> segwit node
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
# In XT, this is just another regular node
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.segwit_node))
#connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
#self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.old_node))#, services=NODE_NETWORK))
self.test_node.add_connection(connections[0])
self.segwit_node.add_connection(connections[1])
self.old_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
self.test_node.wait_for_verack()
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
print("Running tests, pre-segwit activation:")
print("\tTesting SENDCMPCT p2p message... ")
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
sync_blocks(self.nodes)
print("\tTesting compactblock construction...")
self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
print("\tTesting compactblock requests... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
print("\tTesting getblocktxn requests...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
print("\tTesting getblocktxn handler...")
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
sync_blocks(self.nodes)
print("\tTesting compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
sync_blocks(self.nodes)
self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
sync_blocks(self.nodes)
print("\tTesting handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
# End-to-end block relay tests
print("\tTesting end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
print("\tTesting handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
# Advance to segwit activation
print ("\nAdvancing to segwit activation\n")
self.activate_segwit(self.nodes[1])
print ("Running tests, post-segwit activation...")
print("\tTesting compactblock construction...")
self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
sync_blocks(self.nodes)
print("\tTesting compactblock requests (unupgraded node)... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
print("\tTesting getblocktxn requests (unupgraded node)...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
# Need to manually sync node0 and node1, because post-segwit activation,
# node1 will not download blocks from node0.
print("\tSyncing nodes...")
if XT_TWEAK:
# segwit never activates in XT, so the nodes remain in sync
sync_blocks(self.nodes)
else:
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
print("\tTesting compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
print("\tTesting getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
print("\tTesting getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
print("\tTesting end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
print("\tTesting handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
print("\tTesting invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
|
|
"""
These functions are mostly related to transitioning data from the Zotero app
to the IsisData app.
TODO: many of these functions could use refactoring, or at least modularizing
for easier testing.
"""
from __future__ import unicode_literals
import datetime
import re
from django.db.models import Q
from isisdata.models import *
from zotero.models import *
import iso8601
def _record_history_message(request, accession):
template = u'Created from Zotero accession {0}, performed at {1} by {2}.' \
+ u' Subsequently ingested by {3}.'
values = (
accession.id,
accession.imported_on,
accession.imported_by,
request.user.username
)
return template.format(*values)
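# Usage sketch (hypothetical values, not taken from this module): the helper
# above only formats a string, so it can be exercised with simple stand-ins.
def _example_record_history_message():
    class _Obj(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)
    request = _Obj(user=_Obj(username='editor_b'))
    accession = _Obj(id=42, imported_on='2017-01-01', imported_by='curator_a')
    return _record_history_message(request, accession)
# -> u'Created from Zotero accession 42, performed at 2017-01-01 by curator_a.
#     Subsequently ingested by editor_b.'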
def ingest_accession(request, accession):
"""
Move all constituents of an :class:`.ImportAccession` into the production
database.
"""
ingested = [] # These will be production Citation instances.
ingested_draft_ids = [] # These will be DraftCitation ids.
for draftcitation in accession.citations_ok:
ingested.append(ingest_citation(request, accession, draftcitation))
ingested_draft_ids.append(draftcitation.id)
ingest_ccrelations(request, accession, ingested_draft_ids)
if accession.citations_remaining.count() == 0:
accession.processed = True
accession.save()
return ingested
def ingest_citation(request, accession, draftcitation):
# If the citation is already resolved, there is nothing to do here: we
# simply return the target of the resolution.
if draftcitation.resolutions.count() > 0:
return draftcitation.resolutions.first().to_instance
citation_fields = [
('title', 'title'),
('description', 'description'),
('abstract', 'abstract'),
('type_controlled', 'type_controlled'),
('book_series', 'book_series'),
('physical_details', 'physical_details')
]
partdetails_fields = [
('page_start', 'page_begin'),
('page_end', 'page_end'),
('pages_free_text', 'pages_free_text'),
('issue', 'issue_free_text'),
('volume', 'volume_free_text'),
('extent', 'extent'),
('extent_note', 'extent_note'),
]
int_only_fields = dict([
('page_start', 'pages_free_text'),
('page_begin', 'pages_free_text'),
('page_end', 'pages_free_text'),
('extent', 'extent_note'),
])
# Gather fields that will be transferred to the production Citation.
citation_data = {}
for field, pfield in citation_fields:
value = getattr(draftcitation, field, None)
if value:
if field in int_only_fields:
try:
value = int(value)
except ValueError:  # Not an int!
citation_data[int_only_fields[pfield]] = value
continue
citation_data[pfield] = value
# Records are inactive/non-public by default. The record_history message
# provides information about the Zotero accession.
citation_data.update({
'_history_user': request.user,
'public': False,
'record_status_value': CuratedMixin.INACTIVE,
'record_status_explanation': u'Inactive by default',
'record_history': _record_history_message(request, accession),
'belongs_to': accession.ingest_to,
'zotero_accession': accession,
})
# Troll for data for PartDetails fields.
partdetails_data = {}
for field, pfield in partdetails_fields:
value = getattr(draftcitation, field)
if value:
if field in int_only_fields:
try:
value = int(value)
except ValueError: # Not an int!
partdetails_data[int_only_fields[pfield]] = value
continue
partdetails_data[pfield] = value
if partdetails_data:
partdetails = PartDetails.objects.create(**partdetails_data)
citation_data.update({'part_details': partdetails})
citation = Citation.objects.create(**citation_data)
InstanceResolutionEvent.objects.create(for_instance=draftcitation, to_instance=citation)
# ISISCB-749 Language should be preserved from Zotero records.
if draftcitation.language:
citation.language.add(draftcitation.language)
date = None
if draftcitation.publication_date:
if isinstance(draftcitation.publication_date, str):
try:
date = iso8601.parse_date(draftcitation.publication_date).date()
except iso8601.ParseError:
match = re.search('([0-9]{4})', draftcitation.publication_date)
if match:
date = match.groups()[0]
else:
date = None
elif type(draftcitation.publication_date) is datetime.datetime:
date = draftcitation.publication_date.date()
if date:
if isinstance(date, str):
date = iso8601.parse_date(date).date()
citation.publication_date = date
pubdatetype, _ = AttributeType.objects.get_or_create(
name='PublicationDate',
defaults={
'value_content_type': ContentType.objects.get_for_model(ISODateValue)
})
if isinstance(date, (datetime.datetime, datetime.date)):
value_freeform = date.year
elif isinstance(date, str):
value_freeform = date[:4]
attribute = Attribute.objects.create(
type_controlled=pubdatetype,
source=citation,
value_freeform=value_freeform
)
vvalue = ISODateValue.objects.create(
value=date,
attribute=attribute,
)
elif draftcitation.publication_date:
# If we cannot parse the publication date as an ISO8601 date, then we
# update the staff notes with the unparseable date so that it is not
# completely lost.
message = u'\nCould not parse publication date in Zotero metadata: %s'\
% draftcitation.publication_date
if citation.administrator_notes:
citation.administrator_notes += message
else:
citation.administrator_notes = message
citation.save()
for relation in draftcitation.authority_relations.all():
draft = relation.authority
try:
target = draft.resolutions.first().to_instance
except AttributeError: # No resolution target. We create a "headless"
target = None # ACRelation.
citation.record_history += u"\n\nThe attempt to match the name %s in the %s field was skipped." % (draft.name, relation.get_type_controlled_display())
citation.save()
if target:
target.zotero_accession = accession
target.save()
# Transfer any linkeddata from the DraftAuthority to the production
# Authority.
for draftlinkeddata in draft.linkeddata.all():
ldtype, _ = LinkedDataType.objects.get_or_create(name=draftlinkeddata.name.upper())
if not target.linkeddata_entries.filter(type_controlled=ldtype, universal_resource_name=draftlinkeddata.value):
LinkedData.objects.create(
subject = target,
universal_resource_name = draftlinkeddata.value,
type_controlled = ldtype
)
draft.linkeddata.all().update(processed=True)
# ISISCB-577 Created ACRelation records should be active by default.
acr_data = {
'_history_user': request.user,
'name_for_display_in_citation': draft.name,
'record_history': _record_history_message(request, accession),
'public': True,
'record_status_value': CuratedMixin.ACTIVE,
'record_status_explanation': u'Active by default',
'authority': target,
'citation': citation,
'type_controlled': relation.type_controlled,
'belongs_to': accession.ingest_to,
'zotero_accession': accession,
'data_display_order': relation.data_display_order,
}
acrelation = ACRelation.objects.create(**acr_data)
InstanceResolutionEvent.objects.create(
for_instance = relation,
to_instance = acrelation,
)
ld_created = set([])
for linkeddata in citation.linkeddata_entries.all():
ld_created.add(linkeddata.universal_resource_name)
for draftlinkeddata in draftcitation.linkeddata.all():
_key = draftlinkeddata.value
if _key in ld_created:
continue
ld_created.add(_key)
ldtype, _ = LinkedDataType.objects.get_or_create(name=draftlinkeddata.name.upper())
LinkedData.objects.create(
subject = citation,
universal_resource_name = draftlinkeddata.value,
type_controlled = ldtype
)
draftcitation.linkeddata.all().update(processed=True)
draftcitation.authority_relations.all().update(processed=True)
draftcitation.processed = True
draftcitation.save()
accession.save()
return citation
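# A standalone sketch of the publication-date fallback used above (illustrative
# only, not shared by the module): try a strict ISO 8601 parse first, then fall
# back to the first 4-digit year found in the raw string.
def _example_parse_publication_date(raw):
    try:
        return iso8601.parse_date(raw).date()
    except iso8601.ParseError:
        match = re.search('([0-9]{4})', raw)
        return match.groups()[0] if match else None
# _example_parse_publication_date('2001-05-03') -> datetime.date(2001, 5, 3)
# _example_parse_publication_date('c. 1987')    -> '1987'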
def ingest_ccrelations(request, accession, ingested):
"""
Ingest :class:`.DraftCCRelation` instances among "ready"
:class:`.DraftCitation`\s.
Parameters
----------
request
accession : :class:`.ImportAccession`
ingested : list
List of :class:`.DraftCitation` ids.
Returns
-------
None
"""
# Both source and target must be ingested, and no other resolution for this
# DraftCCRelation may exist.
query = Q(subject_id__in=ingested) & Q(object_id__in=ingested) & Q(resolutions=None)
for relation in accession.draftccrelation_set.filter(query):
draft_source = relation.subject
source = ingest_citation(request, accession, draft_source) # Get.
draft_target = relation.object
target = ingest_citation(request, accession, draft_target)
ccr_data = {
'_history_user': request.user,
'public': True,
'record_history': _record_history_message(request, accession),
'record_status_value': CuratedMixin.ACTIVE,
'record_status_explanation': u'Active by default',
'subject': source,
'object': target,
'type_controlled': relation.type_controlled,
'belongs_to': accession.ingest_to,
'zotero_accession': accession,
}
ccrelation = CCRelation.objects.create(**ccr_data)
InstanceResolutionEvent.objects.create(
for_instance = relation,
to_instance = ccrelation,
)
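# Note on idempotence (a sketch restating the code above, not new behaviour):
# every ingested draft is linked to its production record through an
# InstanceResolutionEvent, and ingest_citation() short-circuits on an existing
# resolution, so re-running an ingest does not duplicate Citations or
# CCRelations. The eligibility filter used above can be read in isolation as:
def _example_ingestable_ccrelations(accession, ingested_draft_ids):
    """Return DraftCCRelations whose endpoints are ingested and unresolved."""
    query = (Q(subject_id__in=ingested_draft_ids)
             & Q(object_id__in=ingested_draft_ids)
             & Q(resolutions=None))
    return accession.draftccrelation_set.filter(query)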
|
|
import os
import shutil
from biicode.client.exception import ClientException
from biicode.common.utils import file_utils
from biicode.common.find.policy import Policy
from biicode.client.store import hivedb
from biicode.common.utils.file_utils import save, load
from biicode.common.settings.settings import Settings
from biicode.client.workspace.bii_paths import (BiiPaths, DEP_DIR, BIN_DIR, BUILD_DIR, CMAKE_DIR,
LIB_DIR, BII_DIR, SRC_DIR)
from biicode.client.workspace.walk_block import walk_bii_folder, walk_anonymous_block
import fnmatch
from biicode.common.model.brl.block_name import BlockName
from biicode.common.edition.bii_config import BiiConfig
from biicode.common.model.blob import Blob
class HiveDiskImage(object):
"""Handle all actions related to hive in disk."""
def __init__(self, bii_paths, user_cache, biiout):
assert isinstance(bii_paths, BiiPaths)
self._bii_paths = bii_paths
self._user_cache = user_cache
self._biiout = biiout
self._hivedb = None
self._settings = None
self._policies = None
@property
def paths(self):
return self._bii_paths
def initialize(self):
self._hivedb = hivedb.factory(self._bii_paths.new_project_db)
# Touch the properties so default settings.bii and policies files are created
self.settings
self.policies
def clean(self):
try:
self.hivedb.clean()
except Exception as e:
self._biiout.error("Unable to properly clean project DB:\n%s" % str(e))
protect = {os.path.normpath(self._bii_paths.get_by_name(p)): p for p in (BII_DIR, SRC_DIR)}
protect[self._bii_paths.project_root] = "project root"
for folder in [BUILD_DIR, BIN_DIR, CMAKE_DIR, DEP_DIR, LIB_DIR]:
try:
path_folder = os.path.normpath(self._bii_paths.get_by_name(folder))
if os.path.exists(path_folder):
if path_folder in protect:
self._biiout.warn("%s folder will not be cleaned" % folder)
self._biiout.warn("It matches the '%s' folder" % protect[path_folder])
else:
shutil.rmtree(path_folder)
except Exception as e:
self._biiout.error("Unable to delete %s folder\n%s" % (folder, str(e)))
@property
def settings(self):
""" Return Hive settings.
If settings.bii is not present, creates and initializes a default hive settings.bii
"""
if self._settings is None:
settings_path = self._bii_paths.settings
if not os.path.exists(settings_path): # CREATE new hive settings file
settings = Settings() # empty settings, only OS information
save(settings_path, settings.dumps()) # save settings.bii
self._settings = settings
else: # LOAD existing settings.bii file
try:
self._settings = Settings.loads(load(settings_path))
except Exception as e:
raise ClientException('%s\nIn file %s'
% (str(e), settings_path.replace('\\', '/')))
return self._settings
@settings.setter
def settings(self, value):
"""Set hive settings and save.
:param value: new hive settings
:type value: biicode.common.settings.settings.Settings
"""
self._settings = value
save(self._bii_paths.settings, value.dumps())
@property
def policies(self):
if self._policies is None:
policies_path = self._bii_paths.policies
if not os.path.exists(policies_path):
policies = self._user_cache.default_policies
save(policies_path, policies)
else:
policies = load(policies_path)
# Migration to new simple policies.bii format
if policies.lstrip().startswith("# This is the file"):
self._biiout.warn("Upgrading your find policies to new format")
policies = self._user_cache.default_policies
save(policies_path, policies)
if "YOUR_USER_NAME" in policies:
user = self._user_cache.username
if user is not None:
policies = policies.replace("YOUR_USER_NAME", user)
save(policies_path, policies)
self._policies = Policy.loads(policies)
return self._policies
def create_new_block(self, block_name):
''' Creates block folders and main files if the language is specified'''
assert block_name
user, block = block_name.split('/')  # block names are always 'user/block' with forward slashes
# Create the block folder
new_block_path = os.path.join(self._bii_paths.blocks, user, block)
if not os.path.exists(new_block_path):
os.makedirs(new_block_path)
msg_success = "Success: created {block_name} folder in your blocks directory!"
self._biiout.success(msg_success.format(block_name=block_name))
else:
msg_info = "{block_name} folder already exists in your blocks directory"
self._biiout.info(msg_info.format(block_name=block_name))
return new_block_path
def create_new_file(self, block_path, file_name, content=''):
''' Create main files with Hello World templates '''
file_path = os.path.join(block_path, file_name)
save(file_path, content) # save method handles exceptions
msg_success = 'Success: created {file_name} file in {path}'
self._biiout.success(msg_success.format(file_name=file_name, path=block_path))
def delete_build_folder(self):
if os.path.exists(self._bii_paths.build):
shutil.rmtree(self._bii_paths.build)
os.makedirs(self._bii_paths.build)
@property
def hivedb(self):
"""Return HiveDB object."""
if self._hivedb is None:
self._hivedb = hivedb.factory(self._bii_paths.hivedb)
return self._hivedb
def close(self):
if self._hivedb is not None:
self._hivedb.disconnect()
self._hivedb = None
def update_root_block(self):
if self._bii_paths.auto_root_block:
bii_config_path = os.path.join(self._bii_paths.project_root, "biicode.conf")
parent = (None if not os.path.exists(bii_config_path) else
BiiConfig(Blob(load(bii_config_path)).text).parent)
if parent:
project_block = parent.block_name
else: # Get the root block name from user + folder
project_name = self._bii_paths.project_name
user = self._user_cache.username or "user"
project_block = BlockName("%s/%s" % (user, project_name))
self._bii_paths.root_block = project_block
def get_src_files(self):
""" scans the SRC_DIR to obtain a {BlockCellName: ByteLoad}
"""
#scan regular block folder
bii_ignore = self._user_cache.bii_ignore
result = walk_bii_folder(self._bii_paths.blocks, bii_ignore, self._biiout)
# check if the project root has to be scanned
self.update_root_block()
project_block = self._bii_paths.root_block
# scan project root
if project_block:
result_filter = {bcn: content for bcn, content in result.iteritems()
if bcn.block_name != project_block}
if len(result) != len(result_filter):
self._biiout.warn("Skipping %s block, it already exists in the project root"
% project_block)
result = result_filter
anon = walk_anonymous_block(self._bii_paths, bii_ignore, self._biiout, project_block)
result.update(anon)
return result
def clean_hooks(self):
for folder in (self._bii_paths.blocks, self._bii_paths.deps):
for _, _, files in os.walk(folder):
for f in files:
if fnmatch.fnmatch(f, "bii*clean*hook*"):
return True
return False
@property
def disk_blocks(self):
"""Get the blocks based on disk, not in processed hive"""
result = {}
root_block = self._bii_paths.root_block
if os.path.exists(self._bii_paths.blocks):
for username in os.listdir(self._bii_paths.blocks):
for name in os.listdir(os.path.join(self._bii_paths.blocks, username)):
tmp_path = os.path.join(self._bii_paths.blocks, username, name)
if(os.path.isdir(tmp_path)):
block_name = BlockName("%s/%s" % (username, name))
if root_block == block_name:
self._biiout.warn("Skipping %s, it exists as root block" % root_block)
elif(os.listdir(tmp_path)): # If there is any file inside
result[block_name] = os.path.join(self._bii_paths.blocks, block_name)
if root_block:
result[root_block] = self._bii_paths.project_root
return result
def save(self, folder_name, files):
saved_blocks = set()
project_block = self._bii_paths.root_block
folder = self._bii_paths.get_src_folder(folder_name)
for disk_bcn, load in files.iteritems():
if disk_bcn.block_name == project_block:
filepath = os.path.join(self._bii_paths.project_root, disk_bcn.cell_name)
else:
filepath = os.path.join(folder, disk_bcn)
try:
file_content = file_utils.load(filepath)
except:
file_content = None
if file_content != load:
if folder_name == DEP_DIR and disk_bcn.block_name not in saved_blocks:
saved_blocks.add(disk_bcn.block_name)
self._biiout.info("Saving files from: %s" % disk_bcn.block_name)
file_utils.save(filepath, load)
def delete_removed(self, folder_name, current_block_cell_names, block_filter=None):
""" current_block_cell_names is the set of BlockCellNames currently in closure
Items not in the closure can be deleted.
Params:
folder_name = SRC_DIR or DEP_DIR
current_block_cell_names = [BlockCellName]
block_filter = BlockName or None
if a BlockName is given, only files from that BlockName will be deleted
"""
bii_ignore = self._user_cache.bii_ignore
folder = self._bii_paths.get_src_folder(folder_name)
root_block = self._bii_paths.root_block
project_folder = self._bii_paths.project_root
# Files on biicode control (excluded ignored)
thefiles = walk_bii_folder(folder, bii_ignore, self._biiout)
if root_block and folder_name == SRC_DIR:
afiles = walk_anonymous_block(self._bii_paths, bii_ignore, self._biiout, root_block)
thefiles.update(afiles)
# Delete removed cells
for blockcellname in thefiles:
if ((not block_filter or blockcellname.block_name == block_filter) and
blockcellname not in current_block_cell_names):
if blockcellname.block_name == root_block:
filepath = os.path.join(project_folder, blockcellname.cell_name)
else:
filepath = os.path.join(folder, blockcellname)
os.unlink(filepath)
self._delete_empty_dirs(folder_name, block_filter)
def _delete_empty_dirs(self, folder_name, block_name=None):
folder = self._bii_paths.get_src_folder(folder_name)
if block_name is not None:
folder = os.path.join(folder, block_name)
for root, _, _ in os.walk(folder, topdown=False):
try:
os.rmdir(root)
except OSError:
pass # not empty
if block_name:
try:
os.rmdir(os.path.dirname(folder))
except OSError:
pass # not empty
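# Layout sketch (illustrative, mirroring disk_blocks above): blocks live on
# disk as <project>/blocks/<user>/<block>, plus an optional root block mapped
# to the project root itself. A minimal standalone walk under that assumption:
def _example_list_disk_blocks(blocks_dir):
    """Yield 'user/block' names for every non-empty block folder."""
    if not os.path.exists(blocks_dir):
        return
    for user in os.listdir(blocks_dir):
        user_dir = os.path.join(blocks_dir, user)
        if not os.path.isdir(user_dir):
            continue
        for name in os.listdir(user_dir):
            block_dir = os.path.join(user_dir, name)
            if os.path.isdir(block_dir) and os.listdir(block_dir):
                yield "%s/%s" % (user, name)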
|
|
#!/usr/bin/env python
"""Ninja toolchain abstraction for Microsoft compiler suite"""
import os
import subprocess
import toolchain
import vslocate
class MSVCToolchain(toolchain.Toolchain):
def initialize(self, project, archs, configs, includepaths, dependlibs, libpaths, variables, subninja):
#Local variable defaults
self.sdkpath = ''
self.toolchain = ''
self.includepaths = []
self.libpaths = libpaths
self.ccompiler = 'cl'
self.cxxcompiler = 'cl'
self.archiver = 'lib'
self.linker = 'link'
self.dller = 'dll'
#Command definitions (to generate assembly, add "/FAs /Fa$out.asm")
self.cccmd = '$toolchain$cc /showIncludes /I. $includepaths $moreincludepaths $cflags $carchflags $cconfigflags $cmoreflags /c $in /Fo$out /Fd$pdbpath /FS /nologo'
self.cxxcmd = '$toolchain$cxx /showIncludes /I. $includepaths $moreincludepaths $cxxflags $carchflags $cconfigflags $cmoreflags /c $in /Fo$out /Fd$pdbpath /FS /nologo'
self.ccdepfile = None
self.ccdeps = 'msvc'
self.arcmd = '$toolchain$ar $arflags $ararchflags $arconfigflags /NOLOGO /OUT:$out $in'
self.linkcmd = '$toolchain$link $libpaths $configlibpaths $linkflags $linkarchflags $linkconfigflags /DEBUG /NOLOGO /SUBSYSTEM:CONSOLE /DYNAMICBASE /NXCOMPAT /MANIFEST /MANIFESTUAC:\"level=\'asInvoker\' uiAccess=\'false\'\" /TLBID:1 /PDB:$pdbpath /OUT:$out $in $libs $archlibs $oslibs'
self.dllcmd = self.linkcmd + ' /DLL'
self.cflags = ['/D', '"' + project.upper() + '_COMPILE=1"', '/D', '"_UNICODE"', '/D', '"UNICODE"', '/std:c17', '/Zi', '/Oi', '/Oy-', '/GS-', '/Gy-', '/Qpar-', '/fp:fast', '/fp:except-', '/Zc:forScope', '/Zc:wchar_t', '/GR-', '/openmp-']
self.cwarnflags = ['/W4', '/WX', '/wd4201'] #Ignore nameless union/struct which is allowed in C11
self.cmoreflags = []
self.arflags = ['/ignore:4221'] #Ignore empty object file warning
self.linkflags = ['/DEBUG']
self.oslibs = ['kernel32', 'user32', 'shell32', 'advapi32']
self.initialize_subninja(subninja)
self.initialize_archs(archs)
self.initialize_configs(configs)
self.initialize_project(project)
self.initialize_toolchain()
self.initialize_depends(dependlibs)
self.parse_default_variables(variables)
self.read_build_prefs()
self.includepaths = self.prefix_includepaths((includepaths or []) + ['.'])
if self.is_monolithic():
self.cflags += ['/D', '"BUILD_MONOLITHIC=1"']
if 'nowarning' not in variables or not variables['nowarning']:
self.cflags += self.cwarnflags
self.cxxflags = list(self.cflags)
#Overrides
self.objext = '.obj'
#Builders
self.builders['c'] = self.builder_cc
self.builders['cc'] = self.builder_cxx
self.builders['cpp'] = self.builder_cxx
self.builders['lib'] = self.builder_lib
self.builders['multilib'] = self.builder_multicopy
self.builders['sharedlib'] = self.builder_sharedlib
self.builders['multisharedlib'] = self.builder_multicopy
self.builders['bin'] = self.builder_bin
self.builders['multibin'] = self.builder_multicopy
#Setup toolchain
self.build_toolchain()
def name(self):
return 'msvc'
def parse_prefs(self, prefs):
super(MSVCToolchain, self).parse_prefs(prefs)
if 'msvc' in prefs:
msvcprefs = prefs['msvc']
if 'sdkpath' in msvcprefs:
self.sdkpath = msvcprefs['sdkpath']
if 'toolchain' in msvcprefs:
self.toolchain = msvcprefs['toolchain']
if 'toolchain_version' in msvcprefs:
self.toolchain_version = msvcprefs['toolchain_version']
def write_variables(self, writer):
super(MSVCToolchain, self).write_variables(writer)
writer.variable('cc', self.ccompiler)
writer.variable('cxx', self.cxxcompiler)
writer.variable('ar', self.archiver)
writer.variable('link', self.linker)
writer.variable('dll', self.dller)
writer.variable('toolchain', self.toolchain)
writer.variable('includepaths', self.make_includepaths(self.includepaths))
writer.variable('moreincludepaths', '')
writer.variable('pdbpath', 'ninja.pdb')
writer.variable('cflags', self.cflags)
writer.variable('carchflags', '')
writer.variable('cconfigflags', '')
writer.variable('cxxflags', self.cxxflags)
writer.variable('cmoreflags', self.cmoreflags)
writer.variable('arflags', self.arflags)
writer.variable('ararchflags', '')
writer.variable('arconfigflags', '')
writer.variable('linkflags', self.linkflags)
writer.variable('linkarchflags', '')
writer.variable('linkconfigflags', '')
writer.variable('libs', '')
writer.variable('libpaths', self.make_libpaths(self.libpaths))
writer.variable('configlibpaths', '')
writer.variable('archlibs', '')
writer.variable('oslibs', self.make_libs(self.oslibs))
writer.newline()
def write_rules(self, writer):
super(MSVCToolchain, self).write_rules(writer)
writer.rule('cc', command = self.cccmd, depfile = self.ccdepfile, deps = self.ccdeps, description = 'CC $in')
writer.rule('cxx', command = self.cxxcmd, depfile = self.ccdepfile, deps = self.ccdeps, description = 'CXX $in')
writer.rule('ar', command = self.arcmd, description = 'LIB $out')
writer.rule('link', command = self.linkcmd, description = 'LINK $out')
writer.rule('dll', command = self.dllcmd, description = 'DLL $out')
writer.newline()
def build_toolchain(self):
if self.toolchain == '':
installed_versions = vslocate.get_vs_installations()
for versionstr, installpath in installed_versions:
major_version = versionstr.split('.')[0]
if int(major_version) >= 15:
tools_basepath = os.path.join(installpath, 'VC', 'Tools', 'MSVC')
tools_list = [item for item in os.listdir(tools_basepath) if os.path.isdir(os.path.join(tools_basepath, item))]
from distutils.version import StrictVersion
tools_list.sort(key=StrictVersion)
self.toolchain = os.path.join(tools_basepath, tools_list[-1])
self.toolchain_version = major_version + ".0"
break
if self.toolchain == '':
toolchain = ''
versions = ['17.0', '16.0', '15.0']
keys = [
'HKLM\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VC7',
'HKCU\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VC7',
'HKLM\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7',
'HKCU\\SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7',
'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7',
'HKCU\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7',
'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VS7',
'HKCU\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VS7'
]
for version in versions:
for key in keys:
try:
query = subprocess.check_output(['reg', 'query', key, '/v', version ], stderr = subprocess.STDOUT).strip().splitlines()
if len(query) == 2:
toolchain = str(query[1]).split('REG_SZ')[-1].strip(" '\"\n\r\t")
except:
continue
if not toolchain == '':
tools_basepath = os.path.join(toolchain, 'VC', 'Tools', 'MSVC')
tools_list = [item for item in os.listdir(tools_basepath) if os.path.isdir(os.path.join(tools_basepath, item))]
from distutils.version import StrictVersion
tools_list.sort(key=StrictVersion)
toolchain = os.path.join(tools_basepath, tools_list[-1])
self.toolchain = toolchain
self.toolchain_version = version
break
if not self.toolchain == '':
break
if self.toolchain == '':
raise Exception("Unable to locate any installed Visual Studio toolchain")
self.includepaths += [os.path.join(self.toolchain, 'include')]
if self.sdkpath == '':
versions = ['v10.0', 'v8.1']
keys = [
'HKLM\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows',
'HKCU\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows',
'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Microsoft SDKs\\Windows',
'HKCU\\SOFTWARE\\Wow6432Node\\Microsoft\\Microsoft SDKs\\Windows'
]
include_path = 'include'
for version in versions:
for key in keys:
sdkpath = ''
try:
query = subprocess.check_output(['reg', 'query', key + '\\' + version, '/v', 'InstallationFolder'], stderr = subprocess.STDOUT).strip().splitlines()
if len(query) == 2:
sdkpath = str(query[1]).split('REG_SZ')[-1].strip(" '\"\n\r\t")
if not sdkpath == '' and version == 'v10.0':
base_path = sdkpath
sdkpath = ''
query = subprocess.check_output(['reg', 'query', key + '\\' + version, '/v', 'ProductVersion'], stderr = subprocess.STDOUT).strip().splitlines()
if len(query) == 2:
version_path = str(query[1]).split('REG_SZ')[-1].strip(" '\"\n\r\t")
if not version_path == '':
sdkpath = base_path
self.sdkversionpath = version_path
versioned_include_path = os.path.join(include_path, self.sdkversionpath)
if not os.path.exists(os.path.join(sdkpath, versioned_include_path)) and os.path.exists(os.path.join(sdkpath, versioned_include_path + '.0')):
self.sdkversionpath = self.sdkversionpath + '.0'
versioned_include_path = os.path.join(include_path, self.sdkversionpath)
include_path = versioned_include_path
except subprocess.CalledProcessError as e:
continue
if not sdkpath == '':
self.includepaths += [
os.path.join(sdkpath, include_path, 'shared'),
os.path.join(sdkpath, include_path, 'um'),
os.path.join(sdkpath, include_path, 'winrt')
]
if version == 'v10.0':
self.includepaths += [
os.path.join(sdkpath, include_path, 'ucrt')
]
self.sdkpath = sdkpath
self.sdkversion = version
break
if not sdkpath == '':
break
if self.toolchain != '' and not self.toolchain.endswith('/') and not self.toolchain.endswith('\\'):
self.toolchain += os.sep
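# Sketch of the registry lookup pattern used above (illustrative only). The
# code expects "reg query <key> /v <value>" to yield two lines after stripping:
# the key path and a "<name>  REG_SZ  <data>" line, so the data is recovered
# by splitting on 'REG_SZ' and stripping quotes/whitespace:
def _example_reg_query(key, value):
    try:
        out = subprocess.check_output(['reg', 'query', key, '/v', value],
                                      stderr=subprocess.STDOUT).strip().splitlines()
    except subprocess.CalledProcessError:
        return ''
    if len(out) == 2:
        return str(out[-1]).split('REG_SZ')[-1].strip(" '\"\n\r\t")
    return ''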
def make_includepaths(self, includepaths):
if includepaths is not None:
return ['/I' + self.path_escape(path) for path in list(includepaths)]
return []
def make_libpath(self, path):
return self.path_escape(path)
def make_libpaths(self, libpaths):
if libpaths is not None:
return ['/LIBPATH:' + self.make_libpath(path) for path in libpaths]
return []
def make_arch_toolchain_path(self, arch):
if arch == 'x86-64':
return os.path.join(self.toolchain, 'bin', 'HostX64', 'x64\\')
elif arch == 'x86':
return os.path.join(self.toolchain, 'bin', 'HostX64', 'x86\\')
return os.path.join(self.toolchain, 'bin\\')
def make_carchflags(self, arch, targettype):
flags = []
if targettype == 'sharedlib':
flags += ['/MD', '/D', '"BUILD_DYNAMIC_LINK=1"']
else:
flags += ['/MT']
if arch == 'x86':
flags += ['/arch:SSE2']
elif arch == 'x86-64':
pass
return flags
def make_cconfigflags(self, config, targettype):
flags = ['/Gm-']
if config == 'debug':
flags += ['/Od', '/D', '"BUILD_DEBUG=1"', '/GF-']
else:
flags += ['/Ob2', '/Ot', '/GT', '/GL', '/GF']
if config == 'release':
flags += ['/O2', '/D', '"BUILD_RELEASE=1"']
elif config == 'profile':
flags += ['/Ox', '/D', '"BUILD_PROFILE=1"']
elif config == 'deploy':
flags += ['/Ox', '/D', '"BUILD_DEPLOY=1"']
return flags
def make_ararchflags(self, arch, targettype):
flags = []
if arch == 'x86':
flags += ['/MACHINE:X86']
elif arch == 'x86-64':
flags += ['/MACHINE:X64']
return flags
def make_arconfigflags(self, config, targettype):
flags = []
if config != 'debug':
flags += ['/LTCG']
return flags
def make_linkarchflags(self, arch, targettype):
flags = []
if arch == 'x86':
flags += ['/MACHINE:X86']
elif arch == 'x86-64':
flags += ['/MACHINE:X64']
return flags
def make_linkconfigflags(self, config, targettype):
flags = []
if config == 'debug':
flags += ['/INCREMENTAL']
else:
flags += ['/LTCG', '/INCREMENTAL:NO', '/OPT:REF', '/OPT:ICF']
return flags
def make_libs(self, libs):
if libs is not None:
return [lib + '.lib' for lib in libs]
return []
def make_configlibpaths(self, config, arch, extralibpaths):
libpaths = [
self.libpath,
os.path.join(self.libpath, arch),
os.path.join(self.libpath, config),
os.path.join(self.libpath, config, arch)
]
if extralibpaths is not None:
libpaths += [os.path.join(libpath, self.libpath) for libpath in extralibpaths]
libpaths += [os.path.join(libpath, self.libpath, arch) for libpath in extralibpaths]
libpaths += [os.path.join(libpath, self.libpath, config) for libpath in extralibpaths]
libpaths += [os.path.join(libpath, self.libpath, config, arch) for libpath in extralibpaths]
if self.sdkpath != '':
if arch == 'x86':
libpaths += [os.path.join(self.toolchain, 'lib', 'x86')]
if self.sdkversion == 'v8.1':
libpaths += [os.path.join( self.sdkpath, 'lib', 'winv6.3', 'um', 'x86')]
if self.sdkversion == 'v10.0':
libpaths += [os.path.join(self.sdkpath, 'lib', self.sdkversionpath, 'um', 'x86')]
libpaths += [os.path.join(self.sdkpath, 'lib', self.sdkversionpath, 'ucrt', 'x86')]
else:
libpaths += [os.path.join( self.toolchain, 'lib', 'x64')]
if self.sdkversion == 'v8.1':
libpaths += [os.path.join( self.sdkpath, 'lib', 'winv6.3', 'um', 'x64')]
if self.sdkversion == 'v10.0':
libpaths += [os.path.join( self.sdkpath, 'lib', self.sdkversionpath, 'um', 'x64')]
libpaths += [os.path.join( self.sdkpath, 'lib', self.sdkversionpath, 'ucrt', 'x64')]
return self.make_libpaths(libpaths)
def cc_variables(self, config, arch, targettype, variables):
localvariables = [('toolchain', self.make_arch_toolchain_path(arch))]
if 'includepaths' in variables:
moreincludepaths = self.make_includepaths(variables['includepaths'])
if not moreincludepaths == []:
localvariables += [('moreincludepaths', moreincludepaths)]
if 'modulepath' in variables:
localvariables += [('pdbpath', os.path.join(variables['modulepath'], 'ninja.pdb'))]
carchflags = self.make_carchflags(arch, targettype)
if carchflags != []:
localvariables += [('carchflags', carchflags)]
cconfigflags = self.make_cconfigflags(config, targettype)
if cconfigflags != []:
localvariables += [('cconfigflags', cconfigflags)]
if 'defines' in variables:
definelist = []
for define in variables['defines']:
definelist += ['/D', '"' + define + '"']
localvariables += [('cmoreflags', definelist)]
return localvariables
def ar_variables(self, config, arch, targettype, variables):
localvariables = [('toolchain', self.make_arch_toolchain_path(arch))]
ararchflags = self.make_ararchflags(arch, targettype)
if ararchflags != []:
localvariables += [('ararchflags', ararchflags)]
arconfigflags = self.make_arconfigflags(config, targettype)
if arconfigflags != []:
localvariables += [('arconfigflags', arconfigflags)]
return localvariables
def link_variables(self, config, arch, targettype, variables):
localvariables = [('toolchain', self.make_arch_toolchain_path(arch))]
linkarchflags = self.make_linkarchflags(arch, targettype)
if linkarchflags != []:
localvariables += [('linkarchflags', linkarchflags)]
linkconfigflags = self.make_linkconfigflags(config, targettype)
if linkconfigflags != []:
localvariables += [('linkconfigflags', linkconfigflags)]
if 'modulepath' in variables:
localvariables += [('pdbpath', os.path.join(variables['modulepath'], 'ninja.pdb'))]
if 'libs' in variables:
libvar = self.make_libs(variables['libs'])
if libvar != []:
localvariables += [('libs', libvar)]
libpaths = []
if 'libpaths' in variables:
libpaths = variables['libpaths']
localvariables += [('configlibpaths', self.make_configlibpaths(config, arch, libpaths))]
return localvariables
def builder_cc(self, writer, config, arch, targettype, infile, outfile, variables):
return writer.build(outfile, 'cc', infile, implicit = self.implicit_deps(config, variables), variables = self.cc_variables(config, arch, targettype, variables))
def builder_cxx(self, writer, config, arch, targettype, infile, outfile, variables):
return writer.build(outfile, 'cxx', infile, implicit = self.implicit_deps(config, variables), variables = self.cc_variables(config, arch, targettype, variables))
def builder_lib(self, writer, config, arch, targettype, infiles, outfile, variables):
return writer.build(outfile, 'ar', infiles, implicit = self.implicit_deps(config, variables), variables = self.ar_variables(config, arch, targettype, variables))
def builder_sharedlib(self, writer, config, arch, targettype, infiles, outfile, variables):
return writer.build(outfile, 'dll', infiles, implicit = self.implicit_deps(config, variables), variables = self.link_variables(config, arch, targettype, variables))
def builder_bin(self, writer, config, arch, targettype, infiles, outfile, variables):
return writer.build(outfile, 'link', infiles, implicit = self.implicit_deps(config, variables), variables = self.link_variables(config, arch, targettype, variables))
def create(host, target, toolchain):
return MSVCToolchain(host, target, toolchain)
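# Quick illustration (hypothetical paths, not part of the toolchain): the
# make_includepaths/make_libpaths/make_libs helpers above map plain lists to
# MSVC command-line fragments. Restated without the path escaping for clarity:
def _example_msvc_fragments():
    includepaths = ['src', 'deps/include']
    libpaths = ['lib', 'lib/x86-64']
    libs = ['kernel32', 'user32']
    return (['/I' + p for p in includepaths],
            ['/LIBPATH:' + p for p in libpaths],
            [l + '.lib' for l in libs])
# -> (['/Isrc', '/Ideps/include'], ['/LIBPATH:lib', '/LIBPATH:lib/x86-64'],
#     ['kernel32.lib', 'user32.lib'])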
|
|
import getpass
import os
import re
import shutil
import signal
import time
import pytest
from unit.applications.lang.php import TestApplicationPHP
from unit.option import option
class TestPHPApplication(TestApplicationPHP):
prerequisites = {'modules': {'php': 'all'}}
def before_disable_functions(self):
body = self.get()['body']
assert re.search(r'time: \d+', body), 'disable_functions before time'
assert re.search(r'exec: \/\w+', body), 'disable_functions before exec'
def check_opcache(self):
resp = self.get()
assert resp['status'] == 200, 'status'
headers = resp['headers']
if 'X-OPcache' in headers and headers['X-OPcache'] == '-1':
pytest.skip('opcache is not supported')
return resp
def set_opcache(self, app, val):
assert 'success' in self.conf(
{"admin": {"opcache.enable": val, "opcache.enable_cli": val}},
'applications/' + app + '/options',
)
r = self.check_opcache()
assert r['headers']['X-OPcache'] == val, 'opcache value'
def set_preload(self, preload):
with open(option.temp_dir + '/php.ini', 'w') as f:
f.write(
"""opcache.preload = %(test_dir)s/php/opcache/preload\
/%(preload)s
opcache.preload_user = %(user)s
"""
% {
'test_dir': option.test_dir,
'preload': preload,
'user': option.user or getpass.getuser(),
}
)
assert 'success' in self.conf(
{"file": option.temp_dir + "/php.ini"},
'applications/opcache/options',
)
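# Sketch (hypothetical file name and user; the real values come from
# option.test_dir and option.user) of the php.ini text that set_preload()
# writes before pointing the application's "file" option at it:
def _example_preload_ini(test_dir, preload, user):
    return ("opcache.preload = %(test_dir)s/php/opcache/preload/%(preload)s\n"
            "opcache.preload_user = %(user)s\n"
            % {'test_dir': test_dir, 'preload': preload, 'user': user})
# _example_preload_ini('/tmp/tests', 'preload_example.php', 'nobody') ->
# 'opcache.preload = /tmp/tests/php/opcache/preload/preload_example.php\n'
# 'opcache.preload_user = nobody\n'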
def test_php_application_variables(self):
self.load('variables')
body = 'Test body string.'
resp = self.post(
headers={
'Host': 'localhost',
'Content-Type': 'text/html',
'Custom-Header': 'blah',
'Connection': 'close',
},
body=body,
url='/index.php/blah?var=val',
)
assert resp['status'] == 200, 'status'
headers = resp['headers']
header_server = headers.pop('Server')
assert re.search(r'Unit/[\d\.]+', header_server), 'server header'
assert (
headers.pop('Server-Software') == header_server
), 'server software header'
date = headers.pop('Date')
assert date[-4:] == ' GMT', 'date header timezone'
assert (
abs(self.date_to_sec_epoch(date) - self.sec_epoch()) < 5
), 'date header'
if 'X-Powered-By' in headers:
headers.pop('X-Powered-By')
headers.pop('Content-type')
assert headers == {
'Connection': 'close',
'Content-Length': str(len(body)),
'Request-Method': 'POST',
'Path-Info': '/blah',
'Request-Uri': '/index.php/blah?var=val',
'Http-Host': 'localhost',
'Server-Protocol': 'HTTP/1.1',
'Custom-Header': 'blah',
}, 'headers'
assert resp['body'] == body, 'body'
def test_php_application_query_string(self):
self.load('query_string')
resp = self.get(url='/?var1=val1&var2=val2')
assert (
resp['headers']['Query-String'] == 'var1=val1&var2=val2'
), 'query string'
def test_php_application_query_string_empty(self):
self.load('query_string')
resp = self.get(url='/?')
assert resp['status'] == 200, 'query string empty status'
assert resp['headers']['Query-String'] == '', 'query string empty'
def test_php_application_fastcgi_finish_request(self, unit_pid):
self.load('fastcgi_finish_request')
assert 'success' in self.conf(
{"admin": {"auto_globals_jit": "1"}},
'applications/fastcgi_finish_request/options',
)
assert self.get()['body'] == '0123'
os.kill(unit_pid, signal.SIGUSR1)
errs = self.findall(r'Error in fastcgi_finish_request')
assert len(errs) == 0, 'no error'
def test_php_application_fastcgi_finish_request_2(self, unit_pid):
self.load('fastcgi_finish_request')
assert 'success' in self.conf(
{"admin": {"auto_globals_jit": "1"}},
'applications/fastcgi_finish_request/options',
)
resp = self.get(url='/?skip')
assert resp['status'] == 200
assert resp['body'] == ''
os.kill(unit_pid, signal.SIGUSR1)
errs = self.findall(r'Error in fastcgi_finish_request')
assert len(errs) == 0, 'no error'
def test_php_application_query_string_absent(self):
self.load('query_string')
resp = self.get()
assert resp['status'] == 200, 'query string absent status'
assert resp['headers']['Query-String'] == '', 'query string absent'
def test_php_application_phpinfo(self):
self.load('phpinfo')
resp = self.get()
assert resp['status'] == 200, 'status'
assert resp['body'] != '', 'body not empty'
def test_php_application_header_status(self):
self.load('header')
assert (
self.get(
headers={
'Host': 'localhost',
'Connection': 'close',
'X-Header': 'HTTP/1.1 404 Not Found',
}
)['status']
== 404
), 'status'
assert (
self.get(
headers={
'Host': 'localhost',
'Connection': 'close',
'X-Header': 'http/1.1 404 Not Found',
}
)['status']
== 404
), 'status case insensitive'
assert (
self.get(
headers={
'Host': 'localhost',
'Connection': 'close',
'X-Header': 'HTTP/ 404 Not Found',
}
)['status']
== 404
), 'status version empty'
def test_php_application_404(self):
self.load('404')
resp = self.get()
assert resp['status'] == 404, '404 status'
assert re.search(
r'<title>404 Not Found</title>', resp['body']
), '404 body'
def test_php_application_keepalive_body(self):
self.load('mirror')
assert self.get()['status'] == 200, 'init'
body = '0123456789' * 500
(resp, sock) = self.post(
headers={
'Host': 'localhost',
'Connection': 'keep-alive',
'Content-Type': 'text/html',
},
start=True,
body=body,
read_timeout=1,
)
assert resp['body'] == body, 'keep-alive 1'
body = '0123456789'
resp = self.post(
headers={
'Host': 'localhost',
'Connection': 'close',
'Content-Type': 'text/html',
},
sock=sock,
body=body,
)
assert resp['body'] == body, 'keep-alive 2'
def test_php_application_conditional(self):
self.load('conditional')
assert re.search(r'True', self.get()['body']), 'conditional true'
assert re.search(r'False', self.post()['body']), 'conditional false'
def test_php_application_get_variables(self):
self.load('get_variables')
resp = self.get(url='/?var1=val1&var2=&var3')
assert resp['headers']['X-Var-1'] == 'val1', 'GET variables'
assert resp['headers']['X-Var-2'] == '', 'GET variables 2'
assert resp['headers']['X-Var-3'] == '', 'GET variables 3'
assert resp['headers']['X-Var-4'] == 'not set', 'GET variables 4'
def test_php_application_post_variables(self):
self.load('post_variables')
resp = self.post(
headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'localhost',
'Connection': 'close',
},
body='var1=val1&var2=',
)
assert resp['headers']['X-Var-1'] == 'val1', 'POST variables'
assert resp['headers']['X-Var-2'] == '', 'POST variables 2'
assert resp['headers']['X-Var-3'] == 'not set', 'POST variables 3'
def test_php_application_cookies(self):
self.load('cookies')
resp = self.get(
headers={
'Cookie': 'var=val; var2=val2',
'Host': 'localhost',
'Connection': 'close',
}
)
assert resp['headers']['X-Cookie-1'] == 'val', 'cookie'
assert resp['headers']['X-Cookie-2'] == 'val2', 'cookie'
def test_php_application_ini_precision(self):
self.load('ini_precision')
assert self.get()['headers']['X-Precision'] != '4', 'ini value default'
assert 'success' in self.conf(
{"file": "ini/php.ini"}, 'applications/ini_precision/options'
)
assert (
self.get()['headers']['X-File']
== option.test_dir + '/php/ini_precision/ini/php.ini'
), 'ini file'
assert self.get()['headers']['X-Precision'] == '4', 'ini value'
@pytest.mark.skip('not yet')
def test_php_application_ini_admin_user(self):
self.load('ini_precision')
assert 'error' in self.conf(
{"user": {"precision": "4"}, "admin": {"precision": "5"}},
'applications/ini_precision/options',
), 'ini admin user'
def test_php_application_ini_admin(self):
self.load('ini_precision')
assert 'success' in self.conf(
{"file": "ini/php.ini", "admin": {"precision": "5"}},
'applications/ini_precision/options',
)
assert (
self.get()['headers']['X-File']
== option.test_dir + '/php/ini_precision/ini/php.ini'
), 'ini file'
assert self.get()['headers']['X-Precision'] == '5', 'ini value admin'
def test_php_application_ini_user(self):
self.load('ini_precision')
assert 'success' in self.conf(
{"file": "ini/php.ini", "user": {"precision": "5"}},
'applications/ini_precision/options',
)
assert (
self.get()['headers']['X-File']
== option.test_dir + '/php/ini_precision/ini/php.ini'
), 'ini file'
assert self.get()['headers']['X-Precision'] == '5', 'ini value user'
def test_php_application_ini_user_2(self):
self.load('ini_precision')
assert 'success' in self.conf(
{"file": "ini/php.ini"}, 'applications/ini_precision/options'
)
assert self.get()['headers']['X-Precision'] == '4', 'ini user file'
assert 'success' in self.conf(
{"precision": "5"}, 'applications/ini_precision/options/user'
)
assert self.get()['headers']['X-Precision'] == '5', 'ini value user'
def test_php_application_ini_set_admin(self):
self.load('ini_precision')
assert 'success' in self.conf(
{"admin": {"precision": "5"}}, 'applications/ini_precision/options'
)
assert (
self.get(url='/?precision=6')['headers']['X-Precision'] == '5'
), 'ini set admin'
def test_php_application_ini_set_user(self):
self.load('ini_precision')
assert 'success' in self.conf(
{"user": {"precision": "5"}}, 'applications/ini_precision/options'
)
assert (
self.get(url='/?precision=6')['headers']['X-Precision'] == '6'
), 'ini set user'
def test_php_application_ini_repeat(self):
self.load('ini_precision')
assert 'success' in self.conf(
{"user": {"precision": "5"}}, 'applications/ini_precision/options'
)
assert self.get()['headers']['X-Precision'] == '5', 'ini value'
assert self.get()['headers']['X-Precision'] == '5', 'ini value repeat'
def test_php_application_disable_functions_exec(self):
self.load('time_exec')
self.before_disable_functions()
assert 'success' in self.conf(
{"admin": {"disable_functions": "exec"}},
'applications/time_exec/options',
)
body = self.get()['body']
assert re.search(r'time: \d+', body), 'disable_functions time'
assert not re.search(r'exec: \/\w+', body), 'disable_functions exec'
def test_php_application_disable_functions_comma(self):
self.load('time_exec')
self.before_disable_functions()
assert 'success' in self.conf(
{"admin": {"disable_functions": "exec,time"}},
'applications/time_exec/options',
)
body = self.get()['body']
assert not re.search(
r'time: \d+', body
), 'disable_functions comma time'
assert not re.search(
r'exec: \/\w+', body
), 'disable_functions comma exec'
def test_php_application_auth(self):
self.load('auth')
resp = self.get()
assert resp['status'] == 200, 'status'
assert resp['headers']['X-Digest'] == 'not set', 'digest'
assert resp['headers']['X-User'] == 'not set', 'user'
assert resp['headers']['X-Password'] == 'not set', 'password'
resp = self.get(
headers={
'Host': 'localhost',
'Authorization': 'Basic dXNlcjpwYXNzd29yZA==',
'Connection': 'close',
}
)
assert resp['status'] == 200, 'basic status'
assert resp['headers']['X-Digest'] == 'not set', 'basic digest'
assert resp['headers']['X-User'] == 'user', 'basic user'
assert resp['headers']['X-Password'] == 'password', 'basic password'
resp = self.get(
headers={
'Host': 'localhost',
'Authorization': 'Digest username="blah", realm="", uri="/"',
'Connection': 'close',
}
)
assert resp['status'] == 200, 'digest status'
assert (
resp['headers']['X-Digest'] == 'username="blah", realm="", uri="/"'
), 'digest digest'
assert resp['headers']['X-User'] == 'not set', 'digest user'
assert resp['headers']['X-Password'] == 'not set', 'digest password'
def test_php_application_auth_invalid(self):
self.load('auth')
def check_auth(auth):
resp = self.get(
headers={
'Host': 'localhost',
'Authorization': auth,
'Connection': 'close',
}
)
assert resp['status'] == 200, 'status'
assert resp['headers']['X-Digest'] == 'not set', 'Digest'
assert resp['headers']['X-User'] == 'not set', 'User'
assert resp['headers']['X-Password'] == 'not set', 'Password'
check_auth('Basic dXN%cjpwYXNzd29yZA==')
check_auth('Basic XNlcjpwYXNzd29yZA==')
check_auth('Basic DdXNlcjpwYXNzd29yZA==')
check_auth('Basic blah')
check_auth('Basic')
check_auth('Digest')
check_auth('blah')
def test_php_application_disable_functions_space(self):
self.load('time_exec')
self.before_disable_functions()
assert 'success' in self.conf(
{"admin": {"disable_functions": "exec time"}},
'applications/time_exec/options',
)
body = self.get()['body']
assert not re.search(
r'time: \d+', body
), 'disable_functions space time'
assert not re.search(
r'exec: \/\w+', body
), 'disable_functions space exec'
def test_php_application_disable_functions_user(self):
self.load('time_exec')
self.before_disable_functions()
assert 'success' in self.conf(
{"user": {"disable_functions": "exec"}},
'applications/time_exec/options',
)
body = self.get()['body']
assert re.search(r'time: \d+', body), 'disable_functions user time'
assert not re.search(
r'exec: \/\w+', body
), 'disable_functions user exec'
def test_php_application_disable_functions_nonexistent(self):
self.load('time_exec')
self.before_disable_functions()
assert 'success' in self.conf(
{"admin": {"disable_functions": "blah"}},
'applications/time_exec/options',
)
body = self.get()['body']
assert re.search(
r'time: \d+', body
), 'disable_functions nonexistent time'
assert re.search(
r'exec: \/\w+', body
), 'disable_functions nonexistent exec'
def test_php_application_disable_classes(self):
self.load('date_time')
assert re.search(
r'012345', self.get()['body']
), 'disable_classes before'
assert 'success' in self.conf(
{"admin": {"disable_classes": "DateTime"}},
'applications/date_time/options',
)
assert not re.search(
r'012345', self.get()['body']
), 'disable_classes after'
def test_php_application_disable_classes_user(self):
self.load('date_time')
assert re.search(
r'012345', self.get()['body']
), 'disable_classes before'
assert 'success' in self.conf(
{"user": {"disable_classes": "DateTime"}},
'applications/date_time/options',
)
assert not re.search(
r'012345', self.get()['body']
), 'disable_classes after'
def test_php_application_error_log(self):
self.load('error_log')
assert self.get()['status'] == 200, 'status'
time.sleep(1)
assert self.get()['status'] == 200, 'status 2'
pattern = r'\d{4}\/\d\d\/\d\d\s\d\d:.+\[notice\].+Error in application'
assert self.wait_for_record(pattern) is not None, 'errors print'
errs = self.findall(pattern)
assert len(errs) == 2, 'error_log count'
date = errs[0].split('[')[0]
date2 = errs[1].split('[')[0]
assert date != date2, 'date diff'
def test_php_application_script(self):
assert 'success' in self.conf(
{
"listeners": {"*:7080": {"pass": "applications/script"}},
"applications": {
"script": {
"type": self.get_application_type(),
"processes": {"spare": 0},
"root": option.test_dir + "/php/script",
"script": "phpinfo.php",
}
},
}
), 'configure script'
resp = self.get()
assert resp['status'] == 200, 'status'
assert resp['body'] != '', 'body not empty'
def test_php_application_index_default(self):
assert 'success' in self.conf(
{
"listeners": {"*:7080": {"pass": "applications/phpinfo"}},
"applications": {
"phpinfo": {
"type": self.get_application_type(),
"processes": {"spare": 0},
"root": option.test_dir + "/php/phpinfo",
}
},
}
), 'configure index default'
resp = self.get()
assert resp['status'] == 200, 'status'
assert resp['body'] != '', 'body not empty'
def test_php_application_extension_check(self, temp_dir):
self.load('phpinfo')
assert self.get(url='/index.wrong')['status'] != 200, 'status'
new_root = temp_dir + "/php"
os.mkdir(new_root)
shutil.copy(option.test_dir + '/php/phpinfo/index.wrong', new_root)
assert 'success' in self.conf(
{
"listeners": {"*:7080": {"pass": "applications/phpinfo"}},
"applications": {
"phpinfo": {
"type": self.get_application_type(),
"processes": {"spare": 0},
"root": new_root,
"working_directory": new_root,
}
},
}
), 'configure new root'
resp = self.get()
assert str(resp['status']) + resp['body'] != '200', 'status new root'
def run_php_application_cwd_root_tests(self):
assert 'success' in self.conf_delete(
'applications/cwd/working_directory'
)
script_cwd = option.test_dir + '/php/cwd'
resp = self.get()
assert resp['status'] == 200, 'status ok'
assert resp['body'] == script_cwd, 'default cwd'
assert 'success' in self.conf(
'"' + option.test_dir + '"', 'applications/cwd/working_directory',
)
resp = self.get()
assert resp['status'] == 200, 'status ok'
assert resp['body'] == script_cwd, 'wdir cwd'
resp = self.get(url='/?chdir=/')
assert resp['status'] == 200, 'status ok'
assert resp['body'] == '/', 'cwd after chdir'
# cwd must be restored
resp = self.get()
assert resp['status'] == 200, 'status ok'
assert resp['body'] == script_cwd, 'cwd restored'
resp = self.get(url='/subdir/')
assert resp['body'] == script_cwd + '/subdir', 'cwd subdir'
def test_php_application_cwd_root(self):
self.load('cwd')
self.run_php_application_cwd_root_tests()
def test_php_application_cwd_opcache_disabled(self):
self.load('cwd')
self.set_opcache('cwd', '0')
self.run_php_application_cwd_root_tests()
def test_php_application_cwd_opcache_enabled(self):
self.load('cwd')
self.set_opcache('cwd', '1')
self.run_php_application_cwd_root_tests()
def run_php_application_cwd_script_tests(self):
self.load('cwd')
script_cwd = option.test_dir + '/php/cwd'
assert 'success' in self.conf_delete(
'applications/cwd/working_directory'
)
assert 'success' in self.conf('"index.php"', 'applications/cwd/script')
assert self.get()['body'] == script_cwd, 'default cwd'
assert self.get(url='/?chdir=/')['body'] == '/', 'cwd after chdir'
# cwd must be restored
assert self.get()['body'] == script_cwd, 'cwd restored'
def test_php_application_cwd_script(self):
self.load('cwd')
self.run_php_application_cwd_script_tests()
def test_php_application_cwd_script_opcache_disabled(self):
self.load('cwd')
self.set_opcache('cwd', '0')
self.run_php_application_cwd_script_tests()
def test_php_application_cwd_script_opcache_enabled(self):
self.load('cwd')
self.set_opcache('cwd', '1')
self.run_php_application_cwd_script_tests()
def test_php_application_path_relative(self):
self.load('open')
assert self.get()['body'] == 'test', 'relative path'
assert (
self.get(url='/?chdir=/')['body'] != 'test'
), 'relative path w/ chdir'
assert self.get()['body'] == 'test', 'relative path 2'
def test_php_application_shared_opcache(self):
self.load('opcache', limits={'requests': 1})
r = self.check_opcache()
pid = r['headers']['X-Pid']
assert r['headers']['X-Cached'] == '0', 'not cached'
r = self.get()
assert r['headers']['X-Pid'] != pid, 'new instance'
assert r['headers']['X-Cached'] == '1', 'cached'
def test_php_application_opcache_preload_chdir(self, temp_dir):
self.load('opcache')
self.check_opcache()
self.set_preload('chdir.php')
assert self.get()['headers']['X-Cached'] == '0', 'not cached'
assert self.get()['headers']['X-Cached'] == '1', 'cached'
def test_php_application_opcache_preload_ffr(self, temp_dir):
self.load('opcache')
self.check_opcache()
self.set_preload('fastcgi_finish_request.php')
assert self.get()['headers']['X-Cached'] == '0', 'not cached'
assert self.get()['headers']['X-Cached'] == '1', 'cached'
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Paul Michali, Cisco Systems, Inc.
"""Mock REST requests to Cisco Cloud Services Router."""
import re
from functools import wraps
# import httmock
import requests
from requests import exceptions as r_exc
from neutron.openstack.common import log as logging
# TODO(pcm) Remove once httmock package is added to test-requirements. For
# now, uncomment and include httmock source to UT
from neutron.tests.unit.services.vpn.device_drivers import httmock
# TODO(pcm) Remove, once verified these have been fixed
FIXED_CSCum35484 = False
FIXED_CSCul82396 = False
FIXED_CSCum10324 = False
LOG = logging.getLogger(__name__)
def repeat(n):
"""Decorator to limit the number of times a handler is called.
Will allow the wrapped function (handler) to be called 'n' times.
After that, this will return None for any additional calls,
allowing other handlers, if any, to be invoked.
"""
class static:
retries = n
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
if static.retries == 0:
return None
static.retries -= 1
return func(*args, **kwargs)
return wrapped
return decorator
def filter_request(methods, resource):
"""Decorator to invoke handler once for a specific resource.
This will call the handler only for a specific resource using
a specific method(s). Any other resource request or method will
return None, allowing other handlers, if any, to be invoked.
"""
class static:
target_methods = [m.upper() for m in methods]
target_resource = resource
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
if (args[1].method in static.target_methods and
static.target_resource in args[0].path):
return func(*args, **kwargs)
else:
return None # Not for this resource
return wrapped
return decorator
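# Illustrative sketch (not used by any test): the two decorators above can be
# stacked with an httmock matcher so that a handler answers only the first GET
# of one resource and then defers to the other handlers. The resource name and
# handler name below are hypothetical.
@filter_request(['get'], 'global/banner')
@repeat(1)
@httmock.urlmatch(netloc=r'localhost')
def example_single_use_get(url, request):
    if not request.headers.get('X-auth-token', None):
        return {'status_code': requests.codes.UNAUTHORIZED}
    return {'status_code': requests.codes.NO_CONTENT}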
@httmock.urlmatch(netloc=r'localhost')
def token(url, request):
if 'auth/token-services' in url.path:
return {'status_code': requests.codes.OK,
'content': {'token-id': 'dummy-token'}}
@httmock.urlmatch(netloc=r'localhost')
def token_unauthorized(url, request):
if 'auth/token-services' in url.path:
return {'status_code': requests.codes.UNAUTHORIZED}
@httmock.urlmatch(netloc=r'wrong-host')
def token_wrong_host(url, request):
raise r_exc.ConnectionError()
@httmock.all_requests
def token_timeout(url, request):
raise r_exc.Timeout()
@filter_request(['get'], 'global/host-name')
@httmock.all_requests
def timeout(url, request):
"""Simulated timeout of a normal request."""
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
raise r_exc.Timeout()
@httmock.urlmatch(netloc=r'localhost')
def no_such_resource(url, request):
"""Indicate not found error, when invalid resource requested."""
return {'status_code': requests.codes.NOT_FOUND}
@filter_request(['get'], 'global/host-name')
@repeat(1)
@httmock.urlmatch(netloc=r'localhost')
def expired_request(url, request):
"""Simulate access denied failure on first request for this resource.
Intent here is to simulate that the token has expired, by failing
the first request to the resource. Because of the repeat=1, this
will only be called once, and subsequent calls will not be handled
by this function, but instead will access the normal handler and
will pass. Currently configured for a GET request, but will work
with POST and PUT as well. For DELETE, would need to filter_request on a
different resource (e.g. 'global/local-users')
"""
return {'status_code': requests.codes.UNAUTHORIZED}
@httmock.urlmatch(netloc=r'localhost')
def normal_get(url, request):
if request.method != 'GET':
return
LOG.debug("DEBUG: GET mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
if 'global/host-name' in url.path:
content = {u'kind': u'object#host-name',
u'host-name': u'Router'}
return httmock.response(requests.codes.OK, content=content)
if 'global/local-users' in url.path:
content = {u'kind': u'collection#local-user',
u'users': ['peter', 'paul', 'mary']}
return httmock.response(requests.codes.OK, content=content)
if 'interfaces/GigabitEthernet' in url.path:
actual_interface = url.path.split('/')[-1]
ip = actual_interface[-1]
content = {u'kind': u'object#interface',
u'description': u'Changed description',
u'if-name': actual_interface,
u'proxy-arp': True,
u'subnet-mask': u'255.255.255.0',
u'icmp-unreachable': True,
u'nat-direction': u'',
u'icmp-redirects': True,
u'ip-address': u'192.168.200.%s' % ip,
u'verify-unicast-source': False,
u'type': u'ethernet'}
return httmock.response(requests.codes.OK, content=content)
if 'vpn-svc/ike/policies/2' in url.path:
content = {u'kind': u'object#ike-policy',
u'priority-id': u'2',
u'version': u'v1',
u'local-auth-method': u'pre-share',
u'encryption': u'aes',
u'hash': u'sha',
u'dhGroup': 5,
u'lifetime': 3600}
return httmock.response(requests.codes.OK, content=content)
if 'vpn-svc/ike/keyrings' in url.path:
content = {u'kind': u'object#ike-keyring',
u'keyring-name': u'5',
u'pre-shared-key-list': [
{u'key': u'super-secret',
u'encrypted': False,
u'peer-address': u'10.10.10.20 255.255.255.0'}
]}
return httmock.response(requests.codes.OK, content=content)
if 'vpn-svc/ipsec/policies/' in url.path:
ipsec_policy_id = url.path.split('/')[-1]
content = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'policy-id': u'%s' % ipsec_policy_id,
u'protection-suite': {
u'esp-encryption': u'esp-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac',
},
u'anti-replay-window-size': u'128',
u'lifetime-sec': 120,
u'pfs': u'group5',
u'lifetime-kb': 4608000,
u'idle-time': None}
return httmock.response(requests.codes.OK, content=content)
if 'vpn-svc/site-to-site/Tunnel' in url.path:
tunnel = url.path.split('/')[-1]
# Use same number, to allow mock to generate IPSec policy ID
ipsec_policy_id = tunnel[6:]
content = {u'kind': u'object#vpn-site-to-site',
u'vpn-interface-name': u'%s' % tunnel,
u'ip-version': u'ipv4',
u'vpn-type': u'site-to-site',
u'ipsec-policy-id': u'%s' % ipsec_policy_id,
u'ike-profile-id': None,
u'mtu': 1500,
u'local-device': {
u'ip-address': '10.3.0.1/24',
u'tunnel-ip-address': '10.10.10.10'
},
u'remote-device': {
u'tunnel-ip-address': '10.10.10.20'
}}
return httmock.response(requests.codes.OK, content=content)
if 'vpn-svc/ike/keepalive' in url.path:
content = {u'interval': 60,
u'retry': 4,
u'periodic': True}
return httmock.response(requests.codes.OK, content=content)
if 'routing-svc/static-routes' in url.path:
content = {u'destination-network': u'10.1.0.0/24',
u'kind': u'object#static-route',
u'next-hop-router': None,
u'outgoing-interface': u'GigabitEthernet1',
u'admin-distance': 1}
return httmock.response(requests.codes.OK, content=content)
if 'vpn-svc/site-to-site/active/sessions' in url.path:
# Only including needed fields for mock
content = {u'kind': u'collection#vpn-active-sessions',
u'items': [{u'status': u'DOWN-NEGOTIATING',
u'vpn-interface-name': u'Tunnel123'}, ]}
return httmock.response(requests.codes.OK, content=content)
@filter_request(['get'], 'vpn-svc/ike/keyrings')
@httmock.urlmatch(netloc=r'localhost')
def get_fqdn(url, request):
LOG.debug("DEBUG: GET FQDN mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
content = {u'kind': u'object#ike-keyring',
u'keyring-name': u'5',
u'pre-shared-key-list': [
{u'key': u'super-secret',
u'encrypted': False,
u'peer-address': u'cisco.com'}
]}
return httmock.response(requests.codes.OK, content=content)
@filter_request(['get'], 'vpn-svc/ipsec/policies/')
@httmock.urlmatch(netloc=r'localhost')
def get_no_ah(url, request):
LOG.debug("DEBUG: GET No AH mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
ipsec_policy_id = url.path.split('/')[-1]
content = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'anti-replay-window-size': u'128',
u'policy-id': u'%s' % ipsec_policy_id,
u'protection-suite': {
u'esp-encryption': u'esp-aes',
u'esp-authentication': u'esp-sha-hmac',
},
u'lifetime-sec': 120,
u'pfs': u'group5',
u'lifetime-kb': 4608000,
u'idle-time': None}
return httmock.response(requests.codes.OK, content=content)
@httmock.urlmatch(netloc=r'localhost')
def get_defaults(url, request):
if request.method != 'GET':
return
LOG.debug("DEBUG: GET mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
if 'vpn-svc/ike/policies/2' in url.path:
content = {u'kind': u'object#ike-policy',
u'priority-id': u'2',
u'version': u'v1',
u'local-auth-method': u'pre-share',
u'encryption': u'des',
u'hash': u'sha',
u'dhGroup': 1,
u'lifetime': 86400}
return httmock.response(requests.codes.OK, content=content)
if 'vpn-svc/ipsec/policies/' in url.path:
ipsec_policy_id = url.path.split('/')[-1]
content = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'policy-id': u'%s' % ipsec_policy_id,
u'protection-suite': {},
u'lifetime-sec': 3600,
u'pfs': u'Disable',
u'anti-replay-window-size': u'None',
u'lifetime-kb': 4608000,
u'idle-time': None}
return httmock.response(requests.codes.OK, content=content)
@filter_request(['get'], 'vpn-svc/site-to-site')
@httmock.urlmatch(netloc=r'localhost')
def get_unnumbered(url, request):
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
tunnel = url.path.split('/')[-1]
ipsec_policy_id = tunnel[6:]
content = {u'kind': u'object#vpn-site-to-site',
u'vpn-interface-name': u'%s' % tunnel,
u'ip-version': u'ipv4',
u'vpn-type': u'site-to-site',
u'ipsec-policy-id': u'%s' % ipsec_policy_id,
u'ike-profile-id': None,
u'mtu': 1500,
u'local-device': {
u'ip-address': u'GigabitEthernet3',
u'tunnel-ip-address': u'10.10.10.10'
},
u'remote-device': {
u'tunnel-ip-address': u'10.10.10.20'
}}
return httmock.response(requests.codes.OK, content=content)
@filter_request(['get'], 'vpn-svc/site-to-site')
@httmock.urlmatch(netloc=r'localhost')
def get_mtu(url, request):
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
tunnel = url.path.split('/')[-1]
ipsec_policy_id = tunnel[6:]
content = {u'kind': u'object#vpn-site-to-site',
u'vpn-interface-name': u'%s' % tunnel,
u'ip-version': u'ipv4',
u'vpn-type': u'site-to-site',
u'ipsec-policy-id': u'%s' % ipsec_policy_id,
u'ike-profile-id': None,
u'mtu': 9192,
u'local-device': {
u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'
},
u'remote-device': {
u'tunnel-ip-address': u'10.10.10.20'
}}
return httmock.response(requests.codes.OK, content=content)
@filter_request(['get'], 'vpn-svc/ike/keepalive')
@httmock.urlmatch(netloc=r'localhost')
def get_not_configured(url, request):
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.NOT_FOUND}
@filter_request(['get'], 'vpn-svc/site-to-site/active/sessions')
@httmock.urlmatch(netloc=r'localhost')
def get_none(url, request):
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
content = {u'kind': u'collection#vpn-active-sessions',
u'items': []}
return httmock.response(requests.codes.OK, content=content)
@filter_request(['get'], 'interfaces/GigabitEthernet3')
@httmock.urlmatch(netloc=r'localhost')
def get_local_ip(url, request):
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
content = {u'kind': u'object#interface',
u'subnet-mask': u'255.255.255.0',
u'ip-address': u'10.5.0.2'}
return httmock.response(requests.codes.OK, content=content)
@httmock.urlmatch(netloc=r'localhost')
def post(url, request):
if request.method != 'POST':
return
LOG.debug("DEBUG: POST mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
if 'interfaces/GigabitEthernet' in url.path:
return {'status_code': requests.codes.NO_CONTENT}
if 'global/local-users' in url.path:
if 'username' not in request.body:
return {'status_code': requests.codes.BAD_REQUEST}
if '"privilege": 20' in request.body:
return {'status_code': requests.codes.BAD_REQUEST}
headers = {'location': '%s/test-user' % url.geturl()}
return httmock.response(requests.codes.CREATED, headers=headers)
if 'vpn-svc/ike/policies' in url.path:
headers = {'location': "%s/2" % url.geturl()}
return httmock.response(requests.codes.CREATED, headers=headers)
if 'vpn-svc/ipsec/policies' in url.path:
m = re.search(r'"policy-id": "(\S+)"', request.body)
if m:
headers = {'location': "%s/%s" % (url.geturl(), m.group(1))}
return httmock.response(requests.codes.CREATED, headers=headers)
return {'status_code': requests.codes.BAD_REQUEST}
if 'vpn-svc/ike/keyrings' in url.path:
headers = {'location': "%s/5" % url.geturl()}
return httmock.response(requests.codes.CREATED, headers=headers)
if 'vpn-svc/site-to-site' in url.path:
m = re.search(r'"vpn-interface-name": "(\S+)"', request.body)
if m:
headers = {'location': "%s/%s" % (url.geturl(), m.group(1))}
return httmock.response(requests.codes.CREATED, headers=headers)
return {'status_code': requests.codes.BAD_REQUEST}
if 'routing-svc/static-routes' in url.path:
headers = {'location':
"%s/10.1.0.0_24_GigabitEthernet1" % url.geturl()}
return httmock.response(requests.codes.CREATED, headers=headers)
@filter_request(['post'], 'global/local-users')
@httmock.urlmatch(netloc=r'localhost')
def post_change_attempt(url, request):
LOG.debug("DEBUG: POST change value mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.NOT_FOUND,
'content': {
u'error-code': -1,
u'error-message': u'user test-user already exists'}}
@httmock.urlmatch(netloc=r'localhost')
def post_duplicate(url, request):
LOG.debug("DEBUG: POST duplicate mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.BAD_REQUEST,
'content': {
u'error-code': -1,
u'error-message': u'policy 2 exist, not allow to '
u'update policy using POST method'}}
@filter_request(['post'], 'vpn-svc/site-to-site')
@httmock.urlmatch(netloc=r'localhost')
def post_missing_ipsec_policy(url, request):
LOG.debug("DEBUG: POST missing ipsec policy mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.BAD_REQUEST}
@filter_request(['post'], 'vpn-svc/site-to-site')
@httmock.urlmatch(netloc=r'localhost')
def post_missing_ike_policy(url, request):
LOG.debug("DEBUG: POST missing ike policy mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.BAD_REQUEST}
@filter_request(['post'], 'vpn-svc/site-to-site')
@httmock.urlmatch(netloc=r'localhost')
def post_bad_ip(url, request):
LOG.debug("DEBUG: POST bad IP mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.BAD_REQUEST}
@filter_request(['post'], 'vpn-svc/site-to-site')
@httmock.urlmatch(netloc=r'localhost')
def post_bad_mtu(url, request):
LOG.debug("DEBUG: POST bad mtu mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.BAD_REQUEST}
@filter_request(['post'], 'vpn-svc/ipsec/policies')
@httmock.urlmatch(netloc=r'localhost')
def post_bad_lifetime(url, request):
LOG.debug("DEBUG: POST bad lifetime mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
return {'status_code': requests.codes.BAD_REQUEST}
@httmock.urlmatch(netloc=r'localhost')
def put(url, request):
if request.method != 'PUT':
return
LOG.debug("DEBUG: PUT mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
# Any resource
return {'status_code': requests.codes.NO_CONTENT}
@httmock.urlmatch(netloc=r'localhost')
def delete(url, request):
if request.method != 'DELETE':
return
LOG.debug("DEBUG: DELETE mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
# Any resource
return {'status_code': requests.codes.NO_CONTENT}
@httmock.urlmatch(netloc=r'localhost')
def delete_unknown(url, request):
if request.method != 'DELETE':
return
LOG.debug("DEBUG: DELETE unknown mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
# Any resource
return {'status_code': requests.codes.NOT_FOUND,
'content': {
u'error-code': -1,
u'error-message': 'user unknown not found'}}
@httmock.urlmatch(netloc=r'localhost')
def delete_not_allowed(url, request):
if request.method != 'DELETE':
return
LOG.debug("DEBUG: DELETE not allowed mock for %s", url)
if not request.headers.get('X-auth-token', None):
return {'status_code': requests.codes.UNAUTHORIZED}
# Any resource
return {'status_code': requests.codes.METHOD_NOT_ALLOWED}
|
|
import unittest
from nose.tools import *
from sqlalchemy import *
from bson.objectid import ObjectId
from randomdomain.libs.mongodb import mongo_backend
from randomdomain.libs.dates import unix_utc_now
from mailsync.models.sync import Sync
from mailsync.models.adapter import Adapter
from mailsync.api.mailchimp import MailChimp
from mailsync.api.campaignmonitor import CampaignMonitor
class TestSync(object):
def setUp(self):
self.id = "5073d616dd0ef409aaaaeeec0fee"
self.mysql = {
"driver": "mysql",
"username": "root",
"password": "1234",
"host": "localhost",
"port": 3066,
"database": "testdbfortests"
}
self.adapter = Adapter()
self.adapter.setup(self.mysql)
self.details = mongo_backend.get_collection("details")
self.detail = self.details.find_one({"_id": ObjectId(self.id)})
self.sync = Sync(self.id)
def test_split_rows(self):
chunks = self.sync.split_rows(range(1005), 10)
chunks_len = len(chunks)
assert(chunks_len == 101)
for index, chunk in enumerate(chunks):
if index < (chunks_len - 1):
assert(len(chunk) == 10)
else:
assert(len(chunk) == 5)
chunks = self.sync.split_rows(range(0), 10)
chunks_len = len(chunks)
assert(chunks_len == 0)
chunks = self.sync.split_rows(range(5), 10)
chunks_len = len(chunks)
assert(chunks_len == 1)
assert(len(chunks[0]) == 5)
def test_get_last_inserted_id(self):
last_inserted_id = self.sync.get_last_inserted_id(self.detail["list"])
assert(last_inserted_id == 22816)
last_inserted_id = self.sync.get_last_inserted_id({})
assert(last_inserted_id == None)
last_inserted_id = self.sync.get_last_inserted_id({"status":{}})
assert(last_inserted_id == None)
def test_get_last_id(self):
last_id = self.sync.get_last_id(self.adapter, "user", "userid", self.detail["list"])
assert(last_id == 22816)
last_id = self.sync.get_last_id(self.adapter, "user", "userid", {})
assert(last_id == 1)
def test_update_status_running(self):
# setup
statebefore = self.detail
last_inserted_id = 1234
rows_to_be_inserted = 5678
assert(self.sync.update_status_running(last_inserted_id, rows_to_be_inserted) == True)
detail = self.details.find_one({"_id": ObjectId(self.id)})
assert(detail["list"]["status"]["status"] == "Running")
assert(detail["list"]["status"]["last_inserted_id"] == last_inserted_id)
assert(detail["list"]["status"]["rows_to_be_inserted"] == rows_to_be_inserted)
assert(detail["list"]["status"]["inserted_rows"] == 0)
# tear down
self.detail = statebefore
self.details.save(statebefore)
self.details.ensure_index("_id")
def test_update_status_completed(self):
# setup
statebefore = self.detail
last_id = 1234
assert(self.sync.update_status_completed(last_id) == True)
detail = self.details.find_one({"_id": ObjectId(self.id)})
assert(detail["list"]["status"]["status"] == "Completed")
assert(detail["list"]["status"]["last_inserted_id"] == last_id)
# tear down
self.detail = statebefore
self.details.save(statebefore)
self.details.ensure_index("_id")
def test_sync_users(self):
# setup
statebefore = self.detail
campaignmonitor = CampaignMonitor("787e70d714cdce3178610ddde2d7da08deeeeeddddd")
campaignmonitor_list_id = "fl8b40a4d15de4e7d83ead7e6b839544ib"
header_row = tuple(self.detail["columns"].keys())
user_chunks = [[('John', '[email protected]'), ('Susan', '[email protected]'), ('Carl', '[email protected]')]]
assert(self.sync.sync_users(campaignmonitor, campaignmonitor_list_id, header_row, user_chunks, 22816) == True)
detail = self.details.find_one({"_id": ObjectId(self.id)})
assert(detail["list"]["status"]["inserted_rows"] == statebefore["list"]["status"]["inserted_rows"] + 3)
# teardown
self.details.save(statebefore)
self.details.ensure_index("_id")
def test_get_provider(self):
provider = self.sync.get_provider(self.detail["provider"], "apikey")
assert(provider is not None)
assert(isinstance(provider, CampaignMonitor))
# mocking provider data
provider_data = {}
provider_data["provider"] = "mailchimp"
provider = self.sync.get_provider(provider_data, "apikey")
assert(provider is not None)
assert(isinstance(provider, MailChimp))
def test_status(self):
list_data = self.detail["list"]
status = self.sync.status("Completed", list_data)
assert(status == True)
status = self.sync.status("Completed", {})
assert(status == False)
rdetail = self.details.find_one({"_id": ObjectId("5073d5f0dd0ef409aaec0fed")})
status = self.sync.status("Completed", rdetail["list"])
assert(status == False)
def test_sync_status_completed(self):
time = unix_utc_now()
status = {
"status": "completed",
"message": "Successfully synced.",
"progress": 100,
"last_synced": time
}
assert(self.sync.sync_status_completed(time) == status)
def test_sync_status_running(self):
inserted_rows = 1234
rows_to_be_inserted = 5678
status = {
"status": "running",
"message": "Mailsync is still syncing.",
"progress": (inserted_rows * 100) / rows_to_be_inserted,
"inserted_rows": inserted_rows,
"rows_to_be_inserted": rows_to_be_inserted
}
assert(self.sync.sync_status_running(inserted_rows, rows_to_be_inserted) == status)
rows_to_be_inserted = 0
status = {
"status": "running",
"message": "Mailsync is still syncing.",
"progress": 0,
"inserted_rows": inserted_rows,
"rows_to_be_inserted": rows_to_be_inserted
}
assert(self.sync.sync_status_running(inserted_rows, rows_to_be_inserted) == status)
def test_sync_status_error(self):
status = {
"status": "error",
"message": "An error occured, emails are not successfully synced."
}
assert(self.sync.sync_status_error() == status)
def test_sync_status_delete(self):
success_status = {
"status": "deleted",
"message": "Sync is successfully removed."
}
assert(self.sync.sync_status_delete("Success") == success_status)
error_status = {
"status": "error",
"message": "An error occured, sync is not successfully removed."
}
assert(self.sync.sync_status_delete("Error") == error_status)
assert(self.sync.sync_status_delete("Wrong") == error_status)
def test_delete(self):
# setup
statebefore = self.detail
self.sync._delete()
detail = self.details.find_one({"_id": ObjectId(self.id)})
assert(detail == None)
# teardown
self.details.save(statebefore)
self.details.ensure_index("_id")
def test_delete_sync(self):
# setup
statebefore = self.detail
result = self.sync.delete_sync()
detail = self.details.find_one({"_id": ObjectId(self.id)})
assert(detail == None)
success_status = {
"status": "deleted",
"message": "Sync is successfully removed."
}
assert(result == success_status)
# teardown
self.details.save(statebefore)
self.details.ensure_index("_id")
|
|
#lims
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from .stage01_physiology_data_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_physiology_data_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for this query class.
'''
tables_supported = {'data_stage01_physiology_data':data_stage01_physiology_data,
};
self.set_supportedTables(tables_supported);
def reset_dataStage01_physiology_data(self,experiment_id_I = None):
try:
if experiment_id_I:
reset = self.session.query(data_stage01_physiology_data).filter(data_stage01_physiology_data.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
else:
reset = self.session.query(data_stage01_physiology_data).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def add_dataStage01PhysiologyData(self, data_I):
'''add rows of data_stage01_physiology_data'''
if data_I:
for d in data_I:
try:
data_add = data_stage01_physiology_data(d
#d['experiment_id'],
#d['sample_id'],
##d['sample_name_short'],
##d['time_point'],
##d['sample_date'],
#d['met_id'],
#d['data_raw'],
#d['data_corrected'],
#d['data_units'],
#d['data_reference'],
#d['used_'],
#d['notes']
);
#d['comment_']
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def update_dataStage01PhysiologyData(self,data_I):
'''update rows of data_stage01_physiology_data'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage01_physiology_data).filter(
#data_stage01_physiology_data.id == d['id'],
data_stage01_physiology_data.experiment_id.like(d['experiment_id']),
data_stage01_physiology_data.sample_id.like(d['sample_id']),
data_stage01_physiology_data.met_id.like(d['met_id']),
data_stage01_physiology_data.data_units.like(d['data_units']),
data_stage01_physiology_data.data_reference.like(d['data_reference'])).update(
{
'experiment_id':d['experiment_id'],
'sample_id':d['sample_id'],
#'sample_name_short':d['sample_name_short'],
#'time_point':d['time_point'],
#'sample_date':d['sample_date'],
'met_id':d['met_id'],
'data_raw':d['data_raw'],
'data_corrected':d['data_corrected'],
'data_units':d['data_units'],
'data_reference':d['data_reference'],
'used_':d['used_'],
'comment_':d['comment_']},
synchronize_session=False);
if data_update == 0:
print('row not found.')
print(d);
except SQLAlchemyError as e:
print(e);
self.session.commit();
# query sample names from data_stage01_physiology_data
def get_sampleNameShort_experimentID(self,experiment_id_I,exp_type_I):
'''Query sample name short (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample_description.sample_name_short).filter(
data_stage01_physiology_data.experiment_id.like(experiment_id_I),
data_stage01_physiology_data.used_.is_(True),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_physiology_data.sample_id),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id)).group_by(
sample_description.sample_name_short).order_by(
sample_description.sample_name_short.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_short);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# query sample IDs from data_stage01_physiology_data
def get_sampleIDs_experimentID(self,experiment_id_I,exp_type_I):
'''Query sample ids (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_physiology_data.sample_id).filter(
data_stage01_physiology_data.experiment_id.like(experiment_id_I),
data_stage01_physiology_data.used_.is_(True),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_physiology_data.sample_id)).group_by(
data_stage01_physiology_data.sample_id).order_by(
data_stage01_physiology_data.sample_id.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_id);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# query met_ids from data_stage01_physiology_data
def get_metIDs_experimentIDAndSampleNameShort(self,experiment_id_I,exp_type_I,sample_name_short_I):
'''Query met_ids by sample name short that are used from
the experiment'''
try:
met_ids = self.session.query(data_stage01_physiology_data.met_id).filter(
data_stage01_physiology_data.experiment_id.like(experiment_id_I),
data_stage01_physiology_data.used_.is_(True),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_physiology_data.sample_id),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_short.like(sample_name_short_I)).group_by(
data_stage01_physiology_data.met_id).order_by(
data_stage01_physiology_data.met_id.asc()).all();
met_ids_O = [];
for met in met_ids: met_ids_O.append(met.met_id);
return met_ids_O;
except SQLAlchemyError as e:
print(e);
# query sample_date and data_corrected from data_stage01_physiology_data
def get_sampleDateAndDataCorrected_experimentIDAndSampleNameShortAndMetIDAndDataUnits(self,experiment_id_I,exp_type_I,sample_name_short_I,met_id_I,data_units_I):
'''Query time and data_corrected by sample name short that are used from
the experiment sorted by time'''
try:
data = self.session.query(sample_description.sample_date,
data_stage01_physiology_data.data_corrected).filter(
data_stage01_physiology_data.experiment_id.like(experiment_id_I),
data_stage01_physiology_data.met_id.like(met_id_I),
data_stage01_physiology_data.data_units.like(data_units_I),
data_stage01_physiology_data.used_.is_(True),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_physiology_data.sample_id),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_short.like(sample_name_short_I)).order_by(
sample_description.sample_date.asc()).all();
sample_date_O = [];
data_corrected_O = [];
for d in data:
sample_date_O.append(d.sample_date);
data_corrected_O.append(d.data_corrected);
return sample_date_O,data_corrected_O;
except SQLAlchemyError as e:
print(e)
def get_sampleDateAndDataCorrectedAndSampleIDs_experimentIDAndSampleNameShortAndMetIDAndDataUnits(self,experiment_id_I,exp_type_I,sample_name_short_I,met_id_I,data_units_I):
'''Query time and data_corrected by sample name short that are used from
the experiment sorted by time'''
try:
data = self.session.query(sample_description.sample_date,
data_stage01_physiology_data.data_corrected,
data_stage01_physiology_data.sample_id).filter(
data_stage01_physiology_data.experiment_id.like(experiment_id_I),
data_stage01_physiology_data.met_id.like(met_id_I),
data_stage01_physiology_data.data_units.like(data_units_I),
data_stage01_physiology_data.used_.is_(True),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_physiology_data.sample_id),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_short.like(sample_name_short_I)).order_by(
sample_description.sample_date.asc()).all();
sample_date_O = [];
data_corrected_O = [];
sample_id_O = [];
for d in data:
sample_date_O.append(d.sample_date);
data_corrected_O.append(d.data_corrected);
sample_id_O.append(d.sample_id);
return sample_date_O,data_corrected_O,sample_id_O;
except SQLAlchemyError as e:
print(e);
def get_sampleDateAndDataCorrected_experimentIDAndSampleNameShort(self,experiment_id_I,sample_name_short_I,data_units_I=['mM','OD600']):
'''Query time and data_corrected by sample name short that are used from
the experiment sorted by time'''
try:
data = self.session.query(sample_description.sample_date,
sample_description.sample_name_short,
sample_description.sample_name_abbreviation,
data_stage01_physiology_data.id,
data_stage01_physiology_data.data_corrected,
data_stage01_physiology_data.experiment_id,
data_stage01_physiology_data.sample_id,
data_stage01_physiology_data.met_id,
data_stage01_physiology_data.data_units,
data_stage01_physiology_data.data_reference).filter(
data_stage01_physiology_data.experiment_id.like(experiment_id_I),
data_stage01_physiology_data.data_units.in_(data_units_I),
data_stage01_physiology_data.used_.is_(True),
experiment.id.like(experiment_id_I),
experiment.sample_name.like(data_stage01_physiology_data.sample_id),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_short.like(sample_name_short_I)).order_by(
data_stage01_physiology_data.experiment_id.asc(),
data_stage01_physiology_data.data_units.asc(),
data_stage01_physiology_data.met_id.asc(),
sample_description.sample_date.asc()).all();
rows_O = [d._asdict() for d in data];
return rows_O;
except SQLAlchemyError as e:
print(e);
|
|
#!/usr/bin/env python
from datetime import datetime
from elasticsearch import Elasticsearch
import json
import screepsapi
from settings import getSettings
import six
import time
import os
import services.screeps as screeps_service
import sys
MAXPAGES = 200
es_settings_dir = os.path.join(os.path.dirname(__file__), 'mappings')
class ScreepsMemoryStats():
ELASTICSEARCH_HOST = 'elasticsearch' if 'ELASTICSEARCH' in os.environ else 'localhost'
es = Elasticsearch([ELASTICSEARCH_HOST])
def __init__(self, u=None, p=None, ptr=False):
self.user = u
self.password = p
self.ptr = ptr
self.processed_ticks = {}
def getScreepsAPI(self):
if not self.__api:
settings = getSettings()
self.__api = screepsapi.API(u=settings['screeps_username'],p=settings['screeps_password'],ptr=settings['screeps_ptr'])
return self.__api
__api = False
def run_forever(self):
lastrun = False
while True:
api = self.getScreepsAPI()
try:
shard_data = api.shard_info()['shards']
shards = [x['name'] for x in shard_data]
if len(shards) < 1:
shards = ['shard0']
except:
shards = ['shard0']
for shard in shards:
self.collectMemoryStats(shard)
# Market data changes much more rarely so process it less often.
if not lastrun or lastrun >= 20:
self.collectMarketHistory(shard)
lastrun = 1
# don't pause before next run as market collection adds its own
# delays
continue
lastrun += 1
time.sleep(7)
def collectMarketHistory(self, shard):
screeps = self.getScreepsAPI()
page = None
failures = 0
while True:
market_history = screeps.market_history(page, shard)
if 'list' not in market_history:
return
for item in market_history['list']:
if '_id' not in item:
continue
item['id'] = item['_id']
item['shard'] = shard
del item['_id']
if item['type'] == 'market.fee':
if 'extendOrder' in item['market']:
item['addAmount'] = item['market']['extendOrder']['addAmount']
elif 'order' in item['market']:
item['orderType'] = item['market']['order']['type']
item['resourceType'] = item['market']['order']['resourceType']
item['price'] = item['market']['order']['price']
item['totalAmount'] = item['market']['order']['totalAmount']
if 'roomName' in item['market']['order']:
item['roomName'] = item['market']['order']['roomName']
else:
continue
if self.saveFee(item):
failures = 0
else:
failures += 1
else:
item['resourceType'] = item['market']['resourceType']
item['price'] = item['market']['price']
item['totalAmount'] = item['market']['amount']
if 'roomName' in item['market']:
item['roomName'] = item['market']['roomName']
if 'targetRoomName' in item['market']:
item['targetRoomName'] = item['market']['targetRoomName']
user = screeps_service.getRoomOwner(item['targetRoomName'])
if user:
item['player'] = user
alliance = screeps_service.getAllianceFromUser(user)
if alliance:
item['alliance'] = alliance
if 'npc' in item['market']:
item['npc'] = item['market']['npc']
else:
item['npc'] = False
if self.saveOrder(item):
failures = 0
else:
failures += 1
if failures >= 10:
print('Too many already captured records')
return
if 'hasMore' not in market_history:
print('hasMore not present')
return
if not market_history['hasMore']:
print('hasMore is false')
return
page = int(market_history['page']) + 1
if page >= MAXPAGES:
return
def saveFee(self, order):
date_index = time.strftime("%Y_%m")
indexname = 'screeps-market-fees_' + date_index
if not self.es.indices.exists(indexname):
with open('%s/fees.json' % (es_settings_dir,), 'r') as settings_file:
settings=settings_file.read()
self.es.indices.create(index=indexname, ignore=400, body=settings)
order = self.clean(order)
if self.es.exists(index=indexname, doc_type="fees", id=order['id']):
return False
else:
self.es.index(index=indexname,
doc_type="fees",
id=order['id'],
timestamp=order['date'],
body=order)
print("Saving order (fee) %s" % (order['id'],))
return True
def saveOrder(self, order):
date_index = time.strftime("%Y_%m")
indexname = 'screeps-market-orders_' + date_index
if not self.es.indices.exists(indexname):
with open('%s/orders.json' % (es_settings_dir,), 'r') as settings_file:
settings=settings_file.read()
self.es.indices.create(index=indexname, ignore=400, body=settings)
order = self.clean(order)
if self.es.exists(index=indexname, doc_type="orders", id=order['id']):
return False
else:
self.es.index(index=indexname,
doc_type="orders",
id=order['id'],
timestamp=order['date'],
body=order)
print("Saving order (deal) %s" % (order['id'],))
return True
def collectMemoryStats(self, shard):
screeps = self.getScreepsAPI()
stats = screeps.memory(path='___screeps_stats')
if 'data' not in stats:
return False
if shard not in self.processed_ticks:
self.processed_ticks[shard] = []
# stats[tick][group][subgroup][data]
# stats[4233][rooms][W43S94] = {}
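# Illustrative shape only (values are hypothetical); the loop below relies on a
# per-tick 'time' key and an optional 'subgroups' marker inside each group:
#   {'12345': {'time': 1500000000000,
#              'cpu': {'used': 85.2},
#              'rooms': {'subgroups': True, 'W43S94': {'energy': 300000}}}}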
date_index = time.strftime("%Y_%m")
confirm_queue = []
for tick,tick_index in stats['data'].items():
if int(tick) in self.processed_ticks[shard]:
continue
# Is tick_index a list of segments or the data itself?
if isinstance(tick_index, list):
rawstring = ''
for segment_id in tick_index:
segment = screeps.get_segment(segment=int(segment_id))
if 'data' in segment and len(segment['data']) > 1:
rawstring += segment['data']
else:
# Segment may not be ready yet - try again next run.
return
try:
tickstats = json.loads(rawstring)
except:
continue
else:
tickstats = tick_index
self.processed_ticks[shard].append(int(tick))
if len(self.processed_ticks[shard]) > 100:
self.processed_ticks[shard].pop(0)
for group, groupstats in tickstats.items():
indexname = 'screeps-stats-' + group + '_' + date_index
if not isinstance(groupstats, dict):
continue
if 'subgroups' in groupstats:
for subgroup, statdata in groupstats.items():
if subgroup == 'subgroups':
continue
statdata[group] = subgroup
savedata = self.clean(statdata)
savedata['tick'] = int(tick)
savedata['timestamp'] = tickstats['time']
savedata['shard'] = shard
self.es.index(index=indexname, doc_type="stats", body=savedata)
else:
savedata = self.clean(groupstats)
savedata['tick'] = int(tick)
savedata['timestamp'] = tickstats['time']
savedata['shard'] = shard
self.es.index(index=indexname, doc_type="stats", body=savedata)
confirm_queue.append(tick)
self.confirm(confirm_queue, shard)
def confirm(self, ticks, shard):
javascript_clear = 'Stats.removeTick(' + json.dumps(ticks, separators=(',',':')) + ');'
sconn = self.getScreepsAPI()
sconn.console(javascript_clear, shard)
def clean(self, datadict):
newdict = {}
for key, value in datadict.items():
if key == 'tick':
newdict[key] = int(value)
else:
try:
newdict[key] = float(value)
except:
newdict[key] = value
return newdict
if __name__ == "__main__":
settings = getSettings()
screepsconsole = ScreepsMemoryStats(u=settings['screeps_username'], p=settings['screeps_password'], ptr=settings['screeps_ptr'])
screepsconsole.run_forever()
|
|
import abc
import time
import typing
import asyncio
import logging
import weakref
import functools
import itertools
from urllib import parse
import furl
import aiohttp
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.core import path as wb_path
from waterbutler import settings as wb_settings
from waterbutler.core.metrics import MetricsRecord
from waterbutler.core import metadata as wb_metadata
from waterbutler.core.utils import ZipStreamGenerator
from waterbutler.core.utils import RequestHandlerContext
logger = logging.getLogger(__name__)
_THROTTLES = weakref.WeakKeyDictionary() # type: weakref.WeakKeyDictionary
def throttle(concurrency=10, interval=1):
def _throttle(func):
@functools.wraps(func)
async def wrapped(*args, **kwargs):
if asyncio.get_event_loop() not in _THROTTLES:
count, last_call, event = 0, time.time(), asyncio.Event()
_THROTTLES[asyncio.get_event_loop()] = (count, last_call, event)
event.set()
else:
count, last_call, event = _THROTTLES[asyncio.get_event_loop()]
await event.wait()
count += 1
if count > concurrency:
count = 0
if (time.time() - last_call) < interval:
event.clear()
await asyncio.sleep(interval - (time.time() - last_call))
event.set()
last_call = time.time()
_THROTTLES[asyncio.get_event_loop()] = (count, last_call, event)
return await func(*args, **kwargs)
return wrapped
return _throttle
def build_url(base, *segments, **query):
url = furl.furl(base)
# Filters return generators
# Cast to list to force "spin" it
url.path.segments = list(filter(
lambda segment: segment,
map(
# Furl requires everything to be quoted or not, no mixtures allowed
# prequote everything so %signs don't break everything
lambda segment: parse.quote(segment.strip('/')),
# Include any segments of the original url, effectively list+list but returns a generator
itertools.chain(url.path.segments, segments)
)
))
url.args = query
return url.url
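# Example (illustrative; the base URL and arguments are hypothetical). Segments
# are percent-quoted and appended to the base path, keyword arguments become the
# query string:
#
#     build_url('https://api.example.com/v1', 'files', 'abc123', rev='2')
#     # -> 'https://api.example.com/v1/files/abc123?rev=2'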
class BaseProvider(metaclass=abc.ABCMeta):
"""The base class for all providers. Every provider must, at the least, implement all abstract
methods in this class.
.. note::
When adding a new provider you must add it to setup.py's
`entry_points` under the `waterbutler.providers` key formatted
as: `<provider name> = waterbutler.providers.yourprovider:<FullProviderName>`
Keep in mind that `yourprovider` modules must export the provider class
"""
BASE_URL = None
def __init__(self, auth: dict,
credentials: dict,
settings: dict,
retry_on: typing.Set[int]={408, 502, 503, 504}) -> None:
"""
:param auth: ( :class:`dict` ) Information about the user this provider will act on the behalf of
:param credentials: ( :class:`dict` ) The credentials used to authenticate with the provider,
often an OAuth 2 token
:param settings: ( :class:`dict` ) Configuration settings for this provider,
often folder or repo
"""
self._retry_on = retry_on
self.auth = auth
self.credentials = credentials
self.settings = settings
self.provider_metrics = MetricsRecord('provider')
self.provider_metrics.add('auth', auth)
self.metrics = self.provider_metrics.new_subrecord(self.NAME)
@property
@abc.abstractmethod
def NAME(self) -> str:
raise NotImplementedError
def __eq__(self, other):
try:
return (
type(self) == type(other) and
self.credentials == other.credentials
)
except AttributeError:
return False
def serialized(self) -> dict:
return {
'name': self.NAME,
'auth': self.auth,
'settings': self.settings,
'credentials': self.credentials,
}
def build_url(self, *segments, **query) -> str:
"""A nice wrapper around furl, builds urls based on self.BASE_URL
:param \*segments: ( :class:`tuple` ) A tuple of strings joined into /foo/bar/..
:param \*\*query: ( :class:`dict` ) A dictionary that will be turned into query parameters ?foo=bar
:rtype: :class:`str`
"""
return build_url(self.BASE_URL, *segments, **query)
@property
def default_headers(self) -> dict:
"""Headers to be included with every request
Commonly OAuth headers or Content-Type
"""
return {}
def build_headers(self, **kwargs) -> dict:
headers = self.default_headers
headers.update(kwargs)
return {
key: value
for key, value in headers.items()
if value is not None
}
@throttle()
async def make_request(self, method: str, url: str, *args, **kwargs) -> aiohttp.client.ClientResponse:
"""A wrapper around :func:`aiohttp.request`. Inserts default headers.
:param method: ( :class:`str` ) The HTTP method
:param url: ( :class:`str` ) The url to send the request to
:keyword range: An optional tuple (start, end) that is transformed into a Range header
:keyword expects: An optional tuple of HTTP status codes as integers; an exception is
raised if the returned status code is not in it.
:type expects: tuple of ints
:param throws: ( :class:`Exception` ) The exception to be raised from expects
:param \*args: ( :class:`tuple` ) args passed to :func:`aiohttp.request`
:param \*\*kwargs: ( :class:`dict` ) kwargs passed to :func:`aiohttp.request`
:rtype: :class:`aiohttp.ClientResponse`
:raises: :class:`.UnhandledProviderError` Raised if expects is defined
"""
kwargs['headers'] = self.build_headers(**kwargs.get('headers', {}))
retry = _retry = kwargs.pop('retry', 2)
range = kwargs.pop('range', None)
expects = kwargs.pop('expects', None)
throws = kwargs.pop('throws', exceptions.UnhandledProviderError)
if range:
kwargs['headers']['Range'] = self._build_range_header(range)
while retry >= 0:
# Don't overwrite the callable ``url`` so that signed URLs are refreshed for every retry
non_callable_url = url() if callable(url) else url
try:
self.provider_metrics.incr('requests.count')
self.provider_metrics.append('requests.urls', non_callable_url)
response = await aiohttp.request(method, non_callable_url, *args, **kwargs)
self.provider_metrics.append('requests.verbose',
['OK', response.status, non_callable_url])
if expects and response.status not in expects:
raise (await exceptions.exception_from_response(response, error=throws, **kwargs))
return response
except throws as e:
self.provider_metrics.append('requests.verbose', ['NO', e.code, non_callable_url])
if retry <= 0 or e.code not in self._retry_on:
raise
await asyncio.sleep((1 + _retry - retry) * 2)
retry -= 1
def request(self, *args, **kwargs):
return RequestHandlerContext(self.make_request(*args, **kwargs))
async def move(self,
dest_provider: 'BaseProvider',
src_path: wb_path.WaterButlerPath,
dest_path: wb_path.WaterButlerPath,
rename: str=None,
conflict: str='replace',
handle_naming: bool=True) -> typing.Tuple[wb_metadata.BaseMetadata, bool]:
"""Moves a file or folder from the current provider to the specified one
Performs a copy and then a delete.
Calls :func:`BaseProvider.intra_move` if possible.
:param dest_provider: ( :class:`.BaseProvider` ) The provider to move to
:param src_path: ( :class:`.WaterButlerPath` ) Path to where the resource can be found
:param dest_path: ( :class:`.WaterButlerPath` ) Path to where the resource will be moved
:param rename: ( :class:`str` ) The desired name of the resulting path, may be incremented
:param conflict: ( :class:`str` ) What to do in the event of a name conflict, ``replace`` or ``keep``
:param handle_naming: ( :class:`bool` ) If a naming conflict is detected, should it be automatically handled?
"""
args = (dest_provider, src_path, dest_path)
kwargs = {'rename': rename, 'conflict': conflict}
self.provider_metrics.add('move', {
'got_handle_naming': handle_naming,
'conflict': conflict,
'got_rename': rename is not None,
})
if handle_naming:
dest_path = await dest_provider.handle_naming(
src_path,
dest_path,
rename=rename,
conflict=conflict,
)
args = (dest_provider, src_path, dest_path)
kwargs = {}
# files and folders shouldn't overwrite themselves
if (
self.shares_storage_root(dest_provider) and
src_path.materialized_path == dest_path.materialized_path
):
raise exceptions.OverwriteSelfError(src_path)
self.provider_metrics.add('move.can_intra_move', False)
if self.can_intra_move(dest_provider, src_path):
self.provider_metrics.add('move.can_intra_move', True)
return await self.intra_move(*args)
if src_path.is_dir:
meta_data, created = await self._folder_file_op(self.move, *args, **kwargs) # type: ignore
else:
meta_data, created = await self.copy(*args, handle_naming=False, **kwargs) # type: ignore
await self.delete(src_path)
return meta_data, created
async def copy(self,
dest_provider: 'BaseProvider',
src_path: wb_path.WaterButlerPath,
dest_path: wb_path.WaterButlerPath,
rename: str=None, conflict: str='replace',
handle_naming: bool=True) \
-> typing.Tuple[wb_metadata.BaseMetadata, bool]:
args = (dest_provider, src_path, dest_path)
kwargs = {'rename': rename, 'conflict': conflict, 'handle_naming': handle_naming}
self.provider_metrics.add('copy', {
'got_handle_naming': handle_naming,
'conflict': conflict,
'got_rename': rename is not None,
})
if handle_naming:
dest_path = await dest_provider.handle_naming(
src_path,
dest_path,
rename=rename,
conflict=conflict,
)
args = (dest_provider, src_path, dest_path)
kwargs = {}
# files and folders shouldn't overwrite themselves
if (
self.shares_storage_root(dest_provider) and
src_path.materialized_path == dest_path.materialized_path
):
raise exceptions.OverwriteSelfError(src_path)
self.provider_metrics.add('copy.can_intra_copy', False)
if self.can_intra_copy(dest_provider, src_path):
self.provider_metrics.add('copy.can_intra_copy', True)
return await self.intra_copy(*args)
if src_path.is_dir:
return await self._folder_file_op(self.copy, *args, **kwargs) # type: ignore
download_stream = await self.download(src_path)
if getattr(download_stream, 'name', None):
dest_path.rename(download_stream.name)
return await dest_provider.upload(download_stream, dest_path)
async def _folder_file_op(self,
func: typing.Callable,
dest_provider: 'BaseProvider',
src_path: wb_path.WaterButlerPath,
dest_path: wb_path.WaterButlerPath,
**kwargs) -> typing.Tuple[wb_metadata.BaseFolderMetadata, bool]:
"""Recursively apply func to src/dest path.
Called from: func: copy and move if src_path.is_dir.
Calls: func: dest_provider.delete and notes result for bool: created
func: dest_provider.create_folder
func: dest_provider.revalidate_path
func: self.metadata
:param coroutine func: to be applied to src/dest path
:param *Provider dest_provider: Destination provider
:param *ProviderPath src_path: Source path
:param *ProviderPath dest_path: Destination path
"""
assert src_path.is_dir, 'src_path must be a directory'
assert asyncio.iscoroutinefunction(func), 'func must be a coroutine'
try:
await dest_provider.delete(dest_path)
created = False
except exceptions.ProviderError as e:
if e.code != 404:
raise
created = True
folder = await dest_provider.create_folder(dest_path, folder_precheck=False)
dest_path = await dest_provider.revalidate_path(dest_path.parent, dest_path.name, folder=dest_path.is_dir)
folder.children = []
items = await self.metadata(src_path) # type: ignore
# Metadata returns a union, which confuses mypy
self.provider_metrics.append('_folder_file_ops.item_counts', len(items)) # type: ignore
for i in range(0, len(items), wb_settings.OP_CONCURRENCY): # type: ignore
futures = []
for item in items[i:i + wb_settings.OP_CONCURRENCY]: # type: ignore
futures.append(asyncio.ensure_future(
func(
dest_provider,
# TODO figure out a way to cut down on all the requests made here
(await self.revalidate_path(src_path, item.name, folder=item.is_folder)),
(await dest_provider.revalidate_path(dest_path, item.name, folder=item.is_folder)),
handle_naming=False,
)
))
if item.is_folder:
await futures[-1]
if not futures:
continue
done, _ = await asyncio.wait(futures, return_when=asyncio.FIRST_EXCEPTION)
for fut in done:
folder.children.append(fut.result()[0])
return folder, created
async def handle_naming(self,
src_path: wb_path.WaterButlerPath,
dest_path: wb_path.WaterButlerPath,
rename: str=None,
conflict: str='replace') -> wb_path.WaterButlerPath:
"""Given a :class:`.WaterButlerPath` and the desired name, handle any potential naming issues.
i.e.:
::
cp /file.txt /folder/ -> /folder/file.txt
cp /folder/ /folder/ -> /folder/folder/
cp /file.txt /folder/file.txt -> /folder/file.txt
cp /file.txt /folder/file.txt -> /folder/file (1).txt
cp /file.txt /folder/doc.txt -> /folder/doc.txt
:param src_path: ( :class:`.WaterButlerPath` ) The object that is being copied
:param dest_path: ( :class:`.WaterButlerPath` ) The path that is being copied to or into
:param rename: ( :class:`str` ) The desired name of the resulting path, may be incremented
:param conflict: ( :class:`str` ) The conflict resolution strategy, ``replace`` or ``keep``
:rtype: :class:`.WaterButlerPath`
"""
if src_path.is_dir and dest_path.is_file:
            # Can't copy a directory to a file
raise ValueError('Destination must be a directory if the source is')
if not dest_path.is_file:
            # Directories are always going to be copied into
# cp /folder1/ /folder2/ -> /folder1/folder2/
dest_path = await self.revalidate_path(
dest_path,
rename or src_path.name,
folder=src_path.is_dir
)
dest_path, _ = await self.handle_name_conflict(dest_path, conflict=conflict)
return dest_path
def can_intra_copy(self,
other: 'BaseProvider',
path: wb_path.WaterButlerPath=None) -> bool:
"""Indicates if a quick copy can be performed between the current provider and `other`.
.. note::
Defaults to False
:param other: ( :class:`.BaseProvider` ) The provider to check against
:param path: ( :class:`.WaterButlerPath` ) The path of the desired resource
:rtype: :class:`bool`
"""
return False
def can_intra_move(self,
other: 'BaseProvider',
path: wb_path.WaterButlerPath=None) -> bool:
"""Indicates if a quick move can be performed between the current provider and `other`.
.. note::
Defaults to False
:param other: ( :class:`.BaseProvider` ) The provider to check against
:param path: ( :class:`.WaterButlerPath` ) The path of the desired resource
:rtype: :class:`bool`
"""
return False
async def intra_copy(self,
dest_provider: 'BaseProvider',
source_path: wb_path.WaterButlerPath,
dest_path: wb_path.WaterButlerPath) -> typing.Tuple[wb_metadata.BaseFileMetadata, bool]:
"""If the provider supports copying files and/or folders within itself by some means other
than download/upload, then ``can_intra_copy`` should return ``True``. This method will
implement the copy. It accepts the destination provider, a source path, and the
destination path. Returns the metadata for the newly created file and a boolean indicating
whether the copied entity is completely new (``True``) or overwrote a previously-existing
file (``False``).
:param dest_provider: ( :class:`.BaseProvider` ) a provider instance for the destination
        :param source_path: ( :class:`.WaterButlerPath` ) the Path of the entity being copied
:param dest_path: ( :class:`.WaterButlerPath` ) the Path of the destination being copied to
:rtype: (:class:`.BaseFileMetadata`, :class:`bool`)
"""
raise NotImplementedError
async def intra_move(self,
dest_provider: 'BaseProvider',
src_path: wb_path.WaterButlerPath,
dest_path: wb_path.WaterButlerPath) -> typing.Tuple[wb_metadata.BaseFileMetadata, bool]:
"""If the provider supports moving files and/or folders within itself by some means other
than download/upload/delete, then ``can_intra_move`` should return ``True``. This method
will implement the move. It accepts the destination provider, a source path, and the
destination path. Returns the metadata for the newly created file and a boolean indicating
whether the moved entity is completely new (``True``) or overwrote a previously-existing
file (``False``).
:param dest_provider: ( :class:`.BaseProvider` ) a provider instance for the destination
:param src_path: ( :class:`.WaterButlerPath` ) the Path of the entity being moved
:param dest_path: ( :class:`.WaterButlerPath` ) the Path of the destination being moved to
:rtype: (:class:`.BaseFileMetadata`, :class:`bool`)
"""
data, created = await self.intra_copy(dest_provider, src_path, dest_path)
await self.delete(src_path)
return data, created
async def exists(self, path: wb_path.WaterButlerPath, **kwargs) \
-> typing.Union[bool, wb_metadata.BaseMetadata, typing.List[wb_metadata.BaseMetadata]]:
"""Check for existence of WaterButlerPath
Attempt to retrieve provider metadata to determine existence of a WaterButlerPath. If
successful, will return the result of `self.metadata()` which may be `[]` for empty
folders.
:param path: ( :class:`.WaterButlerPath` ) path to check for
:rtype: (`self.metadata()` or False)
"""
try:
return await self.metadata(path, **kwargs)
except exceptions.NotFoundError:
return False
except exceptions.MetadataError as e:
if e.code != 404:
raise
return False
async def handle_name_conflict(self,
path: wb_path.WaterButlerPath,
conflict: str='replace',
**kwargs) -> typing.Tuple[wb_path.WaterButlerPath, bool]:
"""Check WaterButlerPath and resolve conflicts
Given a WaterButlerPath and a conflict resolution pattern determine
the correct file path to upload to and indicate if that file exists or not
:param path: ( :class:`.WaterButlerPath` ) Desired path to check for conflict
:param conflict: ( :class:`str` ) replace, keep, warn
:rtype: (:class:`.WaterButlerPath` or False)
:raises: :class:`.NamingConflict`
"""
exists = await self.exists(path, **kwargs)
if (not exists and not exists == []) or conflict == 'replace':
return path, exists # type: ignore
if conflict == 'warn':
raise exceptions.NamingConflict(path.name)
while True:
path.increment_name()
test_path = await self.revalidate_path(
path.parent,
path.name,
folder=path.is_dir
)
exists = await self.exists(test_path, **kwargs)
if not (exists or exists == []):
break
return path, False
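    # Illustrative behaviour of handle_name_conflict (file names are made up):
    #   conflict='replace' -> returns the original path together with the
    #                         existence check result, overwriting on upload.
    #   conflict='warn'    -> raises NamingConflict when the path exists.
    #   conflict='keep'    -> increments the name (e.g. "file.txt" ->
    #                         "file (1).txt") until a free path is found,
    #                         then returns (new_path, False).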
async def revalidate_path(self,
base: wb_path.WaterButlerPath,
path: str,
folder: bool=False) -> wb_path.WaterButlerPath:
"""Take a path and a base path and build a WaterButlerPath representing `/base/path`. For
id-based providers, this will need to lookup the id of the new child object.
:param base: ( :class:`.WaterButlerPath` ) The base folder to look under
:param path: ( :class:`str`) the path of a child of `base`, relative to `base`
        :param folder: ( :class:`bool` ) whether the returned WaterButlerPath should represent a folder
:rtype: :class:`.WaterButlerPath`
"""
return base.child(path, folder=folder)
async def zip(self, path: wb_path.WaterButlerPath, **kwargs) -> asyncio.StreamReader:
"""Streams a Zip archive of the given folder
:param path: ( :class:`.WaterButlerPath` ) The folder to compress
"""
meta_data = await self.metadata(path) # type: ignore
if path.is_file:
meta_data = [meta_data] # type: ignore
path = path.parent
return streams.ZipStreamReader(ZipStreamGenerator(self, path, *meta_data)) # type: ignore
def shares_storage_root(self, other: 'BaseProvider') -> bool:
"""Returns True if ``self`` and ``other`` both point to the same storage root. Used to
detect when a file move/copy action might result in the file overwriting itself. Most
        providers have enough uniquely identifying information in the settings to detect this,
but some providers may need to override this to do further detection.
:param other: ( :class:`.BaseProvider`) another provider instance to compare with
:rtype: :class:`bool` (True if both providers use the same storage root)
"""
return self.NAME == other.NAME and self.settings == other.settings
@abc.abstractmethod
def can_duplicate_names(self) -> bool:
"""Returns True if a file and a folder in the same directory can have identical names."""
raise NotImplementedError
@abc.abstractmethod
async def download(self, src_path: wb_path.WaterButlerPath, **kwargs) -> streams.ResponseStreamReader:
"""Download a file from this provider.
:param src_path: ( :class:`.WaterButlerPath` ) Path to the file to be downloaded
:param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
:rtype: :class:`.ResponseStreamReader`
:raises: :class:`.DownloadError`
"""
raise NotImplementedError
@abc.abstractmethod
async def upload(self, stream: streams.BaseStream, path: wb_path.WaterButlerPath, *args, **kwargs) \
-> typing.Tuple[wb_metadata.BaseFileMetadata, bool]:
"""Uploads the given stream to the provider. Returns the metadata for the newly created
file and a boolean indicating whether the file is completely new (``True``) or overwrote
a previously-existing file (``False``)
:param path: ( :class:`.WaterButlerPath` ) Where to upload the file to
:param stream: ( :class:`.BaseStream` ) The content to be uploaded
:param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
:rtype: (:class:`.BaseFileMetadata`, :class:`bool`)
        :raises: :class:`.UploadError`
"""
raise NotImplementedError
@abc.abstractmethod
async def delete(self, src_path: wb_path.WaterButlerPath, **kwargs) -> None:
"""
:param src_path: ( :class:`.WaterButlerPath` ) Path to be deleted
:param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
:rtype: :class:`None`
:raises: :class:`.DeleteError`
"""
raise NotImplementedError
@abc.abstractmethod
async def metadata(self, path: wb_path.WaterButlerPath, **kwargs) \
-> typing.Union[wb_metadata.BaseMetadata, typing.List[wb_metadata.BaseMetadata]]:
"""Get metadata about the specified resource from this provider. Will be a :class:`list`
if the resource is a directory otherwise an instance of
:class:`.BaseFileMetadata`
.. note::
Mypy doesn't seem to do very well with functions that can return more than one type of thing.
See: https://github.com/python/mypy/issues/1693
:param path: ( :class:`.WaterButlerPath` ) The path to a file or folder
:param \*\*kwargs: ( :class:`dict` ) Arguments to be parsed by child classes
:rtype: :class:`.BaseMetadata`
:rtype: :class:`list` of :class:`.BaseMetadata`
:raises: :class:`.MetadataError`
"""
raise NotImplementedError
@abc.abstractmethod
async def validate_v1_path(self, path: str, **kwargs) -> wb_path.WaterButlerPath:
"""API v1 requires that requests against folder endpoints always end with a slash, and
requests against files never end with a slash. This method checks the provider's metadata
for the given id and throws a 404 Not Found if the implicit and explicit types don't
match. This method duplicates the logic in the provider's validate_path method, but
        validate_path must currently accommodate v0 AND v1 semantics. After v0's retirement, this
method can replace validate_path.
``path`` is the string in the url after the provider name and refers to the entity to be
acted on. For v1, this must *always exist*. If it does not, ``validate_v1_path`` should
return a 404. Creating a new file in v1 is done by making a PUT request against the parent
folder and specifying the file name as a query parameter. If a user attempts to create a
file by PUTting to its inferred path, validate_v1_path should reject this request with a 404.
:param path: ( :class:`str` ) user-supplied path to validate
:rtype: :class:`.WaterButlerPath`
:raises: :class:`.NotFoundError`
"""
raise NotImplementedError
@abc.abstractmethod
async def validate_path(self, path: str, **kwargs) -> wb_path.WaterButlerPath:
"""Validates paths passed in via the v0 API. v0 paths are much less strict than v1 paths.
They may represent things that exist or something that should be created. As such, the goal
of ``validate_path`` is to split the path into its component parts and attempt to determine
the ID of each part on the external provider. For instance, if the ``googledrive`` provider
receives a path of ``/foo/bar/baz.txt``, it will split those into ``/``, ``foo/``, ``bar/``,
and ``baz.txt``, and query Google Drive for the ID of each. ``validate_path`` then builds a
WaterButlerPath object with an ID, name tuple for each path part. The last part is
permitted to not have an ID, since it may represent a file that has not yet been created.
All other parts should have an ID.
The WaterButler v0 API is deprecated and will be removed in a future release. At that time
this method will be obsolete and will be removed from all providers.
:param path: ( :class:`str` ) user-supplied path to validate
:rtype: :class:`.WaterButlerPath`
:raises: :class:`.NotFoundError`
"""
raise NotImplementedError
def path_from_metadata(self,
parent_path: wb_path.WaterButlerPath,
meta_data: wb_metadata.BaseMetadata) -> wb_path.WaterButlerPath:
return parent_path.child(meta_data.name, _id=meta_data.path.strip('/'),
folder=meta_data.is_folder)
async def revisions(self, path: wb_path.WaterButlerPath, **kwargs):
"""Return a list of :class:`.BaseFileRevisionMetadata` objects representing the revisions
available for the file at ``path``.
"""
return [] # TODO Raise 405 by default h/t @rliebz
async def create_folder(self, path: wb_path.WaterButlerPath,
**kwargs) -> wb_metadata.BaseFolderMetadata:
"""Create a folder in the current provider at `path`. Returns a `BaseFolderMetadata` object
if successful. May throw a 409 Conflict if a directory with the same name already exists.
:param path: ( :class:`.WaterButlerPath` ) User-supplied path to create. Must be a directory.
        :rtype: :class:`.BaseFolderMetadata`
:raises: :class:`.CreateFolderError`
"""
raise exceptions.ProviderError({'message': 'Folder creation not supported.'}, code=405)
def _build_range_header(self, slice_tup: typing.Tuple[int, int]) -> str:
start, end = slice_tup
return 'bytes={}-{}'.format(
'' if start is None else start,
'' if end is None else end
)
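    # Example (illustrative) of the slice-to-Range mapping above:
    #   (0, 1023)   -> 'bytes=0-1023'   (first KiB)
    #   (100, None) -> 'bytes=100-'     (from byte 100 to the end)
    #   (None, 500) -> 'bytes=-500'     (the last 500 bytes)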
def __repr__(self):
# Note: credentials are not included on purpose.
return '<{}({}, {})>'.format(self.__class__.__name__, self.auth, self.settings)
|
|
# Natural Language Toolkit: Sequential Backoff Taggers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (minor additions)
# Tiago Tresoldi <[email protected]> (original affix tagger)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes for tagging sentences sequentially, left to right. The
abstract base class L{SequentialBackoffTagger} serves as the base
class for all the taggers in this module. Tagging of individual words
is performed by the method L{choose_tag()}, which is defined by
subclasses of L{SequentialBackoffTagger}. If a tagger is unable to
determine a tag for the specified token, then its I{backoff tagger} is
consulted instead. Any C{SequentialBackoffTagger} may serve as a
backoff tagger for any other C{SequentialBackoffTagger}.
"""
import re, yaml
from nltk import FreqDist, ConditionalFreqDist
from nltk.tag.api import *
from nltk.tag.util import *
from nltk.internals import deprecated, Deprecated
######################################################################
#{ Abstract Base Classes
######################################################################
class SequentialBackoffTagger(TaggerI):
"""
An abstract base class for taggers that tags words sequentially,
left to right. Tagging of individual words is performed by the
method L{choose_tag()}, which should be defined by subclasses. If
a tagger is unable to determine a tag for the specified token,
then its backoff tagger is consulted.
@ivar _taggers: A list of all the taggers that should be tried to
tag a token (i.e., C{self} and its backoff taggers).
"""
def __init__(self, backoff=None):
if backoff is None:
self._taggers = [self]
else:
self._taggers = [self] + backoff._taggers
def _get_backoff(self):
if len(self._taggers) < 2: return None
else: return self._taggers[1]
backoff = property(_get_backoff, doc='''
The backoff tagger for this tagger.''')
def tag(self, tokens):
# docs inherited from TaggerI
tags = []
for i in range(len(tokens)):
tags.append(self.tag_one(tokens, i, tags))
return zip(tokens, tags)
def tag_one(self, tokens, index, history):
"""
Determine an appropriate tag for the specified token, and
return that tag. If this tagger is unable to determine a tag
for the specified token, then its backoff tagger is consulted.
@rtype: C{str}
@type tokens: C{list}
@param tokens: The list of words that are being tagged.
@type index: C{int}
@param index: The index of the word whose tag should be
returned.
@type history: C{list} of C{str}
@param history: A list of the tags for all words before
C{index}.
"""
tag = None
for tagger in self._taggers:
tag = tagger.choose_tag(tokens, index, history)
if tag is not None: break
return tag
def choose_tag(self, tokens, index, history):
"""
Decide which tag should be used for the specified token, and
return that tag. If this tagger is unable to determine a tag
for the specified token, return C{None} -- do I{not} consult
the backoff tagger. This method should be overridden by
subclasses of C{SequentialBackoffTagger}.
@rtype: C{str}
@type tokens: C{list}
@param tokens: The list of words that are being tagged.
@type index: C{int}
@param index: The index of the word whose tag should be
returned.
@type history: C{list} of C{str}
@param history: A list of the tags for all words before
C{index}.
"""
raise AssertionError('SequentialBackoffTagger is an abstract class')
#////////////////////////////////////////////////////////////
#{ Deprecated
    @deprecated('Use batch_tag instead.')
    def tag_sents(self, sents, verbose=False):
        return self.batch_tag(sents)
class ContextTagger(SequentialBackoffTagger):
"""
An abstract base class for sequential backoff taggers that choose
a tag for a token based on the value of its "context". Different
subclasses are used to define different contexts.
A C{ContextTagger} chooses the tag for a token by calculating the
token's context, and looking up the corresponding tag in a table.
This table can be constructed manually; or it can be automatically
constructed based on a training corpus, using the L{train()}
factory method.
"""
def __init__(self, context_to_tag, backoff=None):
"""
@param context_to_tag: A dictionary mapping contexts to tags.
@param backoff: The backoff tagger that should be used for this tagger.
"""
SequentialBackoffTagger.__init__(self, backoff)
if context_to_tag:
self._context_to_tag = context_to_tag
else:
self._context_to_tag = {}
def context(self, tokens, index, history):
"""
@return: the context that should be used to look up the tag
for the specified token; or C{None} if the specified token
should not be handled by this tagger.
@rtype: (hashable)
"""
raise AssertionError('Abstract base class')
def choose_tag(self, tokens, index, history):
context = self.context(tokens, index, history)
return self._context_to_tag.get(context)
def size(self):
"""
@return: The number of entries in the table used by this
tagger to map from contexts to tags.
"""
return len(self._context_to_tag)
def __repr__(self):
return '<%s: size=%d>' % (self.__class__.__name__, self.size())
def _train(self, tagged_corpus, cutoff=1, verbose=False):
"""
Initialize this C{ContextTagger}'s L{_context_to_tag} table
based on the given training data. In particular, for each
context C{I{c}} in the training data, set
C{_context_to_tag[I{c}]} to the most frequent tag for that
context. However, exclude any contexts that are already
tagged perfectly by the backoff tagger(s).
The old value of C{self._context_to_tag} (if any) is discarded.
@param tagged_corpus: A tagged corpus. Each item should be
a C{list} of C{(word, tag)} tuples.
@param cutoff: If the most likely tag for a context occurs
fewer than C{cutoff} times, then exclude it from the
context-to-tag table for the new tagger.
"""
token_count = hit_count = 0
# A context is considered 'useful' if it's not already tagged
# perfectly by the backoff tagger.
useful_contexts = set()
# Count how many times each tag occurs in each context.
fd = ConditionalFreqDist()
for sentence in tagged_corpus:
tokens, tags = zip(*sentence)
for index, (token, tag) in enumerate(sentence):
# Record the event.
token_count += 1
context = self.context(tokens, index, tags[:index])
if context is None: continue
fd[context].inc(tag)
# If the backoff got it wrong, this context is useful:
if (self.backoff is None or
tag != self.backoff.tag_one(tokens, index, tags[:index])):
useful_contexts.add(context)
# Build the context_to_tag table -- for each context, figure
# out what the most likely tag is. Only include contexts that
# we've seen at least `cutoff` times.
for context in useful_contexts:
best_tag = fd[context].max()
hits = fd[context][best_tag]
if hits > cutoff:
self._context_to_tag[context] = best_tag
hit_count += hits
# Display some stats, if requested.
if verbose:
size = len(self._context_to_tag)
backoff = 100 - (hit_count * 100.0)/ token_count
pruning = 100 - (size * 100.0) / len(fd.conditions())
print "[Trained Unigram tagger:",
print "size=%d, backoff=%.2f%%, pruning=%.2f%%]" % (
size, backoff, pruning)
######################################################################
#{ Tagger Classes
######################################################################
class DefaultTagger(SequentialBackoffTagger, yaml.YAMLObject):
"""
A tagger that assigns the same tag to every token.
"""
yaml_tag = '!nltk.DefaultTagger'
def __init__(self, tag):
"""
Construct a new tagger that assigns C{tag} to all tokens.
"""
self._tag = tag
SequentialBackoffTagger.__init__(self, None)
def choose_tag(self, tokens, index, history):
return self._tag # ignore token and history
def __repr__(self):
return '<DefaultTagger: tag=%s>' % self._tag
class NgramTagger(ContextTagger, yaml.YAMLObject):
"""
A tagger that chooses a token's tag based on its word string and
    on the preceding I{n} words' tags. In particular, a tuple
    C{(tags[i-n:i-1], words[i])} is looked up in a table, and the
    corresponding tag is returned. N-gram taggers are typically
    trained on a tagged corpus.
"""
yaml_tag = '!nltk.NgramTagger'
def __init__(self, n, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
"""
Train a new C{NgramTagger} using the given training data or
the supplied model. In particular, construct a new tagger
whose table maps from each context C{(tag[i-n:i-1], word[i])}
to the most frequent tag for that context. But exclude any
contexts that are already tagged perfectly by the backoff
tagger.
@param train: A tagged corpus. Each item should be
a C{list} of C{(word, tag)} tuples.
@param backoff: A backoff tagger, to be used by the new
tagger if it encounters an unknown context.
@param cutoff: If the most likely tag for a context occurs
fewer than C{cutoff} times, then exclude it from the
context-to-tag table for the new tagger.
"""
self._n = n
if (train and model) or (not train and not model):
raise ValueError('Must specify either training data or trained model.')
ContextTagger.__init__(self, model, backoff)
if train:
self._train(train, cutoff, verbose)
def context(self, tokens, index, history):
tag_context = tuple(history[max(0,index-self._n+1):index])
return (tag_context, tokens[index])
class UnigramTagger(NgramTagger):
"""
    A tagger that chooses a token's tag based on its word string.
Unigram taggers are typically trained on a tagged corpus.
"""
yaml_tag = '!nltk.UnigramTagger'
def __init__(self, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
NgramTagger.__init__(self, 1, train, model, backoff, cutoff, verbose)
def context(self, tokens, index, history):
return tokens[index]
class BigramTagger(NgramTagger):
"""
    A tagger that chooses a token's tag based on its word string and on
    the preceding word's tag. In particular, a tuple consisting
of the previous tag and the word is looked up in a table, and
the corresponding tag is returned. Bigram taggers are typically
trained on a tagged corpus.
"""
yaml_tag = '!nltk.BigramTagger'
def __init__(self, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
NgramTagger.__init__(self, 2, train, model, backoff, cutoff, verbose)
class TrigramTagger(NgramTagger):
"""
    A tagger that chooses a token's tag based on its word string and on
    the preceding two words' tags. In particular, a tuple consisting
    of the previous two tags and the word is looked up in a table, and
    the corresponding tag is returned. Trigram taggers are typically
    trained on a tagged corpus.
"""
yaml_tag = '!nltk.TrigramTagger'
def __init__(self, train=None, model=None, backoff=None,
cutoff=1, verbose=False):
NgramTagger.__init__(self, 3, train, model, backoff, cutoff, verbose)
class AffixTagger(ContextTagger, yaml.YAMLObject):
"""
A tagger that chooses a token's tag based on a leading or trailing
substring of its word string. (It is important to note that these
substrings are not necessarily "true" morphological affixes). In
particular, a fixed-length substring of the word is looked up in a
table, and the corresponding tag is returned. Affix taggers are
    typically constructed by training them on a tagged corpus; see
L{train()}.
"""
yaml_tag = '!nltk.AffixTagger'
def __init__(self, train=None, model=None, affix_length=-3,
min_stem_length=2, backoff=None, cutoff=1, verbose=False):
"""
Construct a new affix tagger.
@param affix_length: The length of the affixes that should be
considered during training and tagging. Use negative
numbers for suffixes.
@param min_stem_length: Any words whose length is less than
C{min_stem_length+abs(affix_length)} will be assigned a
tag of C{None} by this tagger.
"""
if (train and model) or (not train and not model):
raise ValueError('Must specify either training data or '
'trained model')
ContextTagger.__init__(self, model, backoff)
self._affix_length = affix_length
self._min_word_length = min_stem_length + abs(affix_length)
if train:
self._train(train, cutoff, verbose)
def context(self, tokens, index, history):
token = tokens[index]
if len(token) < self._min_word_length:
return None
elif self._affix_length > 0:
return token[:self._affix_length]
else:
return token[self._affix_length:]
class RegexpTagger(SequentialBackoffTagger, yaml.YAMLObject):
"""
A tagger that assigns tags to words based on regular expressions
over word strings.
"""
yaml_tag = '!nltk.RegexpTagger'
def __init__(self, regexps, backoff=None):
"""
Construct a new regexp tagger.
@type regexps: C{list} of C{(str, str)}
@param regexps: A list of C{(regexp, tag)} pairs, each of
which indicates that a word matching C{regexp} should
            be tagged with C{tag}. The pairs will be evaluated in
            order. If none of the regexps match a word, it is
            assigned the tag C{None}, and the optional backoff
            tagger is consulted.
"""
self._regexps = regexps
SequentialBackoffTagger.__init__(self, backoff)
def choose_tag(self, tokens, index, history):
for regexp, tag in self._regexps:
if re.match(regexp, tokens[index]): # ignore history
return tag
return None
def __repr__(self):
return '<Regexp Tagger: size=%d>' % len(self._regexps)
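######################################################################
# Illustrative usage sketch (not part of the original module): it shows
# how taggers are chained via the ``backoff`` argument.  The tiny corpus
# below is made up for the example, and ``cutoff=0`` keeps every context
# observed during training.
######################################################################
if __name__ == '__main__':
    train_sents = [
        [('the', 'DT'), ('dog', 'NN'), ('barks', 'VBZ')],
        [('the', 'DT'), ('cat', 'NN'), ('sleeps', 'VBZ')],
    ]
    default = DefaultTagger('NN')   # last resort: tag everything as 'NN'
    unigram = UnigramTagger(train=train_sents, backoff=default, cutoff=0)
    # 'meows' was never seen during training, so it falls back to the DefaultTagger:
    print unigram.tag(['the', 'dog', 'meows'])
    # -> [('the', 'DT'), ('dog', 'NN'), ('meows', 'NN')]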
|
|
from __future__ import print_function
import requests
import sys
import robot_util
import json
import schedule
import platform
import subprocess
import tts.tts as tts
import watchdog
from socketIO_client import SocketIO, LoggingNamespace
if (sys.version_info > (3, 0)):
# import _thread as thread
import urllib.request as urllib2
else:
# import thread
import urllib2
controlHostPort = None
chatHostPort = None
videoPort = None
audioPort = None
infoServer = None
apiServer = None
robot_id = None
camera_id = None
messengerEnable = None
messengerHost = None
messengerPort = None
messengerName = None
messengerUsername = None
messengerPassword = None
messengerQueue = []
appServerSocketIO = None
controlSocketIO = None
chatSocket = None
messengerSocket = None
no_chat_server = None
secure_cert = None
debug_messages = None
onHandleChatMessage = None
def getControlHostPort():
url = 'https://%s/get_control_host_port/%s' % (infoServer, robot_id)
response = robot_util.getWithRetry(url, secure=secure_cert)
return json.loads(response)
def getChatHostPort():
url = 'https://%s/get_chat_host_port/%s' % (infoServer, robot_id)
response = robot_util.getWithRetry(url, secure=secure_cert)
return json.loads(response)
def getOwnerDetails(username):
url = 'https://%s/api/v1/accounts/%s' % (apiServer, username)
# url = 'https://api.letsrobot.tv/api/v1/robocasters/%s' % (username)
response = robot_util.getWithRetry(url, secure=secure_cert)
return json.loads(response)
def getVideoPort():
url = 'https://%s/get_video_port/%s' % (infoServer, camera_id)
response = robot_util.getWithRetry(url)
return json.loads(response)['mpeg_stream_port']
def getAudioPort():
url = 'https://%s/get_audio_port/%s' % (infoServer, camera_id)
response = robot_util.getWithRetry(url)
return json.loads(response)['audio_stream_port']
def getWebsocketRelayHost():
url = 'https://%s/get_websocket_relay_host/%s' % (infoServer, camera_id)
response = robot_util.getWithRetry(url)
return json.loads(response)
def getOnlineRobotSettings(robotID):
url = 'https://%s/api/v1/robots/%s' % (apiServer, robotID)
response = robot_util.getWithRetry(url).decode('utf-8')
return json.loads(response)
def getMessengerAuthToken():
url = 'https://%s/api/v1/authenticate' % (apiServer)
payload = {'username': messengerUsername, 'password': messengerPassword}
authToken = robot_util.getAuthToken(url, payload)
return authToken
def waitForAppServer():
while True:
try:
appServerSocketIO.wait(seconds=1)
except AttributeError:
if debug_messages:
print("Warning: App Server Socket not connected.");
def waitForControlServer():
while True:
try:
controlSocketIO.wait(seconds=1)
except AttributeError:
if debug_messages:
print("Warning: Control Server Socket not connected.");
def waitForChatServer():
global chatSocket
while True:
try:
chatSocket.wait(seconds=1)
except AttributeError:
if debug_messages:
print("Warning: Chat Server Socket not connected.");
except IndexError:
print("Error: Chat Server Socket has FAILED");
startListenForChatServer()
return
def waitForMessengerServer():
while True:
if len(messengerQueue):
messengerSocket.emit('chat_message', messengerQueue.pop(0))
try:
messengerSocket.wait(seconds=1)
except AttributeError:
if debug_messages:
print("Warning: Messenger Chat Socket not connected.");a
startListenForMessengerServer()
return
def startListenForAppServer():
watchdog.start("AppServerListen", waitForAppServer)
def startListenForControlServer():
watchdog.start("ControlServerListen", waitForControlServer)
def startListenForChatServer():
global chatSocket
chatSocket = SocketIO(chatHostPort['host'], chatHostPort['port'], LoggingNamespace)
print("Connected to chat socket.io")
chatSocket.on('chat_message_with_name', onHandleChatMessage)
chatSocket.on('connect', onHandleChatConnect)
chatSocket.on('reconnect', onHandleChatReconnect)
if debug_messages:
chatSocket.on('disconnect', onHandleChatDisconnect)
watchdog.start("ChatServerListen", waitForChatServer)
return chatSocket
def startListenForMessengerServer():
global messengerSocket
cookie = getMessengerAuthToken()
if not cookie.status_code == 200:
print('ERROR : Messenger username / password rejected by server')
sys.exit()
messengerSocket = SocketIO('https://%s' % messengerHost, messengerPort, LoggingNamespace, cookies={'connect.sid': cookie.cookies['connect.sid']})
print("Connected to messenger chat socket.io")
messengerSocket.on('connect', onHandleMessengerConnect)
messengerSocket.on('reconnect', onHandleMessengerReconnect)
if debug_messages:
messengerSocket.on('disconnect', onHandleMessengerDisconnect)
watchdog.start("MessengerServerListen", waitForMessengerServer)
return messengerSocket
def onHandleAppServerConnect(*args):
identifyRobotID()
if debug_messages:
        print()
        print("app socket.io connect")
        print()
def onHandleAppServerReconnect(*args):
    identifyRobotID()
    if debug_messages:
        print()
        print("app server socket.io reconnect")
        print()
def onHandleAppServerDisconnect(*args):
    print()
    print("app server socket.io disconnect")
    print()
def onHandleChatConnect(*args):
    identifyRobotID()
    if debug_messages:
        print()
        print("chat socket.io connect")
        print()
def onHandleChatReconnect(*args):
    identifyRobotID()
    if debug_messages:
        print()
        print("chat socket.io reconnect")
        print()
def onHandleChatDisconnect(*args):
    print()
    print("chat socket.io disconnect")
    print()
def onHandleControlConnect(*args):
    identifyRobotID()
    if debug_messages:
        print()
        print("control socket.io connect")
        print()
def onHandleControlReconnect(*args):
    identifyRobotID()
    if debug_messages:
        print()
        print("control socket.io reconnect")
        print()
def onHandleControlDisconnect(*args):
    print()
    print("control socket.io disconnect")
    print()
def onHandleMessengerConnect(*args):
    if debug_messages:
        print()
        print("messenger chat socket.io connect")
        print()
def onHandleMessengerReconnect(*args):
    if debug_messages:
        print()
        print("messenger chat socket.io reconnect")
        print()
def onHandleMessengerDisconnect(*args):
    print()
    print("messenger chat socket.io disconnect")
    print()
#TODO: Fix this to set up a new control socket instead of terminating the program.
# newControlHostPort = getControlHostPort() #Reget control port will start if it closed for whatever reason
# if controlHostPort['port'] != newControlHostPort['port']: #See if the port is not the same as before
# print "restart: control host port changed"
# sys.exit(1) #Auto restart script will restart if the control port is not the same (which is unlikely)
def setupSocketIO(robot_config):
global infoServer
global apiServer
global controlHostPort
global chatHostPort
global videoPort
global audioPort
global robot_id
global camera_id
global no_chat_server
global secure_cert
global debug_messages
global messengerEnable
global messengerHost
global messengerPort
global messengerName
global messengerUsername
global messengerPassword
debug_messages = robot_config.getboolean('misc', 'debug_messages')
robot_id = robot_config.get('robot', 'robot_id')
camera_id = robot_config.getint('robot', 'camera_id')
infoServer = robot_config.get('misc', 'info_server')
    apiServer = robot_config.get('misc', 'api_server')
no_chat_server = robot_config.getboolean('misc', 'no_chat_server')
secure_cert = robot_config.getboolean('misc', 'secure_cert')
messengerEnable = robot_config.get('messenger', 'enable')
messengerHost = robot_config.get('messenger', 'host')
messengerPort = robot_config.getint('messenger', 'port')
messengerUsername = robot_config.get('messenger', 'username')
messengerPassword = robot_config.get('messenger', 'password')
messengerName = robot_config.get('messenger', 'robot_name')
controlHostPort = getControlHostPort()
chatHostPort = getChatHostPort()
videoPort = getVideoPort()
audioPort = getAudioPort()
schedule.repeat_task(60, identifyRobot_task)
if debug_messages:
print("using socket io to connect to control", controlHostPort)
print("using socket io to connect to chat", chatHostPort)
print("using video port %d" % videoPort)
print("using audio port %d" % audioPort)
if robot_config.getboolean('misc', 'check_internet'):
#schedule a task to check internet status
schedule.task(robot_config.getint('misc', 'check_freq'), internetStatus_task)
def setupControlSocket(on_handle_command):
global controlSocketIO
if debug_messages:
print("Connecting socket.io to control host port", controlHostPort)
controlSocketIO = SocketIO(controlHostPort['host'], int(controlHostPort['port']), LoggingNamespace)
print("Connected to control socket.io")
startListenForControlServer()
controlSocketIO.on('connect', onHandleControlConnect)
controlSocketIO.on('reconnect', onHandleControlReconnect)
if debug_messages:
controlSocketIO.on('disconnect', onHandleControlDisconnect)
controlSocketIO.on('command_to_robot', on_handle_command)
return controlSocketIO
def setupChatSocket(on_handle_chat_message):
global onHandleChatMessage
if not no_chat_server:
if debug_messages:
print('Connecting socket.io to chat host port', chatHostPort)
onHandleChatMessage = on_handle_chat_message
startListenForChatServer()
return chatSocket
else:
print("chat server connection disabled")
def setupAppSocket(on_handle_exclusive_control):
global appServerSocketIO
if debug_messages:
print("Connecting to socket.io to app server")
appServerSocketIO = SocketIO('letsrobot.tv', 8022, LoggingNamespace)
print("Connected to app server")
startListenForAppServer()
appServerSocketIO.on('exclusive_control', on_handle_exclusive_control)
appServerSocketIO.on('connect', onHandleAppServerConnect)
appServerSocketIO.on('reconnect', onHandleAppServerReconnect)
if debug_messages:
appServerSocketIO.on('disconnect', onHandleAppServerDisconnect)
return appServerSocketIO
def setupMessengerSocket():
global messengerSocket
if not no_chat_server:
if debug_messages:
print('Connecting socket.io to messenger chat host port', "%s %s" % (messengerHost, messengerPort))
startListenForMessengerServer()
return messengerSocket
else:
print("messenger chat server connection disabled")
def sendChargeState(charging):
chargeState = {'robot_id': robot_id, 'charging': charging}
try:
appServerSocketIO.emit('charge_state', chargeState)
except AttributeError:
if debug_messages:
print("Error: Can't update server on charge state, no app socket")
print("charge state:", chargeState)
def sendOnlineState(state):
onlineState = {'send_video_process_exists': state, 'camera_id': camera_id}
try:
appServerSocketIO.emit('send_video_status', onlineState)
except AttributeError:
if debug_messages:
print("Error: Can't update server on charge state, no app socket")
print("online state: %s" % onlineState)
def ipInfoUpdate():
try:
appServerSocketIO.emit('ip_information',
{'ip': subprocess.check_output(["hostname", "-I"]).decode('utf-8'), 'robot_id': robot_id})
except AttributeError:
if debug_messages:
print("Error: Cant send ip address update, no app socket")
def identifyRobotID():
"""tells the server which robot is using the connection"""
if debug_messages:
print("Sending identify robot id message")
if not no_chat_server and not chatSocket == None:
chatSocket.emit('identify_robot_id', robot_id);
if not appServerSocketIO == None:
appServerSocketIO.emit('identify_robot_id', robot_id);
#schedule a task to tell the server our robot id.
def identifyRobot_task():
# tell the server what robot id is using this connection
identifyRobotID()
if platform.system() == 'Linux':
ipInfoUpdate()
def isInternetConnected():
try:
urllib2.urlopen('https://www.google.com', timeout=1)
return True
except urllib2.URLError as err:
return False
lastInternetStatus = False
def internetStatus_task():
global lastInternetStatus
internetStatus = isInternetConnected()
if internetStatus != lastInternetStatus:
if internetStatus:
tts.say("ok")
else:
tts.say("missing internet connection")
lastInternetStatus = internetStatus
def sendChatMessage(message):
new_message = "[%s] %s" % (messengerName, message)
print ("%s %s %s" % (new_message, messengerName, robot_id))
chat_message = { 'message': new_message,
'robot_id': robot_id,
'robot_name': messengerName,
'secret': "iknowyourelookingatthisthatsfine" }
if messengerEnable:
if not no_chat_server:
messengerQueue.append(chat_message)
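# ----------------------------------------------------------------------
# Illustrative call order (not part of the original module).  The setup
# helpers above assume setupSocketIO() has populated the module-level
# host/port globals from the robot config before any sockets are built:
#
#   setupSocketIO(robot_config)            # read config, resolve ports
#   setupControlSocket(on_command)         # 'command_to_robot' events
#   setupChatSocket(on_chat_message)       # 'chat_message_with_name' events
#   setupAppSocket(on_exclusive_control)   # app server events
#   setupMessengerSocket()                 # authenticated messenger chat
#
# robot_config is a ConfigParser-style object; the handler names are
# placeholders supplied by the caller.
# ----------------------------------------------------------------------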
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DICOM IO connector
This module implements several tools to facilitate the interaction between
a Google Cloud Healthcare DICOM store and a Beam pipeline.
For more details on DICOM store and API:
https://cloud.google.com/healthcare/docs/how-tos/dicom
The DICOM IO connector can be used to search metadata or write DICOM files
to DICOM store.
When used together with Google Pubsub message connector, the
`FormatToQido` PTransform implemented in this module can be used
to convert Pubsub messages to search requests.
Since traceability is crucial for healthcare
API users, every input or error message will be recorded in the output of
the DICOM IO connector. As a result, every PTransform in this module will
return a PCollection of dicts that encode results and detailed error messages.
Search instance's metadata (QIDO request)
===================================================
DicomSearch() wraps the QIDO request client and supports 3 levels of search.
Users should specify the level by setting the 'search_type' entry in the input
dict. They can also refine the search by adding tags to filter the results using
the 'params' entry. Here is a sample usage:
with Pipeline() as p:
input_dict = p | beam.Create(
[{'project_id': 'abc123', 'type': 'instances',...},
{'project_id': 'dicom_go', 'type': 'series',...}])
results = input_dict | io.gcp.DicomSearch()
results | 'print successful search' >> beam.Map(
lambda x: print(x['result'] if x['success'] else None))
results | 'print failed search' >> beam.Map(
lambda x: print(x['result'] if not x['success'] else None))
In the example above, successful qido search results and error messages for
failed requests are printed. In practice, users can filter these results and
write them wherever they want.
Convert DICOM Pubsub message to Qido search request
===================================================
Healthcare API users might read messages from Pubsub to monitor store
operations (e.g. a new file) in a DICOM store. Each Pubsub message encodes
the DICOM web store path as well as the instance ids. If users are interested
in getting a new instance's metadata, they can use the `FormatToQido` transform
to convert the message into Qido Search dict then use the `DicomSearch`
transform. Here is a sample usage:
pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
p = beam.Pipeline(options=pipeline_options)
pubsub = p | beam.io.ReadStringFromPubsub(subscription='a_dicom_store')
results = pubsub | FormatToQido()
success = results | 'filter message' >> beam.Filter(lambda x: x['success'])
qido_dict = success | 'get qido request' >> beam.Map(lambda x: x['result'])
metadata = qido_dict | DicomSearch()
In the example above, the pipeline is listening to a pubsub topic and waiting
for messages from DICOM API. When a new DICOM file comes into the storage, the
pipeline will receive a pubsub message, convert it to a Qido request dict and
feed it to DicomSearch() PTransform. As a result, users can get the metadata for
every new DICOM file. Note that not every pubsub message received is from DICOM
API, so we need to filter the results first.
Store a DICOM file in a DICOM storage
===================================================
UploadToDicomStore() wraps store request API and users can use it to send a
DICOM file to a DICOM store. It supports two types of input: 1. file data as
byte[]; 2. a fileio object. Users should set the 'input_type' when initializing
this PTransform. Here are the examples:
with Pipeline() as p:
input_dict = {'project_id': 'abc123', 'type': 'instances',...}
path = "gcs://bucketname/something/a.dcm"
match = p | fileio.MatchFiles(path)
fileio_obj = match | fileio.ReadAll()
results = fileio_obj | UploadToDicomStore(input_dict, 'fileio')
with Pipeline() as p:
input_dict = {'project_id': 'abc123', 'type': 'instances',...}
f = open("abc.dcm", "rb")
dcm_file = f.read()
byte_file = p | 'create byte file' >> beam.Create([dcm_file])
results = byte_file | UploadToDicomStore(input_dict, 'bytes')
The first example uses a PCollection of fileio objects as input.
UploadToDicomStore will read DICOM files from the objects and send them
to a DICOM storage.
The second example uses a PCollection of byte[] as input. UploadToDicomStore
will directly send those DICOM files to a DICOM storage.
Users can also get the operation results in the output PCollection if they want
to handle the failed store requests.
"""
# pytype: skip-file
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
import apache_beam as beam
from apache_beam.io.gcp.dicomclient import DicomApiHttpClient
from apache_beam.transforms import PTransform
class DicomSearch(PTransform):
"""A PTransform used for retrieving DICOM instance metadata from Google
Cloud DICOM store. It takes a PCollection of dicts as input and return
a PCollection of dict as results:
INPUT:
The input dict represents DICOM web path parameters, which has the following
string keys and values:
{
'project_id': str,
'region': str,
'dataset_id': str,
'dicom_store_id': str,
'search_type': str,
'params': dict(str,str) (Optional),
}
Key-value pairs:
project_id: Id of the project in which the DICOM store is
located. (Required)
region: Region where the DICOM store resides. (Required)
dataset_id: Id of the dataset where DICOM store belongs to. (Required)
dicom_store_id: Id of the dicom store. (Required)
search_type: Which type of search it is, could only be one of the three
values: 'instances', 'series', or 'studies'. (Required)
params: A dict of str:str pairs used to refine QIDO search. (Optional)
Supported tags in three categories:
1.Studies:
* StudyInstanceUID,
* PatientName,
* PatientID,
* AccessionNumber,
* ReferringPhysicianName,
* StudyDate,
2.Series: all study level search terms and
* SeriesInstanceUID,
* Modality,
3.Instances: all study/series level search terms and
* SOPInstanceUID,
e.g. {"StudyInstanceUID":"1","SeriesInstanceUID":"2"}
OUTPUT:
The output dict wraps results as well as error messages:
{
'result': a list of dicts in JSON style.
'success': boolean value telling whether the operation is successful.
'input': detail ids and dicomweb path for this retrieval.
'status': status code from the server, used as error message.
}
"""
def __init__(
self, buffer_size=8, max_workers=5, client=None, credential=None):
"""Initializes DicomSearch.
Args:
buffer_size: # type: Int. Size of the request buffer.
      max_workers: # type: Int. Maximum number of threads a worker can
        create. If it is set to one, all the requests will be processed
        sequentially in a worker.
      client: # type: object. If it is specified, all the API calls will be
        made by this client instead of the default one (DicomApiHttpClient).
credential: # type: Google credential object, if it is specified, the
Http client will use it to create sessions instead of the default.
"""
self.buffer_size = buffer_size
self.max_workers = max_workers
self.client = client or DicomApiHttpClient()
self.credential = credential
def expand(self, pcoll):
return pcoll | beam.ParDo(
_QidoReadFn(
self.buffer_size, self.max_workers, self.client, self.credential))
class _QidoReadFn(beam.DoFn):
"""A DoFn for executing every qido query request."""
def __init__(self, buffer_size, max_workers, client, credential=None):
self.buffer_size = buffer_size
self.max_workers = max_workers
self.client = client
self.credential = credential
def start_bundle(self):
self.buffer = []
def finish_bundle(self):
for item in self._flush():
yield item
def validate_element(self, element):
# Check if all required keys present.
required_keys = [
'project_id', 'region', 'dataset_id', 'dicom_store_id', 'search_type'
]
for key in required_keys:
if key not in element:
error_message = 'Must have %s in the dict.' % (key)
return False, error_message
# Check if return type is correct.
if element['search_type'] in ['instances', "studies", "series"]:
return True, None
else:
error_message = (
'Search type can only be "studies", '
'"instances" or "series"')
return False, error_message
def process(
self,
element,
window=beam.DoFn.WindowParam,
timestamp=beam.DoFn.TimestampParam):
# Check if the element is valid
valid, error_message = self.validate_element(element)
if valid:
self.buffer.append((element, window, timestamp))
if len(self.buffer) >= self.buffer_size:
for item in self._flush():
yield item
else:
      # Return this when the input dict does not meet the requirements
out = {}
out['result'] = []
out['status'] = error_message
out['input'] = element
out['success'] = False
yield out
def make_request(self, element):
# Sending Qido request to DICOM Api
project_id = element['project_id']
region = element['region']
dataset_id = element['dataset_id']
dicom_store_id = element['dicom_store_id']
search_type = element['search_type']
params = element['params'] if 'params' in element else None
# Call qido search http client
result, status_code = self.client.qido_search(
project_id, region, dataset_id, dicom_store_id,
search_type, params, self.credential
)
out = {}
out['result'] = result
out['status'] = status_code
out['input'] = element
out['success'] = (status_code == 200)
return out
def process_buffer_element(self, buffer_element):
# Thread job runner - each thread makes a Qido search request
value = self.make_request(buffer_element[0])
windows = [buffer_element[1]]
timestamp = buffer_element[2]
return beam.utils.windowed_value.WindowedValue(
value=value, timestamp=timestamp, windows=windows)
def _flush(self):
    # Create thread pool executor and process the buffered elements in parallel
executor = ThreadPoolExecutor(max_workers=self.max_workers)
futures = [
executor.submit(self.process_buffer_element, ele) for ele in self.buffer
]
self.buffer = []
for f in as_completed(futures):
yield f.result()
class FormatToQido(PTransform):
"""A PTransform for converting pubsub messages into search input dict.
Takes PCollection of string as input and returns a PCollection of dict as
results. Note that some pubsub messages may not be from DICOM API, which
will be recorded as failed conversions.
INPUT:
The input are normally strings from Pubsub topic:
"projects/PROJECT_ID/locations/LOCATION/datasets/DATASET_ID/
dicomStores/DICOM_STORE_ID/dicomWeb/studies/STUDY_UID/
series/SERIES_UID/instances/INSTANCE_UID"
OUTPUT:
The output dict encodes results as well as error messages:
{
'result': a dict representing instance level qido search request.
'success': boolean value telling whether the conversion is successful.
'input': input pubsub message string.
}
"""
def __init__(self, credential=None):
"""Initializes FormatToQido.
Args:
credential: # type: Google credential object, if it is specified, the
Http client will use it instead of the default one.
"""
self.credential = credential
def expand(self, pcoll):
return pcoll | beam.ParDo(_ConvertStringToQido())
class _ConvertStringToQido(beam.DoFn):
"""A DoFn for converting pubsub string to qido search parameters."""
def process(self, element):
# Some constants for DICOM pubsub message
NUM_PUBSUB_STR_ENTRIES = 15
NUM_DICOM_WEBPATH_PARAMETERS = 5
NUM_TOTAL_PARAMETERS = 8
INDEX_PROJECT_ID = 1
INDEX_REGION = 3
INDEX_DATASET_ID = 5
INDEX_DICOMSTORE_ID = 7
INDEX_STUDY_ID = 10
INDEX_SERIE_ID = 12
INDEX_INSTANCE_ID = 14
entries = element.split('/')
# Output dict with error message, used when
# receiving invalid pubsub string.
error_dict = {}
error_dict['result'] = {}
error_dict['input'] = element
error_dict['success'] = False
if len(entries) != NUM_PUBSUB_STR_ENTRIES:
return [error_dict]
required_keys = [
'projects',
'locations',
'datasets',
'dicomStores',
'dicomWeb',
'studies',
'series',
'instances'
]
# Check if the required keys present and
# the positions of those keys are correct
for i in range(NUM_DICOM_WEBPATH_PARAMETERS):
if required_keys[i] != entries[i * 2]:
return [error_dict]
for i in range(NUM_DICOM_WEBPATH_PARAMETERS, NUM_TOTAL_PARAMETERS):
if required_keys[i] != entries[i * 2 - 1]:
return [error_dict]
# Compose dicom webpath parameters for qido search
qido_dict = {}
qido_dict['project_id'] = entries[INDEX_PROJECT_ID]
qido_dict['region'] = entries[INDEX_REGION]
qido_dict['dataset_id'] = entries[INDEX_DATASET_ID]
qido_dict['dicom_store_id'] = entries[INDEX_DICOMSTORE_ID]
qido_dict['search_type'] = 'instances'
# Compose instance level params for qido search
params = {}
params['StudyInstanceUID'] = entries[INDEX_STUDY_ID]
params['SeriesInstanceUID'] = entries[INDEX_SERIE_ID]
params['SOPInstanceUID'] = entries[INDEX_INSTANCE_ID]
qido_dict['params'] = params
out = {}
out['result'] = qido_dict
out['input'] = element
out['success'] = True
return [out]
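  # Worked example (illustrative ids): a pubsub string such as
  #   "projects/p1/locations/us/datasets/d1/dicomStores/s1/dicomWeb/studies/1.2/series/3.4/instances/5.6"
  # splits into 15 entries and yields a 'result' dict of
  #   {'project_id': 'p1', 'region': 'us', 'dataset_id': 'd1',
  #    'dicom_store_id': 's1', 'search_type': 'instances',
  #    'params': {'StudyInstanceUID': '1.2', 'SeriesInstanceUID': '3.4',
  #               'SOPInstanceUID': '5.6'}}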
class UploadToDicomStore(PTransform):
"""A PTransform for storing instances to a DICOM store.
  Takes a PCollection of byte[] or fileio objects as input and returns a
  PCollection of dicts as results. The inputs are normally DICOM files as
  bytes or fileio file objects.
INPUT:
This PTransform supports two types of input:
1. Byte[]: representing dicom file.
2. Fileio object: stream file object.
OUTPUT:
The output dict encodes status as well as error messages:
{
'success': boolean value telling whether the store is successful.
'input': undeliverable data. Exactly the same as the input,
only set if the operation is failed.
'status': status code from the server, used as error messages.
}
"""
def __init__(
self,
destination_dict,
input_type,
buffer_size=8,
max_workers=5,
client=None,
credential=None):
"""Initializes UploadToDicomStore.
Args:
destination_dict: # type: python dict, encodes DICOM endpoint information:
{
'project_id': str,
'region': str,
'dataset_id': str,
'dicom_store_id': str,
}
Key-value pairs:
      * project_id: Id of the project in which the DICOM store is located. (Required)
* region: Region where the DICOM store resides. (Required)
* dataset_id: Id of the dataset where DICOM store belongs to. (Required)
* dicom_store_id: Id of the dicom store. (Required)
input_type: # type: string, could only be 'bytes' or 'fileio'
buffer_size: # type: Int. Size of the request buffer.
      max_workers: # type: Int. Maximum number of threads a worker can
        create. If it is set to one, all the requests will be processed
        sequentially in a worker.
      client: # type: object. If it is specified, all the API calls will be
        made by this client instead of the default one (DicomApiHttpClient).
credential: # type: Google credential object, if it is specified, the
Http client will use it instead of the default one.
"""
self.destination_dict = destination_dict
# input_type pre-check
if input_type not in ['bytes', 'fileio']:
raise ValueError("input_type could only be 'bytes' or 'fileio'")
self.input_type = input_type
self.buffer_size = buffer_size
self.max_workers = max_workers
self.client = client
self.credential = credential
def expand(self, pcoll):
return pcoll | beam.ParDo(
_StoreInstance(
self.destination_dict,
self.input_type,
self.buffer_size,
self.max_workers,
self.client,
self.credential))
class _StoreInstance(beam.DoFn):
"""A DoFn read or fetch dicom files then push it to a dicom store."""
def __init__(
self,
destination_dict,
input_type,
buffer_size,
max_workers,
client,
credential=None):
# pre-check destination dict
required_keys = ['project_id', 'region', 'dataset_id', 'dicom_store_id']
for key in required_keys:
if key not in destination_dict:
raise ValueError('Must have %s in the dict.' % (key))
self.destination_dict = destination_dict
self.input_type = input_type
self.buffer_size = buffer_size
self.max_workers = max_workers
self.client = client
self.credential = credential
def start_bundle(self):
self.buffer = []
def finish_bundle(self):
for item in self._flush():
yield item
def process(
self,
element,
window=beam.DoFn.WindowParam,
timestamp=beam.DoFn.TimestampParam):
self.buffer.append((element, window, timestamp))
if len(self.buffer) >= self.buffer_size:
for item in self._flush():
yield item
def make_request(self, dicom_file):
# Send file to DICOM store and records the results.
project_id = self.destination_dict['project_id']
region = self.destination_dict['region']
dataset_id = self.destination_dict['dataset_id']
dicom_store_id = self.destination_dict['dicom_store_id']
# Feed the dicom file into store client
if self.client:
_, status_code = self.client.dicomweb_store_instance(
project_id, region, dataset_id, dicom_store_id, dicom_file,
self.credential
)
else:
_, status_code = DicomApiHttpClient().dicomweb_store_instance(
project_id, region, dataset_id, dicom_store_id, dicom_file,
self.credential
)
out = {}
out['status'] = status_code
out['success'] = (status_code == 200)
return out
def read_dicom_file(self, buffer_element):
    # Read the file based on the input type. If the read fails, return
    # an error dict which records the input and error messages.
try:
if self.input_type == 'fileio':
f = buffer_element.open()
data = f.read()
f.close()
return True, data
else:
return True, buffer_element
except Exception as error_message:
error_out = {}
error_out['status'] = error_message
error_out['success'] = False
return False, error_out
def process_buffer_element(self, buffer_element):
# Thread job runner - each thread stores a DICOM file
success, read_result = self.read_dicom_file(buffer_element[0])
windows = [buffer_element[1]]
timestamp = buffer_element[2]
value = None
if success:
value = self.make_request(read_result)
else:
value = read_result
# save the undeliverable data
if not value['success']:
value['input'] = buffer_element[0]
return beam.utils.windowed_value.WindowedValue(
value=value, timestamp=timestamp, windows=windows)
def _flush(self):
    # Create thread pool executor and process the buffered elements in parallel
executor = ThreadPoolExecutor(max_workers=self.max_workers)
futures = [
executor.submit(self.process_buffer_element, ele) for ele in self.buffer
]
self.buffer = []
for f in as_completed(futures):
yield f.result()
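# Illustrative sketch: the destination_dict that _StoreInstance validates must
# carry the four keys below; the values here are placeholders, not real Cloud
# Healthcare resources.
_EXAMPLE_DESTINATION_DICT = {
'project_id': 'my-project',  # hypothetical project
'region': 'us-central1',  # hypothetical region
'dataset_id': 'my-dataset',  # hypothetical healthcare dataset
'dicom_store_id': 'my-dicom-store',  # hypothetical DICOM store
}
# With input_type='bytes' each input element is taken to be the raw DICOM file
# content; with input_type='fileio' it is expected to expose .open() (for
# example a fileio.ReadableFile), as read_dicom_file above assumes.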
|
|
"""Device automation helpers for toggle entity."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.homeassistant.triggers import state as state_trigger
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CONDITION,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, Context, HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DEVICE_TRIGGER_BASE_SCHEMA, entity
from .const import (
CONF_IS_OFF,
CONF_IS_ON,
CONF_TOGGLE,
CONF_TURN_OFF,
CONF_TURN_ON,
CONF_TURNED_OFF,
CONF_TURNED_ON,
)
# mypy: allow-untyped-calls, allow-untyped-defs
ENTITY_ACTIONS = [
{
# Turn entity off
CONF_TYPE: CONF_TURN_OFF
},
{
# Turn entity on
CONF_TYPE: CONF_TURN_ON
},
{
# Toggle entity
CONF_TYPE: CONF_TOGGLE
},
]
ENTITY_CONDITIONS = [
{
# True when entity is turned off
CONF_CONDITION: "device",
CONF_TYPE: CONF_IS_OFF,
},
{
# True when entity is turned on
CONF_CONDITION: "device",
CONF_TYPE: CONF_IS_ON,
},
]
ENTITY_TRIGGERS = [
{
# Trigger when entity is turned off
CONF_PLATFORM: "device",
CONF_TYPE: CONF_TURNED_OFF,
},
{
# Trigger when entity is turned on
CONF_PLATFORM: "device",
CONF_TYPE: CONF_TURNED_ON,
},
]
DEVICE_ACTION_TYPES = [CONF_TOGGLE, CONF_TURN_OFF, CONF_TURN_ON]
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(DEVICE_ACTION_TYPES),
}
)
CONDITION_SCHEMA = cv.DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In([CONF_IS_OFF, CONF_IS_ON]),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
_TOGGLE_TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In([CONF_TURNED_OFF, CONF_TURNED_ON]),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
TRIGGER_SCHEMA = vol.Any(entity.TRIGGER_SCHEMA, _TOGGLE_TRIGGER_SCHEMA)
async def async_call_action_from_config(
hass: HomeAssistant,
config: ConfigType,
variables: TemplateVarsType,
context: Context | None,
domain: str,
) -> None:
"""Change state based on configuration."""
action_type = config[CONF_TYPE]
if action_type == CONF_TURN_ON:
action = "turn_on"
elif action_type == CONF_TURN_OFF:
action = "turn_off"
else:
action = "toggle"
service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]}
await hass.services.async_call(
domain, action, service_data, blocking=True, context=context
)
@callback
def async_condition_from_config(
hass: HomeAssistant, config: ConfigType
) -> condition.ConditionCheckerType:
"""Evaluate state based on configuration."""
if config[CONF_TYPE] == CONF_IS_ON:
stat = "on"
else:
stat = "off"
state_config = {
condition.CONF_CONDITION: "state",
condition.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
condition.CONF_STATE: stat,
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
state_config = cv.STATE_CONDITION_SCHEMA(state_config)
state_config = condition.state_validate_config(hass, state_config)
return condition.state_from_config(state_config)
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
"""Listen for state changes based on configuration."""
if config[CONF_TYPE] not in [CONF_TURNED_ON, CONF_TURNED_OFF]:
return await entity.async_attach_trigger(hass, config, action, automation_info)
if config[CONF_TYPE] == CONF_TURNED_ON:
to_state = "on"
else:
to_state = "off"
state_config = {
CONF_PLATFORM: "state",
state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_TO: to_state,
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
state_config = await state_trigger.async_validate_trigger_config(hass, state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
async def _async_get_automations(
hass: HomeAssistant,
device_id: str,
automation_templates: list[dict[str, str]],
domain: str,
) -> list[dict[str, str]]:
"""List device automations."""
automations: list[dict[str, str]] = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == domain
]
for entry in entries:
automations.extend(
{
**template,
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": domain,
}
for template in automation_templates
)
return automations
async def async_get_actions(
hass: HomeAssistant, device_id: str, domain: str
) -> list[dict[str, str]]:
"""List device actions."""
return await _async_get_automations(hass, device_id, ENTITY_ACTIONS, domain)
async def async_get_conditions(
hass: HomeAssistant, device_id: str, domain: str
) -> list[dict[str, str]]:
"""List device conditions."""
return await _async_get_automations(hass, device_id, ENTITY_CONDITIONS, domain)
async def async_get_triggers(
hass: HomeAssistant, device_id: str, domain: str
) -> list[dict[str, Any]]:
"""List device triggers."""
triggers = await entity.async_get_triggers(hass, device_id, domain)
triggers.extend(
await _async_get_automations(hass, device_id, ENTITY_TRIGGERS, domain)
)
return triggers
async def async_get_condition_capabilities(
hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
"""List condition capabilities."""
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
async def async_get_trigger_capabilities(
hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
"""List trigger capabilities."""
if config[CONF_TYPE] not in [CONF_TURNED_ON, CONF_TURNED_OFF]:
return await entity.async_get_trigger_capabilities(hass, config)
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
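# Illustrative sketch: a trigger config of the shape _TOGGLE_TRIGGER_SCHEMA and
# async_attach_trigger above expect. The ids and the domain are placeholders,
# and DEVICE_TRIGGER_BASE_SCHEMA contributes the usual device-trigger keys.
_EXAMPLE_TRIGGER_CONFIG = {
CONF_PLATFORM: "device",
"domain": "switch",  # hypothetical integration using this helper
"device_id": "abcdef123456",  # hypothetical device id
CONF_ENTITY_ID: "switch.kitchen_light",  # hypothetical entity
CONF_TYPE: CONF_TURNED_ON,
CONF_FOR: {"seconds": 5},  # optional, validated as a positive time period
}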
|
|
# #!/usr/bin/python
# last edit abigailc@Actaeon on october 24 2016
### usage $ python Concatenate_2.py /Users/abigailc/Documents/Test/ -f "Asco1.fasta Asco2.fasta Asco3.fasta Asco4.fasta" -o AscoCC.fasta
#this program will open n aligned files, and concatenate them.
#sequences must either have exactly identical tip names across files
#or be in shortened format " >Taxonomy|Info|Whatever|Species_name|##########" and use the -s flag
###########start##############
def master(fasta_list, output_name, species_mode = False):
#generate your fasta objects and lists of identifiers
list_spec_lists, list_id_lists, list_of_fasta_objects = make_fasta_info(fasta_list)
#either correlate them by species_name or by seqid
if species_mode is False:
corr_ids = correlate_ids(list_id_lists)
elif species_mode is True:
corr_ids = correlate_ids(list_spec_lists)
final = Concatenate(corr_ids, list_of_fasta_objects, output_name)
print("Finished!")
print("Your new concat file is located at: "+final)
CCfas = Fasta(final)
CCfas.gen_original_lists(final)
print("There are: "+str(CCfas.number_seqs())+" sequences")
print("And "+str(CCfas.number_of_sites())+" sites per sequence")
#this generates an object of class Fasta for each input .fasta file, and generates the species_lists and id lists.
def make_fasta_info(fasta_list):
list_of_species_lists = []
list_of_idlists = []
list_of_fas = []
for f in fasta_list:
g = Fasta(f)
g.gen_original_lists(f)
g.gen_species_lists()
species_list = g.species_list
ids_list = g.ids
list_of_idlists.append(ids_list)
list_of_species_lists.append(species_list)
list_of_fas.append(g)
return list_of_species_lists, list_of_idlists, list_of_fas
# this will correlate IDs based on Genus_species or full id name across datasets (all of which need to be opened in class Fasta)
def correlate_ids(list_of_id_lists):
numlists = len(list_of_id_lists)
output_list = []
used = []
for item in list_of_id_lists:
# list1
for ids in item:
# id1 of list1
name = ids
if name in used:
pass
else:
used.append(name)
id_index_list = []
id_index_list.append(ids)
# check the index of that id in each list and append to "id_index_list"
# if the id is not in that list, should append "NA"
for eachlist in list_of_id_lists:
try:
index = eachlist.index(ids)
except ValueError:
index = "NA"
id_index_list.append(index)
# add the result of scanning that id to overall output list.
output_list.append(id_index_list)
# output list looks like:
# outputlist = [ ["Cat",1,2,3,4] , ["Dog", 2,1,13,14] ]
print("Correlated sequences")
print(output_list)
return output_list
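# Illustrative sketch of the correlation output (worked by hand from the logic
# above, not executed here): two lists that share their members produce one row
# per unique id, holding that id's index (or "NA") in every input list.
#
#   correlate_ids([["Cat", "Dog"], ["Dog", "Cat"]])
#   -> [["Cat", 0, 1], ["Dog", 1, 0]]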
#does the actual concat and prints the output file.
def Concatenate(indexed_ids_list, fasta_class_list, output_name):
# indexed_ids_list in form [ ["Cat", 1,2,3],["dog",2,1,2] ]
# this part will create a new, concatenated .fasta file
# requires indexed_ids_list, fasta_class_list, function that returns
# number of sites. self.number_of_sites
lenlist_final = []
with open(output_name, "w") as new:
for item in indexed_ids_list:
#lenlist will collect lengths of each seq to verify at the end that each seq is the same length as each other (nothing was messed up)
lenlist = []
#write the first id
new.write(">" + item[0].strip()+"\n")
fas_num = 0
allseq = ""
#ensure that for each fas in fasta class list, all sequences are of the same length.
for fas in fasta_class_list:
fas_num += 1
# fas_num keeps track of what number fasta we are on, which
# correlates to the index of the index in indexed_ids_list
search_index = item[fas_num]
# search_index will be "2" if this is the first fasta (fas_num = 1) and item = ["cat", "2", "3", "23"]
#represents "look at the second sequence of fasta (fas)"
# if search_index is NA, generate a str of "-" that is n long
if search_index == "NA":
ndash = fas.number_of_sites()
retreived_seq = ""
for i in range(int(ndash)):
retreived_seq = retreived_seq + ("-")
else:
retreived_seq = fas.seqs[search_index]
# retreived_seq will be something like "the 22nd sequence in
# object Fas's sequence list..." or "BLAHSEQUENCEDATA"
#be sure to remove any \n that may have been saved up in there.
retreived_seq = re.sub("\n", "", retreived_seq)
#save how long the retrieved seq is for later verification that all are the same length
lenlist.append(len(retreived_seq))
#count is how many characters have been written to new concat file
count = 0
#retrieved seq is like "Cat_gene_1"
#allseq is like "Cat_gene_1"+"Cat_gene_2"+"Cat_gene_3"
allseq = allseq + retreived_seq
#add length total for verification purposes
lenlist_final.append(len(allseq))
newseq = ""
#print in 80 character lines so it looks nice / fasta standard
for letter in allseq:
if count > 79:
count = 0
newseq = newseq + ("\n")
newseq = newseq + letter
count += 1
#write the sequence to your new file.
new.write(newseq.strip()+"\n")
#verification bit
for length in lenlist_final:
if length == lenlist_final[0]:
pass
else:
print("ERROR your concat sequences are not all of the same length something has gone horribly wrong! aborting.")
raise SystemExit
return output_name
#necessary class
class Fasta:
def __init__(self, name="whatever"):
# all ids should be stripped and have ">" removed for reasons.
# for now, sequences do not have any stripping applied
self.name = name
self.ids = []
self.original_ids = []
self.original_seqs = []
self.seqs = []
self.species_list = []
def gen_original_lists(self, fastaname):
try:
with open(fastaname) as fastafile:
for line in fastafile:
if "\n" == line:
pass
if ">" in line:
# write the previous AA seq
try:
AAseq = AAseq.strip()
self.seqs.append(AAseq)
self.original_seqs.append(AAseq)
except:
pass
# initialize a new AAseq
AAseq = ""
# format the seqID
newline = line.strip()
newline = newline.strip(">")
# write the seqID
self.ids.append(newline)
self.original_ids.append(newline)
else:
AAseq = AAseq + line
AAseq=AAseq.strip()
# catch the last AAseq pass
self.seqs.append(AAseq)
self.original_seqs.append(AAseq)
print("Initial sequence and ID lists created for "+self.name+". Contains " +
str(len(self.ids)) + " sequences")
except UnboundLocalError:
print("probably this file :" + fastaname +
" has nothing in it. skipping.")
pass
except IOError:
print("no file named: " + fastaname +
" exists... creating a blank file")
with open(fastaname, "w") as new:
pass
print("hopefully you intended that!")
def number_seqs(self):
a = len(self.ids)
return a
def number_of_sites(self, num = 0):
testseq = self.original_seqs[num]
testseq = re.sub("\n", "", testseq)
#print(testseq)
#print (len(self.original_seqs[num]))
#print (len(testseq))
return len(testseq)
def gen_species_lists(self):
for item in self.ids:
# item will be "Nostoc_punctiforme_PCC_73102|gi#|186468349" or "Blah|Rank|Nostoc_punctiforme_PCC_73102|gi#|186468349"
# for now, ignores anything that isn't Genus_species.
# for example, ignores strain, ., things with an extra
# word, etc.
taxon = re.sub("([^_]*)([A-Z][a-z]*_[a-z]*)(.*)", "\\2", item)
if "#" in taxon:
print ("TAXON error in gen_species_lists():" + taxon)
self.species_list.append(taxon)
return self.species_list
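# Illustrative behaviour of the species regex above (assuming a typical seqID;
# worked by hand, not executed here):
#
#   re.sub("([^_]*)([A-Z][a-z]*_[a-z]*)(.*)", "\\2",
#          "Nostoc_punctiforme_PCC_73102|gi#|186468349")
#   -> "Nostoc_punctiforme"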
# parser
if __name__ == "__main__":
print("Running in terminal")
import sys
import argparse
import os
import re
parser = argparse.ArgumentParser(description="All")
parser.add_argument("directory", nargs='?', default=os.getcwd(), type=str, help="type name of directory to run in (where .nex resides)")
parser.add_argument("-f", "--fasta", action = "store", default = False, help="give each pre-aligned fasta file to concatenate within quotes, seperated by spaces. eg \"first.fasta second.fasta third.fasta\" ")
parser.add_argument("-o", "--output", action = "store", default = "Concat.fasta", help="provide a name for your output")
parser.add_argument("-s", "--species_mode", action = "store_true", default = False, help="use species_name, not entire seqID")
args = parser.parse_args()
#change dir if given
print("hopefully your seqIDs (or species_names with -s) are identical across each file and have no repeats.")
try:
os.chdir(args.directory)
except:
print ("didn't change dir")
#run the thing
if args.fasta is False:
print("Please give a list of .fasta files!")
raise SystemExit
fastas = args.fasta.split()
if len(fastas) == 1:
print("Need more than one file to concatenate???")
if args.species_mode is True:
master(fastas, args.output, True)
else:
master(fastas, args.output)
print("Well, it ought to be done now!")
|
|
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType,
_remove_dead_weakref)
from _weakrefset import WeakSet, _IterationGuard
import _collections_abc # Import after _weakref to avoid circular import.
import sys
import itertools
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet", "WeakMethod", "finalize"]
class WeakMethod(ref):
"""
A custom `weakref.ref` subclass which simulates a weak reference to
a bound method, working around the lifetime problem of bound methods.
"""
__slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__"
def __new__(cls, meth, callback=None):
try:
obj = meth.__self__
func = meth.__func__
except AttributeError:
raise TypeError("argument should be a bound method, not {}"
.format(type(meth))) from None
def _cb(arg):
# The self-weakref trick is needed to avoid creating a reference
# cycle.
self = self_wr()
if self._alive:
self._alive = False
if callback is not None:
callback(self)
self = ref.__new__(cls, obj, _cb)
self._func_ref = ref(func, _cb)
self._meth_type = type(meth)
self._alive = True
self_wr = ref(self)
return self
def __call__(self):
obj = super().__call__()
func = self._func_ref()
if obj is None or func is None:
return None
return self._meth_type(func, obj)
def __eq__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is other
return ref.__eq__(self, other) and self._func_ref == other._func_ref
return False
def __ne__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is not other
return ref.__ne__(self, other) or self._func_ref != other._func_ref
return True
__hash__ = ref.__hash__
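# Usage sketch (illustrative, not exercised in this module): a WeakMethod dies
# with its instance instead of keeping it alive the way a strong reference to
# the bound method would.
#
#   class C:
#       def m(self): return 42
#   c = C()
#   wm = WeakMethod(c.m)
#   wm()()   # -> 42 while c is alive
#   del c
#   wm()     # -> None once the instance has been collected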
class WeakValueDictionary(_collections_abc.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(*args, **kw):
if not args:
raise TypeError("descriptor '__init__' of 'WeakValueDictionary' "
"object needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
def remove(wr, selfref=ref(self), _atomic_removal=_remove_dead_weakref):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
# Atomic removal is necessary since this function
# can be called asynchronously by the GC
_atomic_removal(d, wr.key)
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
key = l.pop()
_remove_dead_weakref(d, key)
def __getitem__(self, key):
if self._pending_removals:
self._commit_removals()
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
if self._pending_removals:
self._commit_removals()
return len(self.data)
def __contains__(self, key):
if self._pending_removals:
self._commit_removals()
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<%s at %#x>" % (self.__class__.__name__, id(self))
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
if self._pending_removals:
self._commit_removals()
new = WeakValueDictionary()
with _IterationGuard(self):
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
if self._pending_removals:
self._commit_removals()
new = self.__class__()
with _IterationGuard(self):
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
if self._pending_removals:
self._commit_removals()
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
# This should only happen
return default
else:
return o
def items(self):
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
yield from self.data.values()
def values(self):
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
o = None
if o is None:
if args:
return args[0]
else:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
o = self.data[key]()
except KeyError:
o = None
if o is None:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return o
def update(*args, **kwargs):
if not args:
raise TypeError("descriptor 'update' of 'WeakValueDictionary' "
"object needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
dict = args[0] if args else None
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
if self._pending_removals:
self._commit_removals()
return list(self.data.values())
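# Usage sketch (illustrative, not exercised in this module): entries disappear
# once nothing else references the stored value.
#
#   class Obj: pass
#   d = WeakValueDictionary()
#   v = Obj()
#   d['k'] = v
#   'k' in d   # -> True
#   del v      # the entry goes away once the value is collected
#   'k' in d   # -> False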
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
class WeakKeyDictionary(_collections_abc.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
self._dirty_len = False
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def _scrub_removals(self):
d = self.data
self._pending_removals = [k for k in self._pending_removals if k in d]
self._dirty_len = False
def __delitem__(self, key):
self._dirty_len = True
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
if self._dirty_len and self._pending_removals:
# self._pending_removals may still contain keys which were
# explicitly removed, we have to scrub them (see issue #21173).
self._scrub_removals()
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<%s at %#x>" % (self.__class__.__name__, id(self))
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
with _IterationGuard(self):
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
with _IterationGuard(self):
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
self._dirty_len = True
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
self._dirty_len = True
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
class finalize:
"""Class for finalization of weakrefable objects
finalize(obj, func, *args, **kwargs) returns a callable finalizer
object which will be called when obj is garbage collected. The
first time the finalizer is called it evaluates func(*arg, **kwargs)
and returns the result. After this the finalizer is dead, and
calling it just returns None.
When the program exits any remaining finalizers for which the
atexit attribute is true will be run in reverse order of creation.
By default atexit is true.
"""
# Finalizer objects don't have any state of their own. They are
# just used as keys to lookup _Info objects in the registry. This
# ensures that they cannot be part of a ref-cycle.
__slots__ = ()
_registry = {}
_shutdown = False
_index_iter = itertools.count()
_dirty = False
_registered_with_atexit = False
class _Info:
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
def __init__(self, obj, func, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
import atexit
atexit.register(self._exitfunc)
finalize._registered_with_atexit = True
info = self._Info()
info.weakref = ref(obj, self)
info.func = func
info.args = args
info.kwargs = kwargs or None
info.atexit = True
info.index = next(self._index_iter)
self._registry[self] = info
finalize._dirty = True
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
otherwise return None"""
info = self._registry.pop(self, None)
if info and not self._shutdown:
return info.func(*info.args, **(info.kwargs or {}))
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {})
def peek(self):
"""If alive then return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None:
return (obj, info.func, info.args, info.kwargs or {})
@property
def alive(self):
"""Whether finalizer is alive"""
return self in self._registry
@property
def atexit(self):
"""Whether finalizer should be called at exit"""
info = self._registry.get(self)
return bool(info) and info.atexit
@atexit.setter
def atexit(self, value):
info = self._registry.get(self)
if info:
info.atexit = bool(value)
def __repr__(self):
info = self._registry.get(self)
obj = info and info.weakref()
if obj is None:
return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
else:
return '<%s object at %#x; for %r at %#x>' % \
(type(self).__name__, id(self), type(obj).__name__, id(obj))
@classmethod
def _select_for_exit(cls):
# Return live finalizers marked for exit, oldest first
L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
L.sort(key=lambda item:item[1].index)
return [f for (f,i) in L]
@classmethod
def _exitfunc(cls):
# At shutdown invoke finalizers for which atexit is true.
# This is called once all other non-daemonic threads have been
# joined.
reenable_gc = False
try:
if cls._registry:
import gc
if gc.isenabled():
reenable_gc = True
gc.disable()
pending = None
while True:
if pending is None or finalize._dirty:
pending = cls._select_for_exit()
finalize._dirty = False
if not pending:
break
f = pending.pop()
try:
# gc is disabled, so (assuming no daemonic
# threads) the following is the only line in
# this function which might trigger creation
# of a new finalizer
f()
except Exception:
sys.excepthook(*sys.exc_info())
assert f not in cls._registry
finally:
# prevent any more finalizers from executing during shutdown
finalize._shutdown = True
if reenable_gc:
gc.enable()
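# Usage sketch (illustrative, not exercised in this module): a finalizer fires
# at most once, either when the referent is collected or at interpreter exit
# while its atexit flag is true.
#
#   class Owner: pass
#   o = Owner()
#   fin = finalize(o, print, "owner collected")
#   fin.alive   # -> True
#   del o       # prints "owner collected" once o is collected
#   fin.alive   # -> False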
|
|
from __future__ import unicode_literals
from datetime import timedelta
from django.core.exceptions import ValidationError
from django.test import TransactionTestCase
from django.utils import timezone
from chamber.exceptions import PersistenceException
from chamber.models import ChangedFields, Comparator
from germanium.tools import assert_equal, assert_false, assert_raises, assert_true # pylint: disable=E0401
from test_chamber.models import ComparableModel, DiffModel, RelatedSmartModel, TestSmartModel # pylint: disable=E0401
from .dispatchers import * # NOQA
from .fields import * # NOQA
from .humanized_helpers import * # NOQA
class NameComparator(Comparator):
def compare(self, a, b):
return a.name == b.name
class TestProxySmartModel(TestSmartModel):
def clean_name(self):
if len(self.name) >= 10:
raise ValidationError('name must be lower than 10')
class Meta:
proxy = True
class TestPreProxySmartModel(TestSmartModel):
def __init__(self, **kwargs):
self.name = None
super(TestPreProxySmartModel, self).__init__(**kwargs)
def _pre_save(self, *args, **kwargs):
self.name = 'test pre save'
def _pre_delete(self, *args, **kwargs):
self.name = 'test pre delete'
class Meta:
proxy = True
class TestPostProxySmartModel(TestSmartModel):
def __init__(self, **kwargs):
self.name = None
super(TestPostProxySmartModel, self).__init__(**kwargs)
def _post_save(self, *args, **kwargs):
self.name = 'test post save'
def _post_delete(self, *args, **kwargs):
self.name = 'test post delete'
class Meta:
proxy = True
class ModelsTestCase(TransactionTestCase):
def test_smart_model_changed_fields(self):
obj = TestProxySmartModel.objects.create(name='a')
changed_fields = ChangedFields(obj)
assert_equal(len(changed_fields), 0)
obj.name = 'b'
assert_equal(len(changed_fields), 1)
assert_equal(changed_fields['name'].initial, 'a')
assert_equal(changed_fields['name'].current, 'b')
obj.save()
# Initial values are not changed
assert_equal(len(changed_fields), 2)
assert_equal(set(changed_fields.keys()), {'name', 'changed_at'})
assert_equal(changed_fields['name'].initial, 'a')
assert_equal(changed_fields['name'].current, 'b')
assert_true(changed_fields.has_any_key('name', 'crated_at'))
assert_false(changed_fields.has_any_key('invalid', 'crated_at'))
assert_raises(AttributeError, changed_fields.__delitem__, 'name')
assert_raises(AttributeError, changed_fields.clear)
assert_raises(AttributeError, changed_fields.pop, 'name')
def test_model_diff(self):
obj = DiffModel.objects.create(name='test', datetime=timezone.now(), number=2)
assert_false(obj.has_changed)
obj.name = 'test2'
assert_true(obj.has_changed)
assert_equal(set(obj.changed_fields.keys()), {'name'})
assert_equal((obj.changed_fields['name'].initial, obj.changed_fields['name'].current), ('test', 'test2'))
obj.name = 'test'
assert_false(obj.has_changed)
assert_false(obj.changed_fields)
obj.name = 'test2'
obj.number = 3
obj.datetime = obj.datetime + timedelta(days=2)
assert_true(obj.has_changed)
assert_equal(set(obj.changed_fields.keys()), {'name', 'number', 'datetime'})
obj.save()
assert_false(obj.has_changed)
assert_false(obj.changed_fields)
def test_comparator(self):
obj1 = ComparableModel.objects.create(name='test')
obj2 = ComparableModel.objects.create(name='test')
obj3 = ComparableModel.objects.create(name='test2')
comparator = NameComparator()
assert_true(obj1.equals(obj2, comparator))
assert_true(obj2.equals(obj1, comparator))
assert_false(obj1.equals(obj3, comparator))
assert_false(obj3.equals(obj1, comparator))
def test_should_raise_exception_due_calling_default_comparator(self):
obj1 = ComparableModel.objects.create(name='test')
obj2 = ComparableModel.objects.create(name='test')
assert_raises(NotImplementedError, obj1.equals, obj2, Comparator())
def test_smart_model_clean_pre_save(self):
assert_raises(PersistenceException, TestProxySmartModel.objects.create, name=10 * 'a')
obj = TestProxySmartModel.objects.create(name=9 * 'a')
obj.name = 11 * 'a'
assert_raises(PersistenceException, obj.save)
assert_equal(len(TestProxySmartModel.objects.get(pk=obj.pk).name), 9)
obj.save(is_cleaned_pre_save=False)
assert_equal(len(TestProxySmartModel.objects.get(pk=obj.pk).name), 11)
def test_smart_model_clean_post_save(self):
class PostSaveTestProxySmartModel(TestProxySmartModel):
class Meta:
proxy = True
verbose_name = 'testmodel'
verbose_name_plural = 'testmodels'
class SmartMeta:
is_cleaned_pre_save = False
is_cleaned_post_save = True
assert_false(PostSaveTestProxySmartModel.objects.filter(name=10 * 'a').exists())
assert_raises(PersistenceException, PostSaveTestProxySmartModel.objects.create, name=10 * 'a')
assert_true(PostSaveTestProxySmartModel.objects.filter(name=10 * 'a').exists())
obj = PostSaveTestProxySmartModel.objects.create(name=9 * 'a')
obj.name = 11 * 'a'
assert_raises(PersistenceException, obj.save)
assert_equal(len(PostSaveTestProxySmartModel.objects.get(pk=obj.pk).name), 11)
obj.name = 12 * 'a'
obj.save(is_cleaned_post_save=False)
assert_equal(len(PostSaveTestProxySmartModel.objects.get(pk=obj.pk).name), 12)
def test_smart_model_clean_atomic_post_save(self):
class AtomicPostSaveTestProxySmartModel(TestProxySmartModel):
class Meta:
proxy = True
verbose_name = 'testmodel'
verbose_name_plural = 'testmodels'
class SmartMeta:
is_cleaned_pre_save = False
is_cleaned_post_save = True
is_save_atomic = True
assert_false(AtomicPostSaveTestProxySmartModel.objects.filter(name=10 * 'a').exists())
assert_raises(PersistenceException, AtomicPostSaveTestProxySmartModel.objects.create, name=10 * 'a')
assert_false(AtomicPostSaveTestProxySmartModel.objects.filter(name=10 * 'a').exists())
obj = AtomicPostSaveTestProxySmartModel.objects.create(name=9 * 'a')
obj.name = 11 * 'a'
assert_raises(PersistenceException, obj.save)
assert_equal(len(AtomicPostSaveTestProxySmartModel.objects.get(pk=obj.pk).name), 9)
obj.name = 12 * 'a'
obj.save(is_cleaned_post_save=False)
assert_equal(len(AtomicPostSaveTestProxySmartModel.objects.get(pk=obj.pk).name), 12)
def test_smart_model_clean_pre_delete(self):
class PreDeleteTestProxySmartModel(TestProxySmartModel):
class Meta:
proxy = True
verbose_name = 'testmodel'
verbose_name_plural = 'testmodels'
class SmartMeta:
is_cleaned_pre_save = False
is_cleaned_pre_delete = True
obj = PreDeleteTestProxySmartModel.objects.create(name=10 * 'a')
obj_pk = obj.pk
assert_raises(PersistenceException, obj.delete)
assert_true(PreDeleteTestProxySmartModel.objects.filter(pk=obj_pk).exists())
obj = PreDeleteTestProxySmartModel.objects.create(name=10 * 'a')
obj_pk = obj.pk
obj.delete(is_cleaned_pre_delete=False)
assert_false(PreDeleteTestProxySmartModel.objects.filter(pk=obj_pk).exists())
def test_smart_model_clean_post_delete(self):
class PostDeleteTestProxySmartModel(TestProxySmartModel):
class Meta:
proxy = True
verbose_name = 'testmodel'
verbose_name_plural = 'testmodels'
class SmartMeta:
is_cleaned_pre_save = False
is_cleaned_post_delete = True
obj = PostDeleteTestProxySmartModel.objects.create(name=10 * 'a')
obj_pk = obj.pk
assert_raises(PersistenceException, obj.delete)
assert_false(PostDeleteTestProxySmartModel.objects.filter(pk=obj_pk).exists())
obj = PostDeleteTestProxySmartModel.objects.create(name=10 * 'a')
obj_pk = obj.pk
obj.delete(is_cleaned_post_delete=False)
assert_false(PostDeleteTestProxySmartModel.objects.filter(pk=obj_pk).exists())
def test_smart_model_clean_atomic_post_delete(self):
class AtomicPostDeleteTestProxySmartModel(TestProxySmartModel):
class Meta:
proxy = True
verbose_name = 'testmodel'
verbose_name_plural = 'testmodels'
class SmartMeta:
is_cleaned_pre_save = False
is_cleaned_post_delete = True
is_delete_atomic = True
obj = AtomicPostDeleteTestProxySmartModel.objects.create(name=10 * 'a')
obj_pk = obj.pk
assert_raises(PersistenceException, obj.delete)
assert_true(AtomicPostDeleteTestProxySmartModel.objects.filter(pk=obj_pk).exists())
obj = AtomicPostDeleteTestProxySmartModel.objects.create(name=10 * 'a')
obj_pk = obj.pk
obj.delete(is_cleaned_post_delete=False)
assert_false(AtomicPostDeleteTestProxySmartModel.objects.filter(pk=obj_pk).exists())
def test_smart_model_pre_save(self):
obj = TestPreProxySmartModel.objects.create()
assert_equal(obj.name, 'test pre save')
obj.name = 10 * 'a'
obj.save()
assert_equal(obj.name, 'test pre save')
assert_true(TestPreProxySmartModel.objects.filter(name='test pre save').exists())
def test_smart_model_pre_delete(self):
obj = TestPreProxySmartModel.objects.create()
assert_equal(obj.name, 'test pre save')
obj.delete()
assert_equal(obj.name, 'test pre delete')
def test_smart_model_post_save(self):
assert_raises(PersistenceException, TestPostProxySmartModel.objects.create)
obj = TestPostProxySmartModel.objects.create(name=10 * 'a')
assert_equal(obj.name, 'test post save')
assert_false(TestPreProxySmartModel.objects.filter(name='test post save').exists())
assert_true(TestPreProxySmartModel.objects.filter(name=10 * 'a').exists())
obj.save()
assert_true(TestPreProxySmartModel.objects.filter(name='test post save').exists())
obj.name = 10 * 'a'
obj.save()
assert_equal(obj.name, 'test post save')
assert_false(TestPreProxySmartModel.objects.filter(name='test post save').exists())
def test_smart_model_post_delete(self):
obj = TestPostProxySmartModel.objects.create(name=10 * 'a')
assert_equal(obj.name, 'test post save')
obj.delete()
assert_equal(obj.name, 'test post delete')
def test_smart_queryset_fast_distinct(self):
t = TestSmartModel.objects.create(name='name')
RelatedSmartModel.objects.create(test_smart_model=t)
RelatedSmartModel.objects.create(test_smart_model=t)
qs = TestSmartModel.objects.filter(test_smart_models__test_smart_model=t)
assert_equal(qs.count(), 2)
assert_equal(tuple(qs.values_list('pk', flat=True)), (t.pk, t.pk))
assert_equal(qs.fast_distinct().count(), 1)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
import os
import sys
from .http import protocol_type as PT
from .http import method_type as MT
from .http import format_type as FT
from .auth import rpc_signature_composer as rpc_signer
from .auth import roa_signature_composer as roa_signer
from .auth import oss_signature_composer as oss_signer
from .auth import md5_tool
import abc
import base64
"""
Acs request model.
Created on 6/15/2015
@author: alex jiang
"""
STYLE_RPC = 'RPC'
STYLE_ROA = 'ROA'
STYLE_OSS = 'OSS'
class AcsRequest:
"""
Acs request base class. This class wraps up common parameters for a request.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, product, version=None,
action_name=None,
location_service_code=None,
accept_format=None,
protocol_type=PT.HTTP,
method=None):
"""
:param product:
:param version:
:param action_name:
:param params:
:param resource_owner_account:
:param protocol_type:
:param accept_format:
:return:
"""
self.__version = version
self.__product = product
self.__action_name = action_name
self.__protocol_type = protocol_type
self.__accept_format = accept_format
self.__params = {}
self.__method = method
self.__header = {}
self.__uri_pattern = None
self.__uri_params = None
self.__content = None
self.__location_service_code = location_service_code
def add_query_param(self, k, v):
if self.__params is None:
self.__params = {}
self.__params[k] = v
def get_uri_pattern(self):
return self.__uri_pattern
def get_uri_params(self):
return self.__uri_params
def get_product(self):
return self.__product
def get_version(self):
return self.__version
def get_action_name(self):
return self.__action_name
def get_accept_format(self):
return self.__accept_format
def get_protocol_type(self):
return self.__protocol_type
def get_query_params(self):
return self.__params
def get_method(self):
return self.__method
def set_uri_pattern(self, pattern):
self.__uri_pattern = pattern
def set_uri_params(self, params):
self.__uri_params = params
def set_method(self, method):
self.__method = method
def set_product(self, product):
self.__product = product
def set_version(self, version):
self.__version = version
def set_action_name(self, action_name):
self.__action_name = action_name
def set_accept_format(self, accept_format):
self.__accept_format = accept_format
def set_protocol_type(self, protocol_type):
self.__protocol_type = protocol_type
def set_query_params(self, params):
self.__params = params
def set_content(self, content):
"""
:param content: ByteArray
:return:
"""
self.__content = content
def get_content(self):
"""
:return: ByteArray
"""
return self.__content
def get_headers(self):
"""
:return: Dict
"""
return self.__header
def set_headers(self, headers):
"""
:param headers: Dict
:return:
"""
self.__header = headers
def add_header(self, k, v):
if self.__header is None:
self.__header = {k: v}
else:
self.__header[k] = v
def set_user_agent(self, agent):
self.add_header('User-Agent', agent)
def set_location_service_code(self, location_service_code):
self.__location_service_code = location_service_code
def get_location_service_code(self):
return self.__location_service_code
@abc.abstractmethod
def get_style(self):
pass
@abc.abstractmethod
def get_url(self, region_id, ak, secret):
pass
@abc.abstractmethod
def get_signed_header(self, region_id, ak, secret):
pass
class RpcRequest(AcsRequest):
"""
Class to compose an RPC style request with.
"""
def __init__(
self,
product,
version,
action_name,
location_service_code=None,
format=None,
protocol=None):
AcsRequest.__init__(
self,
product,
version,
action_name,
location_service_code,
format,
protocol,
MT.GET)
self.__style = STYLE_RPC
def get_style(self):
return self.__style
def __get_sign_params(self):
req_params = self.get_query_params()
if req_params is None:
req_params = {}
req_params['Version'] = self.get_version()
req_params['Action'] = self.get_action_name()
req_params['Format'] = self.get_accept_format()
return req_params
def get_url(self, region_id, ak, secret):
sign_params = self.__get_sign_params()
if 'RegionId' not in sign_params.keys():
sign_params['RegionId'] = region_id
url = rpc_signer.get_signed_url(
sign_params,
ak,
secret,
self.get_accept_format(),
self.get_method())
return url
def get_signed_header(self, region_id=None, ak=None, secret=None):
return {}
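# Usage sketch (illustrative; the product/version/action and the credentials
# below are placeholders, not real values): an RPC style request accumulates
# query params and is rendered into a signed GET url by get_url().
#
#   req = RpcRequest('Ecs', '2014-05-26', 'DescribeRegions')
#   req.add_query_param('PageSize', 10)
#   url = req.get_url('cn-hangzhou', 'dummy-access-key-id', 'dummy-secret')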
class RoaRequest(AcsRequest):
"""
Class to compose an ROA style request with.
"""
def __init__(
self,
product,
version,
action_name,
location_service_code=None,
method=None,
headers=None,
uri_pattern=None,
path_params=None,
protocol=None):
"""
:param product: String, mandatory
:param version: String, mandatory
:param action_name: String, mandatory
:param method: String
:param headers: Dict
:param uri_pattern: String
:param path_params: Dict
:param protocol: String
:return:
"""
AcsRequest.__init__(
self,
product,
version,
action_name,
location_service_code,
FT.RAW,
protocol,
method)
self.__style = STYLE_ROA
self.__method = method
self.__header = headers
self.__uri_pattern = uri_pattern
self.__path_params = path_params
def get_style(self):
"""
:return: String
"""
return self.__style
def get_path_params(self):
return self.__path_params
def set_path_params(self, path_params):
self.__path_params = path_params
def add_path_param(self, k, v):
if self.__path_params is None:
self.__path_params = {}
self.__path_params[k] = v
def __get_sign_params(self):
req_params = self.get_query_params()
if req_params is None:
req_params = {}
req_params['Version'] = self.get_version()
req_params['Action'] = self.get_action_name()
req_params['Format'] = self.get_accept_format()
return req_params
def get_signed_header(self, region_id, ak, secret):
"""
Generate signed header
:param region_id: String
:param ak: String
:param secret: String
:return: Dict
"""
sign_params = self.get_query_params()
if (self.get_content() is not None):
md5_str = md5_tool.get_md5_base64_str(self.get_content())
self.add_header('Content-MD5', md5_str)
if 'RegionId' not in sign_params.keys():
sign_params['RegionId'] = region_id
signed_headers = roa_signer.get_signature_headers(
sign_params,
ak,
secret,
self.get_accept_format(),
self.get_headers(),
self.get_uri_pattern(),
self.get_path_params(),
self.get_method())
return signed_headers
def get_url(self, region_id, ak=None, secret=None):
"""
Compose request url without domain
:param region_id: String
:return: String
"""
sign_params = self.get_query_params()
if 'RegionId' not in sign_params.keys():
sign_params['RegionId'] = region_id
url = roa_signer.get_url(
self.get_uri_pattern(),
sign_params,
self.get_path_params())
return url
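# Usage sketch (illustrative placeholders throughout): an ROA style request is
# addressed by a uri pattern plus path params and is signed through headers
# (get_signed_header) rather than through the url.
#
#   req = RoaRequest('CS', '2015-12-15', 'DescribeClusters',
#                    uri_pattern='/clusters/[ClusterId]')
#   req.add_path_param('ClusterId', 'c-0123456789')
#   headers = req.get_signed_header('cn-hangzhou', 'dummy-ak', 'dummy-secret')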
class OssRequest(AcsRequest):
def __init__(
self,
product,
version,
action_name,
location_service_code,
bucket=None,
method=None,
headers=None,
uri_pattern=None,
path_params=None,
protocol=None):
"""
:param product: String, mandatory
:param version: String, mandatory
:param action_name: String, mandatory
:param bucket: String
:param method: String
:param headers: Dict
:param uri_pattern: String
:param path_params: Dict
:param protocol: String
:return:
"""
AcsRequest.__init__(
self,
product,
version,
action_name,
location_service_code,
FT.XML,
protocol,
method)
self.__style = STYLE_OSS
self.__bucket = bucket
self.__method = method
self.__header = headers
self.__uri_pattern = uri_pattern
self.__path_params = path_params
def get_style(self):
return self.__style
def get_path_params(self):
"""
:return: dict
"""
return self.__path_params
def set_path_params(self, path_params):
self.__path_params = path_params
def add_path_param(self, k, v):
if self.__path_params is None:
self.__path_params = {}
self.__path_params[k] = v
def __get_sign_params(self):
req_params = self.get_query_params()
if req_params is None:
req_params = {}
req_params['Version'] = self.get_version()
req_params['Action'] = self.get_action_name()
req_params['Format'] = self.get_accept_format()
return req_params
def get_signed_header(self, region_id, ak, secret):
"""
Compose signed headers.
:param region_id: String
:param ak: String
:param secret: String
:return:
"""
sign_params = self.get_query_params()
if 'RegionId' not in sign_params.keys():
sign_params['RegionId'] = region_id
signed_headers = oss_signer.get_signature_headers(
sign_params,
ak,
secret,
self.get_accept_format(),
self.get_headers(),
self.get_uri_pattern(),
self.get_path_params(),
self.get_method(),
self.__bucket)
return signed_headers
def get_url(self, region_id, ak=None, secret=None):
"""
Generate request url without domain
:param region_id: String
:return: String
"""
sign_params = self.get_query_params()
if 'RegionId' not in sign_params.keys():
sign_params['RegionId'] = region_id
url = oss_signer.get_url(
sign_params,
self.get_uri_pattern(),
self.get_path_params())
return url
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys, math, glob
total = {}
ccxx = {}
def reporttotals(n):
print 'GREPABILITY1, ',n,
for x in ('void','nearly void','perfect','nearly perfect','nearly exclusive','probable','inclusion','embracing','block','probable block','max','none'):
if x in total.keys():
print ',',total[x],
else:
print ', 0',
print
print 'GREPABILITY2, ',n,
for x in ('universal','void','nearly void','perfect','nearly perfect','nearly exclusive','probable','inclusion','embracing','block','max','probable block'):
if x in ccxx.keys():
print ',',ccxx[x],
else:
print ', 0',
print
def inc(t,n):
if n == 0:
return
if t != 'universal':
if t not in total.keys():
total[t] = 0
total[t] += n
if t != 'none':
if t not in ccxx.keys():
ccxx[t] = 0
ccxx[t] += 1
return
def setminus(a,b):
c = []
for x in a:
if x not in b:
c.append(x)
return c
def setinter(a,b):
c = []
for x in a:
if x in b:
c.append(x)
return c
def seteq(a,b):
for x in a:
if x not in b:
return False
for x in b:
if x not in a:
return False
return True
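# Illustrative behaviour of the list-based set helpers above (order preserving,
# worked by hand):
#
#   setminus([1, 2, 3], [2])    -> [1, 3]
#   setinter([1, 2, 3], [2, 4]) -> [2]
#   seteq([1, 2], [2, 1])       -> True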
def readDashSep(s):
# aaa\n----------\nbbb...
f = open(s,'r')
res = [['']]
i = 0
for line in f.readlines():
if line.strip()[-10:] == '----------':
i += 1
res.append(['',line.split('-')[0].split('.')[0][2:],line.split('-')[1],line.split('-')[2],line.split('-')[3].split('.')[0]])
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not necessary
else:
res[i][0] += line.strip()
f.close()
for i in range(1,len(res)):
if res[i][0] in ('','pass'):
res[i][0] = True
else:
res[i][0] = False
return res[1:]
def unique(a):
b = []
for x in a:
if x not in b:
b.append(x)
return b
if __name__ == "__main__":
if len(sys.argv) != 4:
print 'Please provide two grammar names and a CSV file name for report, e.g.:'
print '\t./report-matches.py 00001 00010 sorted.csv'
sys.exit(1)
one = sys.argv[1]
two = sys.argv[2]
report = {}
repkeys = []
pairs = []
allcols = []
allrows = []
for f in glob.glob(one+'*'+two+'.bgf.res'):
out = readDashSep(f)
vs = f.split('-')[4].split('.')[0]+'::'+f.split('-')[2]
matcheswith = unique(map(lambda x:x[1]+'::'+x[2],filter(lambda x:x[0],out)))
allrows = unique(map(lambda x:x[2],out))
m = vs.split('::')[1]
if m not in allcols:
allcols.append(m)
for m in matcheswith:
ours = filter(lambda x:[x[1],x[2]]==m.split('::'),out)
totals = len(ours)
ok = filter(lambda x:x[0],ours)
passed = len(ok)
#print '%-22s'%m,'vs','%-22s'%vs,':',passed,'/',total,map(lambda x:x[3],ok)
pairs.append([m,vs,passed,totals])
# now we have pairs.
#allrows = unique(map(lambda x:x[0].split('::')[1],pairs))
#allcols = unique(map(lambda x:x[1].split('::')[1],pairs))
matches = {}
amatches = {}
cx = 0
for p in pairs:
n1 = p[0].split('::')[1]
n2 = p[1].split('::')[1]
if n1 not in matches.keys():
matches[n1] = {}
if n2 not in amatches.keys():
amatches[n2] = {}
matches [n1][n2] = (0.0+p[2])/p[3]
amatches[n2][n1] = (0.0+p[2])/p[3]
# resorting the table
rows = []
cols = []
# universal
for n in amatches.keys():
# the second criterion is imperfect
if len(amatches[n])>0.75*len(matches.keys()) and sum(amatches[n].values())>0.75*len(amatches[n]):
print 'UNIVERSAL match with',n
cx += 1
inc('universal',1)
cols.append(n)
for k in amatches[n].keys():
matches[k].pop(n)
amatches.pop(n)
if n in matches.keys():
matches.pop(n)
# main loop
change = True
while change:
change = False
# void
for n in matches.keys():
if len(matches[n])==0:
print 'VOID match for',n
cx += 1
inc('void',1)
rows.append(n)
change = True
matches.pop(n)
if change: continue
# nearly void
for n in matches.keys():
if max(matches[n].values()) < 0.25:
print 'NEARLY VOID match of',n
cx += 1
#print n,'-->',matches[n]
inc('nearly void',1)
rows.append(n)
change = True
matches.pop(n)
if change: continue
# perfect
for n in matches.keys():
# only one red match
if len(matches[n])==1 and matches[n].values()==[1.0]:
m = matches[n].keys()[0]
if len(amatches[m])==1:
print 'PERFECT match of',n,'with',m
cx += 1
inc('perfect',1)
rows.append(n)
cols.append(m)
change = True
matches.pop(n)
amatches.pop(m)
for k in matches.keys():
if m in matches[k].keys():
matches[k].pop(m)
for k in amatches.keys():
if n in amatches[k].keys():
amatches[k].pop(n)
if change: continue
# nearly perfect
for n in matches.keys():
# only one green match
if len(filter(lambda x:x>0.75,matches[n].values()))==1:
for k in matches[n].keys():
if matches[n][k]>0.75:
m = k
back = []
for k in amatches[m].keys():
if amatches[m][k]>0.5:
back.append(k)
if len(back)!=1:
# when more than one matches back, it's not NEARLY PERFECT, it's something like BLOCK
continue
#if len(filter(lambda x:x>0.75,amatches[m].values()))==1 and amatches[m][n]>0.75:
if amatches[m][n]>0.75:
print 'NEARLY PERFECT match of',n,'with',m
cx += 1
inc('nearly perfect',1)
rows.append(n)
cols.append(m)
change = True
matches.pop(n)
amatches.pop(m)
for k in matches.keys():
if m in matches[k].keys():
matches[k].pop(m)
for k in amatches.keys():
if n in amatches[k].keys():
amatches[k].pop(n)
if change: continue
for n in matches.keys():
# only one green or red match back
for m in matches[n].keys():
if matches[n][m] > 0.75:
if len(filter(lambda x:x>matches[n][m],matches[n].values()))>0:
# special case when there is a better match than m
continue
if m not in amatches.keys():
# whut?!
continue
back = amatches[m].values()
back.remove(matches[n][m])
if len(back) == 0 or max(back) < 0.5:
print 'NEARLY EXCLUSIVE match of',n,'with',m
cx += 1
inc('nearly exclusive',1)
rows.append(n)
cols.append(m)
change = True
matches.pop(n)
amatches.pop(m)
for k in matches.keys():
if m in matches[k].keys():
matches[k].pop(m)
for k in amatches.keys():
if n in amatches[k].keys():
amatches[k].pop(n)
break
if change: continue
# one to many --- quite hard and nothing to test with
#for n in matches.keys():
# if matches[n].values() == [1.0]*len(matches[n]):
# print 'ONE2MANY? match of',n,'with',matches[n].keys()
# print 'ONE2MANY:',n,'matches with',matches[n]
# for k in matches[n].keys():
# print 'ONE2MANY:',k,'amatches with',amatches[k]
# #for m in matches[n].keys():
# # amatches[m].pop(n)
# #matches.pop(n)
#if change: continue
#
# probable
for n in matches.keys():
# only one blue match
if len(filter(lambda x:x>0.25,matches[n].values()))==1:
for k in matches[n].keys():
if matches[n][k]>0.25:
m = k
break
back = []
for k in amatches[m].keys():
if amatches[m][k]>0.5:
back.append(k)
if len(back)!=1:
# when more than one matches back, it's not PROBABLE, it's something like BLOCK
continue
#if len(filter(lambda x:x>0.75,amatches[m].values()))==1 and amatches[m][n]>0.75:
if amatches[m][n]>0.25:
print 'PROBABLE match of',n,'with',m
cx += 1
inc('probable',1)
rows.append(n)
cols.append(m)
change = True
matches.pop(n)
amatches.pop(m)
for k in matches.keys():
if m in matches[k].keys():
matches[k].pop(m)
for k in amatches.keys():
if n in amatches[k].keys():
amatches[k].pop(n)
if change: continue
# inclusion - TEST?
for n in matches.keys():
if matches[n].keys() and matches[n].values() == [1.0]*len(matches[n]):
yes = True
for m in matches[n].keys():
if len(amatches[m])>1:
yes = False
if not yes:
continue
print 'INCLUSION match of',n,'with',matches[n].keys()
cx += 1
inc('inclusion',1)
rows.append(n)
for m in matches[n].keys():
cols.append(m)
amatches.pop(m)
change = True
matches.pop(n)
if change: continue
# embrace
for m in amatches.keys():
if amatches[m].keys() and amatches[m].values() == [1.0]*len(amatches[m]):
yes = True
for n in amatches[m].keys():
if len(matches[n])>1:
yes = False
if not yes:
continue
print 'EMBRACING match of',amatches[m].keys(),'with',m
cx += 1
inc('embracing',len(amatches[m].keys()))
for n in amatches[m].keys():
rows.append(n)
matches.pop(n)
cols.append(m)
amatches.pop(m)
change = True
if change: continue
# block
for n in matches.keys():
if min(matches[n].values())>0.75:
lblock = [n]
rblock = matches[n].keys()
for m in matches.keys():
#print 'L:',lblock
#print 'R:',rblock
if m in lblock:
continue
valz = filter(lambda x:x[1]>0.75,matches[m].items())
keyz = map(lambda x:x[0],valz)
valz = map(lambda x:x[1],valz)
if len(valz)>0:
if seteq(rblock,keyz):
lblock.append(m)
elif len(setminus(rblock,keyz)) in (1,2):
lblock.append(m)
rblock=setinter(keyz,rblock)
if len(lblock)<2 or len(rblock)<2:
continue
print 'BLOCK match of\n\t',lblock,'\nwith\n\t',rblock
cx += 1
inc('block',len(lblock))
for n1 in lblock:
rows.append(n1)
for n2 in rblock:
cols.append(n2)
change = True
for x in lblock:
if x in matches.keys():
matches.pop(x)
for y in amatches.keys():
if x in amatches[y]:
amatches[y].pop(x)
for x in rblock:
if x in amatches.keys():
amatches.pop(x)
for y in matches.keys():
if x in matches[y]:
matches[y].pop(x)
break
if change: continue
# probable block
for n in matches.keys():
if matches[n].values() and min(matches[n].values())>0.25:
lblock = [n]
rblock = matches[n].keys()
for m in matches.keys():
#print 'L:',lblock
#print 'R:',rblock
if m in lblock:
continue
valz = filter(lambda x:x[1]>0.25,matches[m].items())
keyz = map(lambda x:x[0],valz)
valz = map(lambda x:x[1],valz)
if len(valz)>0:
if seteq(rblock,keyz):
lblock.append(m)
elif len(setminus(rblock,keyz)) in (1,2):
lblock.append(m)
rblock=setinter(keyz,rblock)
if len(lblock)<2 or len(rblock)<2:
continue
print 'PROBABLE BLOCK match of\n\t',lblock,'\nwith\n\t',rblock
cx += 1
inc('probable block',len(lblock))
for n1 in lblock:
rows.append(n1)
for n2 in rblock:
cols.append(n2)
change = True
for x in lblock:
if x in matches.keys():
matches.pop(x)
for y in amatches.keys():
if x in amatches[y]:
amatches[y].pop(x)
for x in rblock:
if x in amatches.keys():
amatches.pop(x)
for y in matches.keys():
if x in matches[y]:
matches[y].pop(x)
break
if change: continue
# max
for n in matches.keys():
valz = matches[n].values()
mx = max(valz)
valz.remove(mx)
if len(valz)==0:
                # sentinel value so that a single remaining candidate always counts as a unique maximum below
valz=[-1]
if mx>max(valz):
# only one maximum
for k in matches[n].keys():
if matches[n][k] == mx:
m = k
break
print 'MAX match of',n,'with',m
cx += 1
inc('max',1)
rows.append(n)
cols.append(m)
change = True
matches.pop(n)
amatches.pop(m)
for k in matches.keys():
if m in matches[k].keys():
matches[k].pop(m)
for k in amatches.keys():
if n in amatches[k].keys():
amatches[k].pop(n)
break
# nothing to do, let's print
inc('none',len(matches.keys()))
if len(matches)>0:
print 'THE REST are:',matches.keys()
for k in matches.keys():
print k,'matches with',matches[k]
rows.append(k)
for k in amatches.keys():
cols.append(k)
#print 'ROWS:',rows
for n in allrows:
if n not in rows:
print 'Fixing an empty row for',n
rows.append(n)
inc('none',1)
#print 'COLS:',cols
for n in allcols:
if n not in cols:
print 'Fixing an empty col for',n
cols.append(n)
print 'TOTAL:',cx,'(',sum(total.values()),')','matches,',len(matches),'unmatched.'
reporttotals(two)
# write a table
f = open(sys.argv[3],'w')
matrix = []
for i in range(0,len(rows)):
matrix.append([0]*len(cols))
for p in pairs:
matrix[rows.index(p[0].split('::')[1])][cols.index(p[1].split('::')[1])] = (0.0+p[2])/p[3]
for i in cols:
f.write(' , '+str(i))
f.write('\n')
for i in range(0,len(matrix)):
f.write(str(rows[i]))
for j in range(0,len(matrix[i])):
f.write(' , '+str(matrix[i][j]))
f.write('\n')
f.close()
sys.exit(0)
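# Illustrative sketch (not part of the original script): the table written to
# sys.argv[3] is a comma-separated matrix with one header row of column ids and
# one data row per row id, each cell holding the pair score p[2]/p[3], e.g.
#
#      , colA , colB
#  rowX , 0.8 , 0.0
#  rowY , 0.0 , 1.0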
|
|
# -*- coding:utf-8 -*-
import ast
import sys
from high_entropy_string import PythonStringData
import bandit
from bandit.core import test_properties as test
def gen_config(name):
"""
Default configuration for available configuration options.
"""
if name == 'patterns_to_ignore' or name == 'entropy_patterns_to_discount':
return []
@test.takes_config
@test.checks('FunctionDef')
@test.test_id('BHES100')
def high_entropy_funcdef(context, config):
# looks for "def function(some_arg='candidate')"
# this pads the list of default values with "None" if nothing is given
defs = [None] * (len(context.node.args.args) -
len(context.node.args.defaults))
defs.extend(context.node.args.defaults)
strings = []
# go through all (param, value)s and look for candidates
for key, val in zip(context.node.args.args, defs):
if isinstance(key, ast.Name):
target = key.arg if sys.version_info.major > 2 else key.id # Py3
if isinstance(val, ast.Str):
string_data = PythonStringData(
string=val.s,
target=target,
node_type='argument',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
return _report(strings)
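# Illustrative example (an assumption, not taken from the plugin itself): BHES100
# above would examine a definition like the following, pairing each argument with
# its default string literal and scoring that string's entropy:
#
#     def connect(host, api_token='9f86d081884c7d6659a2feaa0c55ad01'):
#         ...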
@test.takes_config
@test.checks('Call')
@test.test_id('BHES101')
def high_entropy_funcarg(context, config):
# looks for "function('candidate', some_arg='candidate')"
node = context.node
strings = []
try:
caller = context.call_function_name_qual
except AttributeError:
caller = None
for kw in node.keywords:
if isinstance(kw.value, ast.Str):
string_data = PythonStringData(
string=kw.value.s,
target=kw.arg,
caller=caller,
node_type='kwargument',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
if isinstance(node.parent, ast.Assign):
for targ in node.parent.targets:
try:
target = targ.id
except AttributeError:
target = None
for arg in node.args:
if isinstance(arg, ast.Str):
string_data = PythonStringData(
string=arg.s,
caller=caller,
target=target,
node_type='argument',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
else:
for arg in node.args:
if isinstance(arg, ast.Str):
string_data = PythonStringData(
string=arg.s,
caller=caller,
node_type='argument',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
return _report(strings)
def _get_assign(node):
if isinstance(node, ast.Assign):
return node
else:
return _get_assign(node.parent)
@test.takes_config
@test.checks('Dict')
@test.checks('List')
@test.checks('Tuple')
@test.checks('Set')
@test.test_id('BHES102')
def high_entropy_iter(context, config):
node = context.node
if isinstance(node, ast.Dict):
# looks for "some_string = {'target': 'candidate'}"
_dict = dict(zip(node.keys, node.values))
strings = []
        for key, val in _dict.iteritems():
            if isinstance(key, ast.Str):
                target = key.s
            elif isinstance(key, ast.Name):
                target = key.id
            else:
                target = None
if not isinstance(val, ast.Str):
continue
string_data = PythonStringData(
string=val.s,
target=target,
node_type='dict',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
return _report(strings)
elif (isinstance(node, ast.List) or
isinstance(node, ast.Tuple) or
isinstance(node, ast.Set)):
# looks for "target = ['candidate', 'candidate']"
# looks for "target = ('candidate', 'candidate')"
# looks for "target = set('candidate', 'candidate')"
strings = []
for etl in node.elts:
if isinstance(etl, ast.Str):
string = etl.s
else:
continue
try:
assign = _get_assign(node.parent)
for targ in assign.targets:
try:
target = targ.id
except AttributeError:
target = None
string_data = PythonStringData(
string=string,
target=target,
node_type='assignment',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
except AttributeError:
string_data = PythonStringData(
string=string,
node_type='assignment',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
return _report(strings)
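# Illustrative example (an assumption): BHES102 above would walk literals such as
#
#     credentials = {'db_password': 'dGhpcy1pcy1ub3QtYS1yZWFsLXNlY3JldA=='}
#     tokens = ['bm90LWEtcmVhbC10b2tlbg==', 'YW5vdGhlci1mYWtlLW9uZQ==']
#
# scoring each string value (for dicts) or element (for lists/tuples/sets).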
@test.takes_config
@test.checks('Str')
@test.test_id('BHES103')
def high_entropy_assign(context, config):
node = context.node
if isinstance(node.parent, ast.Assign):
strings = []
# looks for "some_var='candidate'"
for targ in node.parent.targets:
try:
target = targ.id
except AttributeError:
target = None
string_data = PythonStringData(
string=node.s,
target=target,
node_type='assignment',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
return _report(strings)
elif isinstance(node.parent, ast.Index):
# looks for "dict[target]='candidate'"
# assign -> subscript -> index -> string
assign = node.parent.parent.parent
if isinstance(assign, ast.Assign):
if isinstance(assign.value, ast.Str):
string = assign.value.s
else:
return
string_data = PythonStringData(
string=string,
target=node.s,
node_type='assignment',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
return _report([string_data])
elif isinstance(node.parent, ast.Compare):
# looks for "target == 'candidate'"
comp = node.parent
if isinstance(comp.left, ast.Name):
if isinstance(comp.comparators[0], ast.Str):
string_data = PythonStringData(
string=comp.comparators[0].s,
target=comp.left.id,
node_type='comparison',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
return _report([string_data])
elif isinstance(node.parent, ast.Attribute):
# looks for "target == 'candidate{0}'.format('some_string')"
strings = []
if isinstance(node.parent.value, ast.Str):
string = node.parent.value.s
else:
return
try:
caller = node.parent.attr
except AttributeError:
caller = None
try:
assign = _get_assign(node.parent)
for targ in assign.targets:
try:
target = targ.id
except AttributeError:
target = None
string_data = PythonStringData(
string=string,
caller=caller,
target=target,
node_type='assignment',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
except AttributeError:
string_data = PythonStringData(
string=string,
caller=caller,
node_type='assignment',
patterns_to_ignore=config.get('patterns_to_ignore'),
entropy_patterns_to_discount=config.get(
'entropy_patterns_to_discount'
)
)
strings.append(string_data)
return _report(strings)
# TODO: Handle BinOp
# TODO: Handle Return
def _report(strings):
reports = []
for string_data in strings:
if string_data.confidence == 1:
confidence = bandit.LOW
elif string_data.confidence == 2:
confidence = bandit.MEDIUM
elif string_data.confidence >= 3:
confidence = bandit.HIGH
if string_data.severity == 1:
severity = bandit.LOW
elif string_data.severity == 2:
severity = bandit.MEDIUM
elif string_data.severity >= 3:
severity = bandit.HIGH
if type(string_data.string) is not unicode:
string_data.string = string_data.string.decode(
'utf-8',
errors='replace'
)
string_data.string = string_data.string.encode(
'ascii',
errors='replace'
)
if len(string_data.string) > 12:
secret_start = string_data.string[:4]
secret_end = string_data.string[-4:]
            # only the first and last four characters of long secrets are echoed back
            secret = '\'{0!s}...{1!s}\''.format(secret_start, secret_end)
else:
secret = string_data.string
if string_data.confidence >= 1:
reports.append(secret)
if reports:
return bandit.Issue(
severity=severity,
confidence=confidence,
text=u'Possible hardcoded secret(s) {0}.'.format(', '.join(reports))
)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from OvmCommonModule import *
import traceback
import time
import re
logger = OvmLogger("OvmNetwork")
class Filter:
class Network:
IFNAME_LO = r'(lo)'
IFNAME_BRIDGE = r'(xenbr\d+|vlan\d+)'
IFNAME_PIF = r'(eth\d+$|bond\d+$)'
        IFNAME_VLAN = r'(eth\d+\.\d+$|bond\d+\.\d+$)'
class Parser(object):
    '''
    Small helper that applies the regular expressions defined in Filter to
    device listings and command output.
    '''
def findall(self, pattern, samples):
"""
@param pattern: search pattern
@param result: Parser line execution result
@return : list of search
find result of Parser which has same pattern
findall Parser find all pattern in a string
"""
result = []
for line in samples:
items = re.findall(pattern, line)
for item in items:
result.append(item)
return result
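    # Illustrative usage (an assumption): with device names as returned by
    # os.listdir('/sys/class/net'), e.g.
    #
    #     Parser().findall(Filter.Network.IFNAME_PIF, ['lo', 'eth0', 'xenbr0', 'bond1'])
    #
    # would return ['eth0', 'bond1'].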
def checkPattern(self, pattern, cmd_result):
"""
@param pattern: search pattern
@param cmd_result: Parser line execution result
@return : True (if pattern is occurred)
"""
for line in cmd_result:
items = re.findall(pattern, line)
if len(items) > 0:
return True
return False
def search(self, cmd_result, pattern):
return None
class OvmVlanDecoder(json.JSONDecoder):
def decode(self, jStr):
deDict = asciiLoads(jStr)
vlan = OvmVlan()
setAttrFromDict(vlan, 'vid', deDict, int)
setAttrFromDict(vlan, 'pif', deDict)
return vlan
class OvmVlanEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmVlan): raise Exception("%s is not instance of OvmVlan"%type(obj))
dct = {}
safeDictSet(obj, dct, 'name')
safeDictSet(obj, dct, 'vid')
safeDictSet(obj, dct, 'pif')
return dct
def toOvmVlan(jStr):
return json.loads(jStr, cls=OvmVlanDecoder)
def fromOvmVlan(vlan):
return normalizeToGson(json.dumps(vlan, cls=OvmVlanEncoder))
class OvmBridgeDecoder(json.JSONDecoder):
def decode(self, jStr):
deDic = asciiLoads(jStr)
bridge = OvmBridge()
setAttrFromDict(bridge, 'name', deDic)
setAttrFromDict(bridge, 'attach', deDic)
return bridge
class OvmBridgeEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, OvmBridge): raise Exception("%s is not instance of OvmBridge"%type(obj))
dct = {}
safeDictSet(obj, dct, 'name')
safeDictSet(obj, dct, 'attach')
safeDictSet(obj, dct, 'interfaces')
return dct
def toOvmBridge(jStr):
return json.loads(jStr, cls=OvmBridgeDecoder)
def fromOvmBridge(bridge):
return normalizeToGson(json.dumps(bridge, cls=OvmBridgeEncoder))
class OvmInterface(OvmObject):
name = ''
class OvmVlan(OvmInterface):
vid = 0
pif = ''
class OvmBridge(OvmInterface):
attach = ''
interfaces = []
class OvmNetwork(OvmObject):
'''
Network
'''
@property
def pifs(self):
return self._getInterfaces("pif")
@property
def vlans(self):
return self._getInterfaces("vlan")
@property
def bridges(self):
return self._getInterfaces("bridge")
def __init__(self):
self.Parser = Parser()
def _createVlan(self, vlan):
"""
@param jsonString : parameter from client side
@return : succ xxxxx
ex. jsonString => {vid:100, pif:eth0}
ex. return =>
"""
#Pre-condition
#check Physical Interface Name
if vlan.pif not in self.pifs.keys():
msg = "Physical Interface(%s) does not exist" % vlan.pif
logger.debug(self._createVlan, msg)
raise Exception(msg)
#Pre-condition
#check Vlan Interface Name
ifName = "%s.%s" % (vlan.pif, vlan.vid)
if ifName in self.vlans.keys():
msg = "Vlan Interface(%s) already exist, return it" % ifName
logger.debug(self._createVlan, msg)
return self.vlans[ifName]
doCmd(['vconfig', 'add', vlan.pif, vlan.vid])
self.bringUP(ifName)
logger.debug(self._createVlan, "Create vlan %s successfully"%ifName)
return self.vlans[ifName]
def _deleteVlan(self, name):
if name not in self.vlans.keys():
raise Exception("No vlan device %s found"%name)
vlan = self.vlans[name]
self.bringDown(vlan.name)
doCmd(['vconfig', 'rem', vlan.name])
logger.debug(self._deleteVlan, "Delete vlan %s successfully"%vlan.name)
def _createBridge(self, bridge):
"""
@return : success
ex. {bridge:xapi100, attach:eth0.100}
create bridge interface, and attached it
cmd 1: ip link add bridge
cmd 2: ip link set dev
"""
if "xenbr" not in bridge.name and "vlan" not in bridge.name:
raise Exception("Invalid bridge name %s. Bridge name must be in partten xenbr/vlan, e.g. xenbr0"%bridge.name)
#pre-condition
#check Bridge Interface Name
if bridge.name in self.bridges.keys():
msg = "Bridge(%s) already exist, return it" % bridge.name
logger.debug(self._createBridge, msg)
return self.bridges[bridge.name]
#pre-condition
#check attach must exist
#possible to attach in PIF or VLAN
if bridge.attach not in self.vlans.keys() and bridge.attach not in self.pifs.keys():
msg = "%s is not either pif or vlan" % bridge.attach
logger.error(self._createBridge, msg)
raise Exception(msg)
doCmd(['ip', 'link', 'add', 'name', bridge.name, 'type', 'bridge'])
doCmd(['ip', 'link', 'set', 'dev', bridge.attach, 'master', bridge.name])
self.bringUP(bridge.name)
logger.debug(self._createBridge, "Create bridge %s on %s successfully"%(bridge.name, bridge.attach))
return self.bridges[bridge.name]
def _getBridges(self):
return self.bridges.keys()
def _getVlans(self):
return self.vlans.keys()
def _deleteBridge(self, name):
if name not in self.bridges.keys():
raise Exception("Can not find bridge %s"%name)
bridge = self.bridges[name]
if bridge.attach in bridge.interfaces: bridge.interfaces.remove(bridge.attach)
if len(bridge.interfaces) != 0:
logger.debug(self._deleteBridge, "There are still some interfaces(%s) on bridge %s"%(bridge.interfaces, bridge.name))
return False
self.bringDown(bridge.name)
doCmd(['ip', 'link', 'del', bridge.name])
logger.debug(self._deleteBridge, "Delete bridge %s successfully"%bridge.name)
return True
def _getInterfaces(self, type):
"""
        @param type : ["pif", "vlan", "bridge"]
@return : dictionary of Interface Objects
get All Interfaces based on type
"""
devices = os.listdir('/sys/class/net')
ifs = {}
if type == "pif":
devs = self.Parser.findall(Filter.Network.IFNAME_PIF, devices)
for dev in set(devs):
ifInst = OvmInterface()
ifInst.name = dev
ifs[dev] = ifInst
elif type == "vlan":
devs = self.Parser.findall(Filter.Network.IFNAME_VLAN, devices)
for dev in set(devs):
ifInst = OvmVlan()
ifInst.name = dev
(pif, vid) = dev.split('.')
ifInst.pif = pif
ifInst.vid = vid
ifs[dev] = ifInst
elif type == "bridge":
devs = self.Parser.findall(Filter.Network.IFNAME_BRIDGE, devices)
for dev in set(devs):
ifInst = OvmBridge()
ifInst.name = dev
devs = os.listdir(join('/sys/class/net', dev, 'brif'))
ifInst.interfaces = devs
attches = self.Parser.findall(Filter.Network.IFNAME_PIF, devs) + self.Parser.findall(Filter.Network.IFNAME_VLAN, devs)
if len(attches) > 1: raise Exception("Multiple PIF on bridge %s (%s)"%(dev, attches))
elif len(attches) == 0: ifInst.attach = "null"
elif len(attches) == 1: ifInst.attach = attches[0]
ifs[dev] = ifInst
return ifs
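    # Illustrative sketch (an assumption): on a host whose /sys/class/net lists
    # ['lo', 'eth0', 'eth0.100', 'xenbr0'], the properties above would yield
    # pifs == {'eth0': ...}, vlans == {'eth0.100': ...} (pif 'eth0', vid '100')
    # and bridges == {'xenbr0': ...} with its attached device read from
    # /sys/class/net/xenbr0/brif.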
def bringUP(self, ifName):
doCmd(['ifconfig', ifName, 'up'])
def bringDown(self, ifName):
doCmd(['ifconfig', ifName, 'down'])
@staticmethod
def createBridge(jStr):
try:
network = OvmNetwork()
network._createBridge(toOvmBridge(jStr))
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createBridge), errmsg)
@staticmethod
def deleteBridge(name):
try:
network = OvmNetwork()
network._deleteBridge(name)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.deleteBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteBridge), errmsg)
@staticmethod
def getAllBridges():
try:
network = OvmNetwork()
rs = toGson(network._getBridges())
logger.debug(OvmNetwork.getAllBridges, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getAllBridges, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getAllBridges), errmsg)
@staticmethod
def getBridgeByIp(ip):
try:
routes = doCmd(['ip', 'route']).split('\n')
brName = None
for r in routes:
                if ip in r and ("xenbr" in r or "vlan" in r):
brName = r.split(' ')[2]
break
if not brName: raise Exception("Cannot find bridge with IP %s"%ip)
logger.debug(OvmNetwork.getBridgeByIp, "bridge:%s, ip:%s"%(brName, ip))
return toGson({"bridge":brName})
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getBridgeByIp, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getBridgeByIp), errmsg)
@staticmethod
def getVlans():
try:
network = OvmNetwork()
rs = toGson(network._getVlans())
logger.debug(OvmNetwork.getVlans, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getVlans, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getVlans), errmsg)
@staticmethod
def createVlan(jStr):
try:
network = OvmNetwork()
vlan = network._createVlan(toOvmVlan(jStr))
rs = fromOvmVlan(vlan)
logger.debug(OvmNetwork.createVlan, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createVlan, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createVlan), errmsg)
@staticmethod
def createVlanBridge(bridgeDetails, vlanDetails):
try:
network = OvmNetwork()
v = toOvmVlan(vlanDetails)
b = toOvmBridge(bridgeDetails)
vlan = network._createVlan(v)
b.attach = vlan.name
network._createBridge(b)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.createVlanBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createVlanBridge), errmsg)
@staticmethod
def deleteVlanBridge(name):
try:
network = OvmNetwork()
if name not in network.bridges.keys():
logger.debug(OvmNetwork.deleteVlanBridge, "No bridge %s found"%name)
return SUCC()
bridge = network.bridges[name]
vlanName = bridge.attach
if network._deleteBridge(name):
if vlanName != "null":
network._deleteVlan(vlanName)
else:
logger.warning(OvmNetwork.deleteVlanBridge, "Bridge %s has no vlan device"%name)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.deleteVlanBridge, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteVlanBridge), errmsg)
@staticmethod
def getBridgeDetails(name):
try:
network = OvmNetwork()
if name not in network.bridges.keys():
raise Exception("No bridge %s found"%name)
bridge = network.bridges[name]
rs = fromOvmBridge(bridge)
logger.debug(OvmNetwork.getBridgeDetails, rs)
return rs
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.getBridgeDetails, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getBridgeDetails), errmsg)
@staticmethod
def deleteVlan(name):
try:
network = OvmNetwork()
network._deleteVlan(name)
return SUCC()
except Exception, e:
errmsg = fmt_err_msg(e)
logger.error(OvmNetwork.deleteVlan, errmsg)
raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteVlan), errmsg)
if __name__ == "__main__":
try:
OvmNetwork.getBridgeDetails(sys.argv[1])
#=======================================================================
# txt = json.dumps({"vid":104, "pif":"eth0"})
# txt2 = json.dumps({"name":"xapi3", "attach":"eth0.104"})
# print nw.createVlan(txt)
# print nw.createBridge(txt2)
#
# nw.deleteBridge("xapi3")
# nw.deleteVlan("eth0.104")
#=======================================================================
except Exception, e:
print e
|
|
"""
>>> from blaze import *
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> deadbeats = accounts[accounts['amount'] < 0]['name']
>>> from sqlalchemy import Table, Column, MetaData, Integer, String
>>> t = Table('accounts', MetaData(),
... Column('name', String, primary_key = True),
... Column('amount', Integer))
>>> print(compute(deadbeats, t)) # doctest: +SKIP
SELECT accounts.name
FROM accounts
WHERE accounts.amount < :amount_1
"""
from __future__ import absolute_import, division, print_function
import itertools
from itertools import chain
from operator import and_, eq, attrgetter
from copy import copy
import sqlalchemy as sa
from sqlalchemy import sql, Table, MetaData
from sqlalchemy.sql import Selectable, Select, functions as safuncs
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.elements import ClauseElement, ColumnElement, ColumnClause
from sqlalchemy.sql.selectable import FromClause, ScalarSelect
from sqlalchemy.engine import Engine
import toolz
from toolz import unique, concat, pipe, first
from toolz.compatibility import zip
from toolz.curried import map
import numpy as np
import numbers
import warnings
from multipledispatch import MDNotImplementedError
from odo.backends.sql import metadata_of_engine, dshape_to_alchemy
from datashape.predicates import iscollection, isscalar, isrecord
from ..dispatch import dispatch
from .core import compute_up, compute, base
from ..expr import (
Projection, Selection, Field, Broadcast, Expr, IsIn, Slice, BinOp, UnaryOp,
Join, mean, var, std, Reduction, count, FloorDiv, UnaryStringFunction,
strlen, DateTime, Coerce, nunique, Distinct, By, Sort, Head, Label, Concat,
ReLabel, Merge, common_subexpression, Summary, Like, nelements, notnull
)
from ..expr.broadcast import broadcast_collect
from ..expr.math import isnan
from ..compatibility import reduce
from ..utils import listpack
__all__ = ['sa', 'select']
def inner_columns(s):
    try:
        return s.inner_columns
    except AttributeError:
        return s.c
@dispatch(Projection, Selectable)
def compute_up(t, s, scope=None, **kwargs):
d = dict((c.name, c) for c in inner_columns(s))
return select(s).with_only_columns([d[field] for field in t.fields])
@dispatch((Field, Projection), Select)
def compute_up(t, s, **kwargs):
cols = list(s.inner_columns)
cols = [lower_column(cols[t._child.fields.index(c)]) for c in t.fields]
return s.with_only_columns(cols)
@dispatch(Field, ClauseElement)
def compute_up(t, s, **kwargs):
return s.c.get(t._name)
@dispatch(Broadcast, Select)
def compute_up(t, s, **kwargs):
cols = list(inner_columns(s))
d = dict((t._scalars[0][c], cols[i])
for i, c in enumerate(t._scalars[0].fields))
name = t._scalar_expr._name
result = compute(t._scalar_expr, d, post_compute=False).label(name)
s = copy(s)
s.append_column(result)
return s.with_only_columns([result])
@dispatch(Broadcast, Selectable)
def compute_up(t, s, **kwargs):
cols = list(inner_columns(s))
d = dict((t._scalars[0][c], cols[i])
for i, c in enumerate(t._scalars[0].fields))
name = t._scalar_expr._name
return compute(t._scalar_expr, d, post_compute=False).label(name)
@dispatch(Concat, (Select, Selectable), (Select, Selectable))
def compute_up(t, lhs, rhs, **kwargs):
if t.axis != 0:
raise ValueError(
'Cannot concat along a non-zero axis in sql; perhaps you want'
" 'merge'?",
)
return select(lhs).union_all(select(rhs)).alias()
@dispatch(Broadcast, sa.Column)
def compute_up(t, s, **kwargs):
expr = t._scalar_expr
return compute(expr, s, post_compute=False).label(expr._name)
@dispatch(BinOp, ColumnElement)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return t.op(t.lhs, data)
@dispatch(BinOp, Select)
def compute_up(t, data, **kwargs):
assert len(data.c) == 1, \
'Select cannot have more than a single column when doing arithmetic'
column = first(data.inner_columns)
if isinstance(t.lhs, Expr):
return t.op(column, t.rhs)
else:
return t.op(t.lhs, column)
@compute_up.register(BinOp, (ColumnElement, base), ColumnElement)
@compute_up.register(BinOp, ColumnElement, (ColumnElement, base))
def binop_sql(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(FloorDiv, ColumnElement)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return sa.func.floor(data / t.rhs)
else:
return sa.func.floor(t.rhs / data)
@compute_up.register(FloorDiv, (ColumnElement, base), ColumnElement)
@compute_up.register(FloorDiv, ColumnElement, (ColumnElement, base))
def binop_sql(t, lhs, rhs, **kwargs):
return sa.func.floor(lhs / rhs)
@dispatch(isnan, ColumnElement)
def compute_up(t, s, **kwargs):
return s == float('nan')
@dispatch(UnaryOp, ColumnElement)
def compute_up(t, s, **kwargs):
sym = t.symbol
return getattr(t, 'op', getattr(safuncs, sym, getattr(sa.func, sym)))(s)
@dispatch(Selection, sa.sql.ColumnElement)
def compute_up(expr, data, scope=None, **kwargs):
predicate = compute(expr.predicate, data, post_compute=False)
return sa.select([data]).where(predicate)
@dispatch(Selection, Selectable)
def compute_up(t, s, scope=None, **kwargs):
ns = dict((t._child[col.name], col)
for col in getattr(s, 'inner_columns', s.columns))
predicate = compute(t.predicate, toolz.merge(ns, scope),
optimize=False, post_compute=False)
try:
return s.where(predicate)
except AttributeError:
return select([s]).where(predicate)
def select(s):
""" Permissive SQL select
Idempotent sa.select
    Wraps input in a list if necessary
"""
if not isinstance(s, sa.sql.Select):
if not isinstance(s, (tuple, list)):
s = [s]
s = sa.select(s)
return s
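# Minimal sketch (an assumption, reusing the `accounts` table `t` from the module
# docstring) of the idempotence described above:
#
#     sel = select(t.c.amount)   # wraps the bare column in sa.select([...])
#     select(sel) is sel         # True -- already a Select, returned unchanged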
table_names = ('table_%d' % i for i in itertools.count(1))
def name(sel):
""" Name of a selectable """
if hasattr(sel, 'name'):
return sel.name
if hasattr(sel, 'froms'):
if len(sel.froms) == 1:
return name(sel.froms[0])
return next(table_names)
@dispatch(Select, Select)
def _join_selectables(a, b, condition=None, **kwargs):
return a.join(b, condition, **kwargs)
@dispatch(Select, ClauseElement)
def _join_selectables(a, b, condition=None, **kwargs):
if len(a.froms) > 1:
raise MDNotImplementedError()
return a.replace_selectable(a.froms[0],
a.froms[0].join(b, condition, **kwargs))
@dispatch(ClauseElement, Select)
def _join_selectables(a, b, condition=None, **kwargs):
if len(b.froms) > 1:
raise MDNotImplementedError()
return b.replace_selectable(b.froms[0],
a.join(b.froms[0], condition, **kwargs))
@dispatch(ClauseElement, ClauseElement)
def _join_selectables(a, b, condition=None, **kwargs):
return a.join(b, condition, **kwargs)
_getname = attrgetter('name')
def _clean_join_name(opposite_side_colnames, suffix, c):
if c.name not in opposite_side_colnames:
return c
else:
return c.label(c.name + suffix)
@dispatch(Join, ClauseElement, ClauseElement)
def compute_up(t, lhs, rhs, **kwargs):
if isinstance(lhs, ColumnElement):
lhs = select(lhs)
if isinstance(rhs, ColumnElement):
rhs = select(rhs)
if name(lhs) == name(rhs):
left_suffix, right_suffix = t.suffixes
lhs = lhs.alias('%s%s' % (name(lhs), left_suffix))
rhs = rhs.alias('%s%s' % (name(rhs), right_suffix))
lhs = alias_it(lhs)
rhs = alias_it(rhs)
if isinstance(lhs, Select):
lhs = lhs.alias(next(aliases))
left_conds = [lhs.c.get(c) for c in listpack(t.on_left)]
else:
ldict = dict((c.name, c) for c in inner_columns(lhs))
left_conds = [ldict.get(c) for c in listpack(t.on_left)]
if isinstance(rhs, Select):
rhs = rhs.alias(next(aliases))
right_conds = [rhs.c.get(c) for c in listpack(t.on_right)]
else:
rdict = dict((c.name, c) for c in inner_columns(rhs))
right_conds = [rdict.get(c) for c in listpack(t.on_right)]
condition = reduce(and_, map(eq, left_conds, right_conds))
# Perform join
if t.how == 'inner':
join = _join_selectables(lhs, rhs, condition=condition)
main = lhs
elif t.how == 'left':
main, other = lhs, rhs
join = _join_selectables(lhs, rhs, condition=condition, isouter=True)
elif t.how == 'right':
join = _join_selectables(rhs, lhs, condition=condition, isouter=True)
main = rhs
else:
# http://stackoverflow.com/questions/20361017/sqlalchemy-full-outer-join
raise ValueError("SQLAlchemy doesn't support full outer Join")
"""
We now need to arrange the columns in the join to match the columns in
the expression. We care about order and don't want repeats
"""
if isinstance(join, Select):
def cols(x):
if isinstance(x, Select):
return list(x.inner_columns)
else:
return list(x.columns)
else:
cols = lambda x: list(x.columns)
main_cols = cols(main)
left_cols = cols(lhs)
left_names = set(map(_getname, left_cols))
right_cols = cols(rhs)
right_names = set(map(_getname, right_cols))
left_suffix, right_suffix = t.suffixes
fields = [
f.replace(left_suffix, '').replace(right_suffix, '') for f in t.fields
]
columns = [c for c in main_cols if c.name in t._on_left]
columns += [_clean_join_name(right_names, left_suffix, c)
for c in left_cols
if c.name in fields and c.name not in t._on_left]
columns += [_clean_join_name(left_names, right_suffix, c)
for c in right_cols
if c.name in fields and c.name not in t._on_right]
if isinstance(join, Select):
return join.with_only_columns(columns)
else:
return sa.select(columns, from_obj=join)
names = {
mean: 'avg'
}
def reconstruct_select(columns, original, **kwargs):
return sa.select(columns,
from_obj=kwargs.pop('from_obj', None),
whereclause=kwargs.pop('whereclause',
getattr(original,
'_whereclause', None)),
bind=kwargs.pop('bind', original.bind),
distinct=kwargs.pop('distinct',
getattr(original,
'_distinct', False)),
group_by=kwargs.pop('group_by',
getattr(original,
'_group_by_clause', None)),
having=kwargs.pop('having',
getattr(original, '_having', None)),
limit=kwargs.pop('limit',
getattr(original, '_limit', None)),
offset=kwargs.pop('offset',
getattr(original, '_offset', None)),
order_by=kwargs.pop('order_by',
getattr(original,
'_order_by_clause', None)),
**kwargs)
@dispatch((nunique, Reduction), Select)
def compute_up(expr, data, **kwargs):
if expr.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
data = data.cte(name=next(aliases))
cols = list(inner_columns(data))
d = dict((expr._child[c], cols[i])
for i, c in enumerate(expr._child.fields))
return select([compute(expr, d, post_compute=False)])
@dispatch(Distinct, ColumnElement)
def compute_up(t, s, **kwargs):
return s.distinct(*t.on).label(t._name)
@dispatch(Distinct, Select)
def compute_up(t, s, **kwargs):
return s.distinct(*t.on)
@dispatch(Distinct, Selectable)
def compute_up(t, s, **kwargs):
return select(s).distinct(*t.on)
@dispatch(Reduction, ClauseElement)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
try:
op = getattr(sa.sql.functions, t.symbol)
except AttributeError:
op = getattr(sa.sql.func, names.get(type(t), t.symbol))
return op(s).label(t._name)
prefixes = {
std: 'stddev',
var: 'var'
}
@dispatch((std, var), sql.elements.ColumnElement)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
funcname = 'samp' if t.unbiased else 'pop'
full_funcname = '%s_%s' % (prefixes[type(t)], funcname)
return getattr(sa.func, full_funcname)(s).label(t._name)
@dispatch(count, Selectable)
def compute_up(t, s, **kwargs):
return s.count()
@dispatch(count, sa.Table)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
try:
c = list(s.primary_key)[0]
except IndexError:
c = list(s.columns)[0]
return sa.func.count(c)
@dispatch(nelements, (Select, ClauseElement))
def compute_up(t, s, **kwargs):
return compute_up(t._child.count(), s)
@dispatch(count, Select)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
al = next(aliases)
try:
s2 = s.alias(al)
col = list(s2.primary_key)[0]
except (KeyError, IndexError):
s2 = s.alias(al)
col = list(s2.columns)[0]
result = sa.func.count(col)
return select([list(inner_columns(result))[0].label(t._name)])
@dispatch(nunique, sa.Column)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
return sa.func.count(s.distinct())
@dispatch(nunique, Selectable)
def compute_up(expr, data, **kwargs):
return select(data).distinct().alias(next(aliases)).count()
@dispatch(By, sa.Column)
def compute_up(expr, data, **kwargs):
grouper = lower_column(data)
app = expr.apply
if isinstance(app, Reduction):
reductions = [compute(app, data, post_compute=False)]
elif isinstance(app, Summary):
reductions = [compute(val, data, post_compute=None).label(name)
for val, name in zip(app.values, app.fields)]
return sa.select([grouper] + reductions).group_by(grouper)
@dispatch(By, ClauseElement)
def compute_up(expr, data, **kwargs):
if not valid_grouper(expr):
raise TypeError("Grouper must have a non-nested record or one "
"dimensional collection datashape, "
"got %s of type %r with dshape %s" %
(expr.grouper, type(expr.grouper).__name__,
expr.dshape))
grouper = get_inner_columns(compute(expr.grouper, data,
post_compute=False))
app = expr.apply
reductions = [compute(val, data, post_compute=False).label(name)
for val, name in zip(app.values, app.fields)]
return sa.select(grouper + reductions).group_by(*grouper)
def lower_column(col):
""" Return column from lower level tables if possible
>>> metadata = sa.MetaData()
>>> s = sa.Table('accounts', metadata,
... sa.Column('name', sa.String),
... sa.Column('amount', sa.Integer),
... sa.Column('id', sa.Integer, primary_key=True),
... )
>>> s2 = select([s])
>>> s2.c.amount is s.c.amount
False
>>> lower_column(s2.c.amount) is s.c.amount
True
>>> lower_column(s2.c.amount)
Column('amount', Integer(), table=<accounts>)
"""
old = None
while col is not None and col is not old:
old = col
if not hasattr(col, 'table') or not hasattr(col.table, 'froms'):
return col
for f in col.table.froms:
if f.corresponding_column(col) is not None:
col = f.corresponding_column(col)
return old
aliases = ('alias_%d' % i for i in itertools.count(1))
@toolz.memoize
def alias_it(s):
""" Alias a Selectable if it has a group by clause """
if (hasattr(s, '_group_by_clause') and
s._group_by_clause is not None and
len(s._group_by_clause)):
return s.alias(next(aliases))
else:
return s
def is_nested_record(measure):
"""Predicate for checking whether `measure` is a nested ``Record`` dshape
Examples
--------
>>> from datashape import dshape
>>> is_nested_record(dshape('{a: int32, b: int32}').measure)
False
>>> is_nested_record(dshape('{a: var * ?float64, b: ?string}').measure)
True
"""
if not isrecord(measure):
raise TypeError('Input must be a Record type got %s of type %r' %
(measure, type(measure).__name__))
return not all(isscalar(t) for t in measure.types)
def valid_grouper(expr):
ds = expr.dshape
measure = ds.measure
return (iscollection(ds) and
(isscalar(measure) or
(isrecord(measure) and not is_nested_record(measure))))
def valid_reducer(expr):
ds = expr.dshape
measure = ds.measure
return (not iscollection(ds) and
(isscalar(measure) or
(isrecord(measure) and not is_nested_record(measure))))
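# Illustrative sketch (an assumption): with the dshapes used in this module,
# valid_grouper holds for e.g. 'var * {name: string, amount: int32}' (a flat
# record collection) but not for 'var * {name: string, payments: var * int64}'
# (a nested record), which is exactly what the By handlers below reject.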
@dispatch(By, Select)
def compute_up(expr, data, **kwargs):
if not valid_grouper(expr):
raise TypeError("Grouper must have a non-nested record or one "
"dimensional collection datashape, "
"got %s of type %r with dshape %s" %
(expr.grouper, type(expr.grouper).__name__, expr.dshape))
s = alias_it(data)
if valid_reducer(expr.apply):
reduction = compute(expr.apply, s, post_compute=False)
else:
raise TypeError('apply must be a Summary expression')
grouper = get_inner_columns(compute(expr.grouper, s, post_compute=False))
reduction_columns = pipe(reduction.inner_columns,
map(get_inner_columns),
concat)
columns = list(unique(chain(grouper, reduction_columns)))
if (not isinstance(s, sa.sql.selectable.Alias) or
(hasattr(s, 'froms') and isinstance(s.froms[0],
sa.sql.selectable.Join))):
assert len(s.froms) == 1, 'only a single FROM clause supported for now'
from_obj, = s.froms
else:
from_obj = None
return reconstruct_select(columns,
getattr(s, 'element', s),
from_obj=from_obj,
group_by=grouper)
@dispatch(Sort, (Selectable, Select))
def compute_up(t, s, **kwargs):
s = select(s.alias())
direction = sa.asc if t.ascending else sa.desc
cols = [direction(lower_column(s.c[c])) for c in listpack(t.key)]
return s.order_by(*cols)
@dispatch(Sort, (sa.Table, ColumnElement))
def compute_up(t, s, **kwargs):
s = select(s)
direction = sa.asc if t.ascending else sa.desc
cols = [direction(lower_column(s.c[c])) for c in listpack(t.key)]
return s.order_by(*cols)
@dispatch(Head, FromClause)
def compute_up(t, s, **kwargs):
if s._limit is not None and s._limit <= t.n:
return s
return s.limit(t.n)
@dispatch(Head, sa.Table)
def compute_up(t, s, **kwargs):
return s.select().limit(t.n)
@dispatch(Head, ColumnElement)
def compute_up(t, s, **kwargs):
return sa.select([s]).limit(t.n)
@dispatch(Head, ScalarSelect)
def compute_up(t, s, **kwargs):
return compute(t, s.element, post_compute=False)
@dispatch(Label, ColumnElement)
def compute_up(t, s, **kwargs):
return s.label(t.label)
@dispatch(Label, FromClause)
def compute_up(t, s, **kwargs):
assert len(s.c) == 1, \
'expected %s to have a single column but has %d' % (s, len(s.c))
inner_column, = s.inner_columns
return reconstruct_select([inner_column.label(t.label)], s).as_scalar()
@dispatch(Expr, ScalarSelect)
def post_compute(t, s, **kwargs):
return s.element
@dispatch(ReLabel, Selectable)
def compute_up(t, s, **kwargs):
columns = [getattr(s.c, col).label(new_col)
if col != new_col else
getattr(s.c, col)
for col, new_col in zip(t._child.fields, t.fields)]
return select(columns)
@dispatch(FromClause)
def get_inner_columns(sel):
try:
return list(sel.inner_columns)
except AttributeError:
return list(map(lower_column, sel.c.values()))
@dispatch(ColumnElement)
def get_inner_columns(c):
return [c]
@dispatch(ScalarSelect)
def get_inner_columns(sel):
inner_columns = list(sel.inner_columns)
assert len(inner_columns) == 1, 'ScalarSelect should have only ONE column'
return list(map(lower_column, inner_columns))
@dispatch(sa.sql.functions.Function)
def get_inner_columns(f):
unique_columns = unique(concat(map(get_inner_columns, f.clauses)))
lowered = [x.label(getattr(x, 'name', None)) for x in unique_columns]
return [getattr(sa.func, f.name)(*lowered)]
@dispatch(sa.sql.elements.Label)
def get_inner_columns(label):
"""
Notes
-----
This should only ever return a list of length 1
This is because we need to turn ScalarSelects into an actual column
"""
name = label.name
inner_columns = get_inner_columns(label.element)
assert len(inner_columns) == 1
return [lower_column(c).label(name) for c in inner_columns]
@dispatch(Select)
def get_all_froms(sel):
return list(unique(sel.locate_all_froms()))
@dispatch(sa.Table)
def get_all_froms(t):
return [t]
@dispatch(ColumnClause)
def get_all_froms(c):
return [c.table]
def get_clause(data, kind):
    # argh, SQLAlchemy doesn't allow things like "data._group_by_clause or None"
assert kind == 'order_by' or kind == 'group_by', \
'kind must be "order_by" or "group_by"'
clause = getattr(data, '_%s_clause' % kind, None)
return clause.clauses if clause is not None else None
@dispatch(Merge, (Selectable, Select, sa.Column))
def compute_up(expr, data, **kwargs):
# get the common subexpression of all the children in the merge
subexpression = common_subexpression(*expr.children)
# compute each child, including the common subexpression
children = [compute(child, {subexpression: data}, post_compute=False)
for child in expr.children]
# Get the original columns from the selection and rip out columns from
# Selectables and ScalarSelects
columns = list(unique(concat(map(get_inner_columns, children))))
# we need these getattrs if data is a ColumnClause or Table
from_obj = get_all_froms(data)
assert len(from_obj) == 1, 'only a single FROM clause supported'
return reconstruct_select(columns, data, from_obj=from_obj)
@dispatch(Summary, Select)
def compute_up(t, s, scope=None, **kwargs):
d = dict((t._child[c], list(inner_columns(s))[i])
for i, c in enumerate(t._child.fields))
cols = [compute(val, toolz.merge(scope, d), post_compute=None).label(name)
for name, val in zip(t.fields, t.values)]
s = copy(s)
for c in cols:
s.append_column(c)
return s.with_only_columns(cols)
@dispatch(Summary, ClauseElement)
def compute_up(t, s, **kwargs):
scope = {t._child: s}
return sa.select(
compute(value, scope, post_compute=None).label(name)
for value, name in zip(t.values, t.fields)
)
@dispatch(Like, Selectable)
def compute_up(t, s, **kwargs):
return compute_up(t, select(s), **kwargs)
@dispatch(Like, Select)
def compute_up(t, s, **kwargs):
items = [(f.c.get(name), pattern.replace('*', '%'))
for name, pattern in t.patterns.items()
for f in s.froms if name in f.c]
return s.where(reduce(and_, [key.like(pattern) for key, pattern in items]))
string_func_names = {
# <blaze function name>: <SQL function name>
}
# TODO: remove if the alternative fix goes into PyHive
@compiles(sa.sql.functions.Function, 'hive')
def compile_char_length_on_hive(element, compiler, **kwargs):
assert len(element.clauses) == 1, \
'char_length must have a single clause, got %s' % list(element.clauses)
if element.name == 'char_length':
return compiler.visit_function(sa.func.length(*element.clauses),
**kwargs)
return compiler.visit_function(element, **kwargs)
@dispatch(strlen, ColumnElement)
def compute_up(expr, data, **kwargs):
return sa.sql.functions.char_length(data).label(expr._name)
@dispatch(UnaryStringFunction, ColumnElement)
def compute_up(expr, data, **kwargs):
func_name = type(expr).__name__
func_name = string_func_names.get(func_name, func_name)
return getattr(sa.sql.func, func_name)(data).label(expr._name)
@dispatch(notnull, ColumnElement)
def compute_up(expr, data, **kwargs):
return data != None
@toolz.memoize
def table_of_metadata(metadata, name):
if metadata.schema is not None:
name = '.'.join((metadata.schema, name))
if name not in metadata.tables:
metadata.reflect(views=metadata.bind.dialect.supports_views)
return metadata.tables[name]
def table_of_engine(engine, name):
metadata = metadata_of_engine(engine)
return table_of_metadata(metadata, name)
@dispatch(Field, sa.engine.Engine)
def compute_up(expr, data, **kwargs):
return table_of_engine(data, expr._name)
@dispatch(DateTime, (ClauseElement, sa.sql.elements.ColumnElement))
def compute_up(expr, data, **kwargs):
if expr.attr == 'date':
return sa.func.date(data).label(expr._name)
return sa.extract(expr.attr, data).label(expr._name)
@compiles(sa.sql.elements.Extract, 'hive')
def hive_extract_to_date_function(element, compiler, **kwargs):
func = getattr(sa.func, element.field)(element.expr)
return compiler.visit_function(func, **kwargs)
@compiles(sa.sql.elements.Extract, 'mssql')
def mssql_extract_to_datepart(element, compiler, **kwargs):
func = sa.func.datepart(sa.sql.expression.column(element.field),
element.expr)
return compiler.visit_function(func, **kwargs)
def engine_of(x):
if isinstance(x, Engine):
return x
if isinstance(x, MetaData):
return x.bind
if isinstance(x, Table):
return x.metadata.bind
raise NotImplementedError("Can't deterimine engine of %s" % x)
@dispatch(Expr, ClauseElement)
def optimize(expr, _):
return broadcast_collect(expr)
@dispatch(Field, sa.MetaData)
def compute_up(expr, data, **kwargs):
return table_of_metadata(data, expr._name)
@dispatch(Expr, ClauseElement)
def post_compute(_, s, **kwargs):
return select(s)
@dispatch(IsIn, ColumnElement)
def compute_up(expr, data, **kwargs):
return data.in_(expr._keys)
@dispatch(Slice, (Select, Selectable, ColumnElement))
def compute_up(expr, data, **kwargs):
index = expr.index[0] # [0] replace_slices returns tuple ((start, stop), )
if isinstance(index, slice):
start = index.start or 0
if start < 0:
raise ValueError('start value of slice cannot be negative'
' with a SQL backend')
stop = index.stop
if stop is not None and stop < 0:
raise ValueError('stop value of slice cannot be negative with a '
'SQL backend.')
if index.step is not None and index.step != 1:
raise ValueError('step parameter in slice objects not supported '
'with SQL backend')
elif isinstance(index, (np.integer, numbers.Integral)):
if index < 0:
raise ValueError('integer slice cannot be negative for the'
' SQL backend')
start = index
stop = start + 1
else:
        raise TypeError('type %r not supported for slicing with SQL backend'
% type(index).__name__)
warnings.warn('The order of the result set from a Slice expression '
'computed against the SQL backend is not deterministic.')
if stop is None: # Represents open-ended slice. e.g. [3:]
return select(data).offset(start)
else:
return select(data).offset(start).limit(stop - start)
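# Illustrative sketch (an assumption): under the translation above, expr[2:5]
# compiles to roughly "SELECT ... LIMIT 3 OFFSET 2" and expr[3:] to
# "SELECT ... OFFSET 3"; row order is not guaranteed, as the warning notes.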
@dispatch(Coerce, ColumnElement)
def compute_up(expr, data, **kwargs):
return sa.cast(data, dshape_to_alchemy(expr.to)).label(expr._name)
|
|
"""
This module contains a set of similarity measures that was designed for use
within the Lucas-Kanade framework. They therefore expose a number of methods
that make them useful for inverse compositional and forward additive
Lucas-Kanade.
These similarity measures are designed to be dimension independent where
possible. For this reason, some methods look more complicated than would be
normally the case. For example, calculating the Hessian involves summing
a multi-dimensional array, so we dynamically calculate the list of axes
to sum over. However, the basics of the logic, other than dimension
reduction, should be similar to the original algorithms.
References
----------
.. [1] Lucas, Bruce D., and Takeo Kanade.
"An iterative image registration technique with an application to stereo
vision."
IJCAI. Vol. 81. 1981.
"""
import abc
import numpy as np
from numpy.fft import fftshift, fft2
import scipy.linalg
from menpo.math import log_gabor
from menpo.image import MaskedImage
from menpo.feature import gradient
class Residual(object):
"""
An abstract base class for calculating the residual between two images
within the Lucas-Kanade algorithm. The classes were designed
specifically to work within the Lucas-Kanade framework and so no
guarantee is made that calling methods on these subclasses will generate
correct results.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@property
def error(self):
r"""
The RMS of the error image.
:type: float
Notes
-----
        Will only generate a result if the
        :func:`steepest_descent_update` function has been called beforehand.
        .. math::
            error = \sqrt{\frac{1}{N} \sum_x E(x)^2}
where :math:`E(x) = T(x) - I(W(x;p))` within the forward additive
framework.
"""
return np.sqrt(np.mean(self._error_img ** 2))
@abc.abstractmethod
def steepest_descent_images(self, image, dW_dp, **kwargs):
r"""
Calculates the standard steepest descent images.
Within the forward additive framework this is defined as
.. math::
\nabla I \frac{\partial W}{\partial p}
The input image is vectorised (`N`-pixels) so that masked images can
be handled.
Parameters
----------
image : :class:`menpo.image.base.Image`
The image to calculate the steepest descent images from, could be
either the template or input image depending on which framework is
used.
dW_dp : ndarray
The Jacobian of the warp.
Returns
-------
VT_dW_dp : (N, n_params) ndarray
The steepest descent images
"""
pass
@abc.abstractmethod
def calculate_hessian(self, VT_dW_dp):
r"""
Calculates the Gauss-Newton approximation to the Hessian.
This is abstracted because some residuals expect the Hessian to be
pre-processed. The Gauss-Newton approximation to the Hessian is
defined as:
.. math::
\mathbf{J J^T}
Parameters
----------
VT_dW_dp : (N, n_params) ndarray
The steepest descent images.
Returns
-------
H : (n_params, n_params) ndarray
The approximation to the Hessian
"""
pass
@abc.abstractmethod
def steepest_descent_update(self, VT_dW_dp, IWxp, template):
r"""
Calculates the steepest descent parameter updates.
These are defined, for the forward additive algorithm, as:
.. math::
\sum_x [ \nabla I \frac{\partial W}{\partial p} ]^T [ T(x) - I(W(x;p)) ]
Parameters
----------
VT_dW_dp : (N, n_params) ndarray
The steepest descent images.
IWxp : :class:`menpo.image.base.Image`
Either the warped image or the template
(depending on the framework)
template : :class:`menpo.image.base.Image`
Either the warped image or the template
(depending on the framework)
Returns
-------
sd_delta_p : (n_params,) ndarray
The steepest descent parameter updates.
"""
pass
def _calculate_gradients(self, image, forward=None):
r"""
Calculates the gradients of the given method.
If `forward` is provided, then the gradients are warped
(as required in the forward additive algorithm)
Parameters
----------
image : :class:`menpo.image.base.Image`
The image to calculate the gradients for
forward : (:map:`Image`, :map:`AlignableTransform>`), optional
A tuple containing the extra weights required for the function
`warp` (which should be passed as a function handle).
Default: `None`
"""
if forward:
# Calculate the gradient over the image
# grad: (dims x ch) x H x W
grad = gradient(image)
# Warp gradient for forward additive using the given transform
# grad: (dims x ch) x h x w
template, transform = forward
grad = grad.warp_to_mask(template.mask, transform,
warp_landmarks=False)
else:
# Calculate the gradient over the image and set one pixels along
# the boundary of the image mask to zero (no reliable gradient
# can be computed there!)
# grad: (dims x ch) x h x w
grad = gradient(image)
grad.set_boundary_pixels()
return grad
class SSD(Residual):
type = 'SSD'
def steepest_descent_images(self, image, dW_dp, forward=None):
# compute gradient
# grad: dims x ch x pixels
grad = self._calculate_gradients(image, forward=forward)
grad = grad.as_vector().reshape((image.n_dims, image.n_channels, -1))
# compute steepest descent images
# gradient: dims x ch x pixels
        # dw_dp: dims x pixels x params
# sdi: ch x pixels x params
sdi = 0
a = grad[..., None] * dW_dp[:, None, ...]
for d in a:
sdi += d
# reshape steepest descent images
# sdi: (ch x pixels) x params
return sdi.reshape((-1, sdi.shape[-1]))
def calculate_hessian(self, sdi, sdi2=None):
# compute hessian
# sdi.T: params x (ch x pixels)
# sdi: (ch x pixels) x params
        # hessian: params x params
if sdi2 is None:
H = sdi.T.dot(sdi)
else:
H = sdi.T.dot(sdi2)
return H
def steepest_descent_update(self, sdi, IWxp, template):
self._error_img = IWxp.as_vector() - template.as_vector()
return sdi.T.dot(self._error_img)
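# Illustrative usage sketch (an assumption, simplified): inside a Lucas-Kanade
# solver a residual such as SSD is typically driven as
#
#     residual = SSD()
#     sdi = residual.steepest_descent_images(image, dW_dp)
#     H = residual.calculate_hessian(sdi)
#     sd_delta_p = residual.steepest_descent_update(sdi, IWxp, template)
#     delta_p = np.linalg.solve(H, sd_delta_p)   # Gauss-Newton parameter update
#
# where dW_dp is the warp Jacobian and IWxp the warped input image.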
class GaborFourier(Residual):
type = 'GaborFourier'
def __init__(self, image_shape, **kwargs):
super(GaborFourier, self).__init__()
if 'filter_bank' in kwargs:
self._filter_bank = kwargs.get('filter_bank')
if self._filter_bank.shape != image_shape:
raise ValueError('Filter bank shape must match the shape '
'of the image')
else:
gabor = log_gabor(np.ones(image_shape), **kwargs)
# Get filter bank matrix
self._filter_bank = gabor[2]
# Flatten the filter bank for vectorized calculations
self._filter_bank = self._filter_bank.ravel()
def steepest_descent_images(self, image, dW_dp, forward=None):
n_dims = image.n_dims
n_channels = image.n_channels
n_params = dW_dp.shape[-1]
# compute gradient
# grad: dims x ch x pixels
grad_img = self._calculate_gradients(image, forward=forward)
grad = grad_img.as_vector().reshape((n_dims, n_channels, -1))
# compute steepest descent images
# gradient: dims x ch x pixels
        # dw_dp: dims x pixels x params
# sdi: ch x pixels x params
sdi = 0
a = grad[..., None] * dW_dp[:, None, ...]
for d in a:
sdi += d
# make sdi images
# sdi_img: ch x h x w x params
sdi_mask = np.tile(grad_img.mask.pixels[0, ..., None],
(1, 1, n_params))
sdi_img = MaskedImage.blank(grad_img.shape + (n_params,),
n_channels=n_channels,
mask=sdi_mask)
sdi_img.from_vector_inplace(sdi.ravel())
# compute FFT over each channel, parameter and dimension
# fft_sdi: ch x h x w x params
fft_sdi = fftshift(fft2(sdi_img.pixels, axes=(-3, -2)), axes=(-3, -2))
        # Note that fft_sdi is rectangular, i.e. it is not defined in
# terms of the mask pixels, but in terms of the whole image.
# Selecting mask pixels once the fft has been computed makes no
# sense because they have lost their original spatial meaning.
# reshape steepest descent images
# sdi: (ch x h x w) x params
return fft_sdi.reshape((-1, fft_sdi.shape[-1]))
def calculate_hessian(self, sdi):
# reshape steepest descent images
# sdi: ch x (h x w) x params
sdi = sdi.reshape((-1, self._filter_bank.shape[0], sdi.shape[-1]))
# compute filtered steepest descent images
# filter_bank: (h x w)
# sdi: ch x (h x w) x params
# filtered_sdi: ch x (h x w) x params
filtered_sdi = (self._filter_bank[None, ..., None] ** 0.5) * sdi
# reshape filtered steepest descent images
# filtered_sdi: (ch x h x w) x params
filtered_sdi = filtered_sdi.reshape((-1, sdi.shape[-1]))
# compute filtered hessian
# filtered_sdi.T: params x (ch x h x w)
# filtered_sdi: (ch x h x w) x params
        # hessian: params x params
return np.conjugate(filtered_sdi).T.dot(filtered_sdi)
def steepest_descent_update(self, sdi, IWxp, template):
# compute error image
# error_img: ch x h x w
error_img = IWxp.pixels - template.pixels
# compute FFT error image
# fft_error_img: ch x (h x w)
fft_error_img = fftshift(fft2(error_img))
fft_error_img = fft_error_img.reshape((IWxp.n_channels, -1))
# compute filtered steepest descent images
# filter_bank: (h x w)
# fft_error_img: ch x (h x w)
# filtered_error_img: ch x (h x w)
filtered_error_img = self._filter_bank * fft_error_img
# reshape _error_img
# error_img: (ch x h x w)
self._error_img = filtered_error_img.ravel()
# compute steepest descent update
# sdi: params x (ch x h x w)
# error_img: (ch x h x w)
# sdu: params
return sdi.T.dot(np.conjugate(self._error_img))
class ECC(Residual):
type = 'ECC'
def _normalise_images(self, image):
# TODO: do we need to copy the image?
# TODO: is this supposed to be per channel normalization?
norm_image = image.copy()
norm_image.normalize_norm_inplace()
return norm_image
def steepest_descent_images(self, image, dW_dp, forward=None):
# normalize image
norm_image = self._normalise_images(image)
# compute gradient
# gradient: dims x ch x pixels
grad = self._calculate_gradients(norm_image, forward=forward)
grad = grad.as_vector().reshape((image.n_dims, image.n_channels, -1))
# compute steepest descent images
# gradient: dims x ch x pixels
        # dw_dp: dims x 1 x pixels x params (channel axis broadcast)
# sdi: ch x pixels x params
sdi = 0
a = grad[..., None] * dW_dp[:, None, ...]
for d in a:
sdi += d
# reshape steepest descent images
# sdi: (ch x pixels) x params
return sdi.reshape((-1, sdi.shape[-1]))
def calculate_hessian(self, sdi):
# compute hessian
# sdi.T: params x (ch x pixels)
# sdi: (ch x pixels) x params
        # hessian: params x params
H = sdi.T.dot(sdi)
self._H_inv = scipy.linalg.inv(H)
return H
def steepest_descent_update(self, sdi, IWxp, template):
normalised_IWxp = self._normalise_images(IWxp).as_vector()
normalised_template = self._normalise_images(template).as_vector()
Gt = sdi.T.dot(normalised_template)
Gw = sdi.T.dot(normalised_IWxp)
# Calculate the numerator
IWxp_norm = scipy.linalg.norm(normalised_IWxp)
num1 = IWxp_norm ** 2
num2 = np.dot(Gw.T, np.dot(self._H_inv, Gw))
num = num1 - num2
# Calculate the denominator
den1 = np.dot(normalised_template, normalised_IWxp)
den2 = np.dot(Gt.T, np.dot(self._H_inv, Gw))
den = den1 - den2
# Calculate lambda to choose the step size
# Avoid division by zero
if den > 0:
l = num / den
else:
den3 = np.dot(Gt.T, np.dot(self._H_inv, Gt))
l1 = np.sqrt(num2 / den3)
l2 = - den / den3
l = np.maximum(l1, l2)
self._error_img = l * normalised_IWxp - normalised_template
return sdi.T.dot(self._error_img)
class GradientImages(Residual):
type = 'GradientImages'
def _regularise_gradients(self, grad):
pixels = grad.pixels
ab = np.sqrt(np.sum(pixels**2, axis=0))
m_ab = np.median(ab)
ab = ab + m_ab
grad.pixels = pixels / ab
return grad
def steepest_descent_images(self, image, dW_dp, forward=None):
n_dims = image.n_dims
n_channels = image.n_channels
# compute gradient
first_grad = self._calculate_gradients(image, forward=forward)
self._template_grad = self._regularise_gradients(first_grad)
# compute gradient
# second_grad: dims x dims x ch x pixels
second_grad = self._calculate_gradients(self._template_grad)
second_grad = second_grad.masked_pixels().flatten().reshape(
(n_dims, n_dims, n_channels, -1))
# Fix crossed derivatives: dydx = dxdy
second_grad[1, 0, ...] = second_grad[0, 1, ...]
# compute steepest descent images
# gradient: dims x dims x ch x (h x w)
        # dw_dp: dims x 1 x 1 x (h x w) x params (broadcast axes)
# sdi: dims x ch x (h x w) x params
sdi = 0
a = second_grad[..., None] * dW_dp[:, None, None, ...]
for d in a:
sdi += d
# reshape steepest descent images
# sdi: (dims x ch x h x w) x params
return sdi.reshape((-1, sdi.shape[-1]))
def calculate_hessian(self, sdi):
# compute hessian
# sdi.T: params x (dims x ch x pixels)
# sdi: (dims x ch x pixels) x params
        # hessian: params x params
return sdi.T.dot(sdi)
def steepest_descent_update(self, sdi, IWxp, template):
# compute IWxp regularized gradient
IWxp_grad = self._calculate_gradients(IWxp)
IWxp_grad = self._regularise_gradients(IWxp_grad)
# compute vectorized error_image
# error_img: (dims x ch x pixels)
self._error_img = (IWxp_grad.as_vector() -
self._template_grad.as_vector())
# compute steepest descent update
# sdi.T: params x (dims x ch x pixels)
# error_img: (dims x ch x pixels)
# sdu: params
return sdi.T.dot(self._error_img)
class GradientCorrelation(Residual):
type = 'GradientCorrelation'
def steepest_descent_images(self, image, dW_dp, forward=None):
n_dims = image.n_dims
n_channels = image.n_channels
# compute gradient
# grad: dims x ch x pixels
grad = self._calculate_gradients(image, forward=forward)
grad2 = grad.as_vector().reshape((n_dims, n_channels, -1))
# compute IGOs (remember axis 0 is y, axis 1 is x)
# grad: dims x ch x pixels
# phi: ch x pixels
# cos_phi: ch x pixels
# sin_phi: ch x pixels
phi = np.angle(grad2[1, ...] + 1j * grad2[0, ...])
self._cos_phi = np.cos(phi)
self._sin_phi = np.sin(phi)
# concatenate sin and cos terms so that we can take the second
# derivatives correctly. sin(phi) = y and cos(phi) = x which is the
# correct ordering when multiplying against the warp Jacobian
# cos_phi: ch x pixels
# sin_phi: ch x pixels
# grad: (dims x ch) x pixels
grad.from_vector_inplace(
np.concatenate((self._sin_phi[None, ...],
self._cos_phi[None, ...]), axis=0).ravel())
# compute IGOs gradient
# second_grad: dims x dims x ch x pixels
second_grad = self._calculate_gradients(grad)
second_grad = second_grad.masked_pixels().flatten().reshape(
(n_dims, n_dims, n_channels, -1))
# Fix crossed derivatives: dydx = dxdy
second_grad[1, 0, ...] = second_grad[0, 1, ...]
# complete full IGOs gradient computation
# second_grad: dims x dims x ch x pixels
second_grad[1, ...] = (-self._sin_phi[None, ...] * second_grad[1, ...])
second_grad[0, ...] = (self._cos_phi[None, ...] * second_grad[0, ...])
# compute steepest descent images
# gradient: dims x dims x ch x pixels
        # dw_dp: 1 x dims x 1 x pixels x params (broadcast axes)
# sdi: ch x pixels x params
sdi = 0
aux = second_grad[..., None] * dW_dp[None, :, None, ...]
for a in aux.reshape(((-1,) + aux.shape[2:])):
sdi += a
# compute constant N
# N: 1
self._N = grad.n_parameters / 2
# reshape steepest descent images
# sdi: (ch x pixels) x params
return sdi.reshape((-1, sdi.shape[-1]))
def calculate_hessian(self, sdi):
# compute hessian
# sdi.T: params x (dims x ch x pixels)
# sdi: (dims x ch x pixels) x params
        # hessian: params x params
return sdi.T.dot(sdi)
def steepest_descent_update(self, sdi, IWxp, template):
n_dims = IWxp.n_dims
n_channels = IWxp.n_channels
# compute IWxp gradient
IWxp_grad = self._calculate_gradients(IWxp)
IWxp_grad = IWxp_grad.as_vector().reshape(
(n_dims, n_channels, -1))
# compute IGOs (remember axis 0 is y, axis 1 is x)
# IWxp_grad: dims x ch x pixels
# phi: ch x pixels
# IWxp_cos_phi: ch x pixels
# IWxp_sin_phi: ch x pixels
phi = np.angle(IWxp_grad[1, ...] + 1j * IWxp_grad[0, ...])
IWxp_cos_phi = np.cos(phi)
IWxp_sin_phi = np.sin(phi)
# compute error image
# error_img: (ch x h x w)
self._error_img = (self._cos_phi * IWxp_sin_phi -
self._sin_phi * IWxp_cos_phi).ravel()
# compute steepest descent update
# sdi: (ch x pixels) x params
# error_img: (ch x pixels)
# sdu: params
sdu = sdi.T.dot(self._error_img)
# compute step size
qp = np.sum(self._cos_phi * IWxp_cos_phi +
self._sin_phi * IWxp_sin_phi)
l = self._N / qp
return l * sdu
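# --- Illustrative sketch (not part of the original module) -------------------
# All of the residuals above feed a Gauss-Newton style update: steepest
# descent images of shape (pixels x params) give H = sdi.T.dot(sdi) and the
# update direction sdi.T.dot(error_img); the increment is H^-1 applied to it.
# Shapes and names here are assumptions chosen only for demonstration.
def _gauss_newton_step_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    n_pixels, n_params = 1000, 6
    sdi = rng.randn(n_pixels, n_params)     # steepest descent images
    error_img = rng.randn(n_pixels)         # vectorized IWxp - template
    H = sdi.T.dot(sdi)                      # params x params
    sd_update = sdi.T.dot(error_img)        # params
    dp = np.linalg.solve(H, sd_update)      # parameter increment
    return dp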
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import random
import functools
import numpy as np
from threading import Thread
import subprocess
import time
from Queue import Queue
import paddle
from PIL import Image, ImageEnhance
random.seed(0)
DATA_DIM = 224
THREAD = int(os.getenv("PREPROCESS_THREADS", "10"))
BUF_SIZE = 5120
DATA_DIR = '/mnt/ImageNet'
TRAIN_LIST = '/mnt/ImageNet/train.txt'
TEST_LIST = '/mnt/ImageNet/val.txt'
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
def resize_short(img, target_size):
percent = float(target_size) / min(img.size[0], img.size[1])
resized_width = int(round(img.size[0] * percent))
resized_height = int(round(img.size[1] * percent))
img = img.resize((resized_width, resized_height), Image.LANCZOS)
return img
def crop_image(img, target_size, center):
width, height = img.size
size = target_size
    if center:
w_start = (width - size) / 2
h_start = (height - size) / 2
else:
w_start = random.randint(0, width - size)
h_start = random.randint(0, height - size)
w_end = w_start + size
h_end = h_start + size
img = img.crop((w_start, h_start, w_end, h_end))
return img
def random_crop(img, size, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
aspect_ratio = math.sqrt(random.uniform(*ratio))
w = 1. * aspect_ratio
h = 1. / aspect_ratio
bound = min((float(img.size[0]) / img.size[1]) / (w**2),
(float(img.size[1]) / img.size[0]) / (h**2))
scale_max = min(scale[1], bound)
scale_min = min(scale[0], bound)
target_area = img.size[0] * img.size[1] * random.uniform(scale_min,
scale_max)
target_size = math.sqrt(target_area)
w = int(target_size * w)
h = int(target_size * h)
i = random.randint(0, img.size[0] - w)
j = random.randint(0, img.size[1] - h)
img = img.crop((i, j, i + w, j + h))
img = img.resize((size, size), Image.LANCZOS)
return img
def rotate_image(img):
angle = random.randint(-10, 10)
img = img.rotate(angle)
return img
def distort_color(img):
def random_brightness(img, lower=0.5, upper=1.5):
e = random.uniform(lower, upper)
return ImageEnhance.Brightness(img).enhance(e)
def random_contrast(img, lower=0.5, upper=1.5):
e = random.uniform(lower, upper)
return ImageEnhance.Contrast(img).enhance(e)
def random_color(img, lower=0.5, upper=1.5):
e = random.uniform(lower, upper)
return ImageEnhance.Color(img).enhance(e)
ops = [random_brightness, random_contrast, random_color]
random.shuffle(ops)
img = ops[0](img)
img = ops[1](img)
img = ops[2](img)
return img
def process_image(sample, mode, color_jitter, rotate):
img_path = sample[0]
img = Image.open(img_path)
if mode == 'train':
if rotate: img = rotate_image(img)
img = random_crop(img, DATA_DIM)
else:
img = resize_short(img, target_size=256)
img = crop_image(img, target_size=DATA_DIM, center=True)
if mode == 'train':
if color_jitter:
img = distort_color(img)
if random.randint(0, 1) == 1:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
img -= img_mean
img /= img_std
if mode == 'train' or mode == 'val':
return img, sample[1]
elif mode == 'test':
return [img]
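# --- Hedged usage sketch (illustrative only; the path below is made up) ------
# process_image() maps a (path, label) sample to a normalised CHW float32
# array: random crop / flip / optional colour jitter in 'train' mode, resize
# plus centre crop otherwise.
def _process_image_demo():
    sample = ('/mnt/ImageNet/train/n01440764/example.jpeg', 0)  # hypothetical
    img, label = process_image(sample, 'train', color_jitter=False, rotate=False)
    return img.shape, label   # expected: (3, DATA_DIM, DATA_DIM), 0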
class XmapEndSignal():
pass
def xmap_readers(mapper,
reader,
process_num,
buffer_size,
order=False,
print_queue_state=True):
end = XmapEndSignal()
# define a worker to read samples from reader to in_queue
def read_worker(reader, in_queue):
for i in reader():
in_queue.put(i)
in_queue.put(end)
# define a worker to read samples from reader to in_queue with order flag
def order_read_worker(reader, in_queue, file_queue):
in_order = 0
for i in reader():
in_queue.put((in_order, i))
in_order += 1
in_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue
def handle_worker(in_queue, out_queue, mapper):
sample = in_queue.get()
while not isinstance(sample, XmapEndSignal):
r = mapper(sample)
out_queue.put(r)
sample = in_queue.get()
in_queue.put(end)
out_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue by order
def order_handle_worker(in_queue, out_queue, mapper, out_order):
ins = in_queue.get()
while not isinstance(ins, XmapEndSignal):
order, sample = ins
r = mapper(sample)
while order != out_order[0]:
pass
out_queue.put(r)
out_order[0] += 1
ins = in_queue.get()
in_queue.put(end)
out_queue.put(end)
def xreader():
file_queue = Queue()
in_queue = Queue(buffer_size)
out_queue = Queue(buffer_size)
out_order = [0]
# start a read worker in a thread
target = order_read_worker if order else read_worker
t = Thread(target=target, args=(reader, in_queue))
t.daemon = True
t.start()
# start several handle_workers
target = order_handle_worker if order else handle_worker
args = (in_queue, out_queue, mapper, out_order) if order else (
in_queue, out_queue, mapper)
workers = []
for i in xrange(process_num):
worker = Thread(target=target, args=args)
worker.daemon = True
workers.append(worker)
for w in workers:
w.start()
sample = out_queue.get()
start_t = time.time()
while not isinstance(sample, XmapEndSignal):
yield sample
sample = out_queue.get()
if time.time() - start_t > 3:
if print_queue_state:
print("queue sizes: ", in_queue.qsize(), out_queue.qsize())
start_t = time.time()
finish = 1
while finish < process_num:
sample = out_queue.get()
if isinstance(sample, XmapEndSignal):
finish += 1
else:
yield sample
return xreader
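# --- Hedged usage sketch (illustrative only) ----------------------------------
# xmap_readers() starts one read_worker thread feeding in_queue and
# `process_num` handle_worker threads feeding out_queue; the returned xreader
# yields mapped samples until every worker has passed on the end signal.
# The tiny reader and mapper below are assumptions for demonstration.
def _xmap_readers_demo():
    def tiny_reader():
        for i in range(8):
            yield i
    wrapped = xmap_readers(lambda x: x * x, tiny_reader,
                           process_num=2, buffer_size=4)
    return sorted(wrapped())   # squares of 0..7; yield order is not guaranteed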
def _reader_creator(file_list,
mode,
shuffle=False,
color_jitter=False,
rotate=False,
xmap=True):
def reader():
with open(file_list) as flist:
full_lines = [line.strip() for line in flist]
if shuffle:
random.shuffle(full_lines)
if mode == 'train':
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
trainer_count = int(os.getenv("PADDLE_TRAINERS"))
per_node_lines = len(full_lines) / trainer_count
lines = full_lines[trainer_id * per_node_lines:(trainer_id + 1)
* per_node_lines]
print(
"read images from %d, length: %d, lines length: %d, total: %d"
% (trainer_id * per_node_lines, per_node_lines, len(lines),
len(full_lines)))
else:
lines = full_lines
for line in lines:
if mode == 'train':
img_path, label = line.split()
img_path = img_path.replace("JPEG", "jpeg")
img_path = os.path.join(DATA_DIR, "train", img_path)
yield (img_path, int(label))
elif mode == 'val':
img_path, label = line.split()
img_path = img_path.replace("JPEG", "jpeg")
img_path = os.path.join(DATA_DIR, "val", img_path)
yield (img_path, int(label))
elif mode == 'test':
img_path = os.path.join(DATA_DIR, line)
yield [img_path]
mapper = functools.partial(
process_image, mode=mode, color_jitter=color_jitter, rotate=rotate)
return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE)
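# --- Hedged sketch of the per-trainer shard (illustrative only) ---------------
# In 'train' mode the reader above gives trainer i of PADDLE_TRAINERS a
# contiguous block of the shuffled file list; the helper below reproduces
# that arithmetic on an arbitrary list (the name is an assumption).
def _shard_for_trainer(full_lines, trainer_id, trainer_count):
    per_node_lines = len(full_lines) // trainer_count
    return full_lines[trainer_id * per_node_lines:
                      (trainer_id + 1) * per_node_lines]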
def load_raw_image_uint8(sample):
img_arr = np.array(Image.open(sample[0])).astype('int64')
return img_arr, int(sample[1])
def train_raw(file_list=TRAIN_LIST, shuffle=True):
def reader():
with open(file_list) as flist:
full_lines = [line.strip() for line in flist]
if shuffle:
random.shuffle(full_lines)
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
trainer_count = int(os.getenv("PADDLE_TRAINERS"))
per_node_lines = len(full_lines) / trainer_count
lines = full_lines[trainer_id * per_node_lines:(trainer_id + 1) *
per_node_lines]
print("read images from %d, length: %d, lines length: %d, total: %d"
% (trainer_id * per_node_lines, per_node_lines, len(lines),
len(full_lines)))
for line in lines:
img_path, label = line.split()
img_path = img_path.replace("JPEG", "jpeg")
img_path = os.path.join(DATA_DIR, "train", img_path)
yield (img_path, int(label))
return paddle.reader.xmap_readers(load_raw_image_uint8, reader, THREAD,
BUF_SIZE)
def train(file_list=TRAIN_LIST, xmap=True):
return _reader_creator(
file_list,
'train',
shuffle=True,
color_jitter=False,
rotate=False,
xmap=xmap)
def val(file_list=TEST_LIST, xmap=True):
return _reader_creator(file_list, 'val', shuffle=False, xmap=xmap)
def test(file_list=TEST_LIST):
return _reader_creator(file_list, 'test', shuffle=False)
if __name__ == "__main__":
c = 0
start_t = time.time()
for d in train()():
c += 1
if c >= 10000:
break
spent = time.time() - start_t
print("read 10000 speed: ", 10000 / spent, spent)
|
|
"""A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <[email protected]>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <[email protected]> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <[email protected]> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import re, socket
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well).
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead, it doesn't suffer from the two problems
above.
"""
encoding = 'UTF-8'
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _create_socket(self, timeout):
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
if self._debugging > 1: print('*put*', repr(line))
self.sock.sendall(line + CRLF)
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print('*cmd*', repr(line))
line = bytes(line, self.encoding)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline()
if self._debugging > 1: print('*get*', repr(line))
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
        if line[:1] == CR:  # slice, not index, so we compare bytes with bytes
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print('*resp*', repr(resp))
if not resp.startswith(b'+'):
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != b'.':
if line.startswith(b'..'):
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print('*stat*', repr(rets))
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
resp = self._shortcmd('QUIT')
self.close()
return resp
def close(self):
"""Close the connection without assuming anything about it."""
if self.file is not None:
self.file.close()
if self.sock is not None:
self.sock.close()
self.file = self.sock = None
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(br'\+OK.*(<[^>]+>)')
def apop(self, user, password):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
password - mailbox password.
NB: mailbox is locked by server from here to 'quit()'
"""
secret = bytes(password, self.encoding)
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = m.group(1)+secret
digest = hashlib.md5(digest).hexdigest()
return self._shortcmd('APOP %s %s' % (user, digest))
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
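# --- Hedged usage sketch (illustrative only; host and credentials are made up)
# As the class docstring advises, connect, drain the mailbox and QUIT promptly,
# because the server keeps the mailbox locked until quit().
def _pop3_drain_example(host='pop.example.org', user='alice', password='secret'):
    box = POP3(host)
    try:
        box.user(user)
        box.pass_(password)
        n_messages, _mbox_size = box.stat()
        messages = []
        for i in range(1, n_messages + 1):
            _resp, lines, _octets = box.retr(i)
            messages.append(b'\n'.join(lines))
            box.dele(i)          # marked for deletion; committed by quit()
    finally:
        box.quit()
    return messages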
try:
import ssl
except ImportError:
pass
else:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)
hostname - the hostname of the pop3 over ssl server
port - port number
        keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
self.keyfile = keyfile
self.certfile = certfile
self.context = context
POP3.__init__(self, host, port, timeout)
def _create_socket(self, timeout):
sock = POP3._create_socket(self, timeout)
if self.context is not None:
sock = self.context.wrap_socket(sock)
else:
sock = ssl.wrap_socket(sock, self.keyfile, self.certfile)
return sock
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print(a.getwelcome())
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print("Message %d:" % i)
for line in msg:
print(' ' + line)
print('-----------------------')
a.quit()
|
|
#!/home/jhjguxin/blogserver/bin/python
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <[email protected]>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from PIL import Image
import string
class PILDriver:
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = self.do_pop()
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack = [item] + self.stack
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
top = self.stack[0]
self.stack = self.stack[1:]
return top
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if hasattr(self.stack[0], 'format'):  # if the top item is an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.stack = [dup] + self.stack
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]
        Merge top-of-stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.push(self.draft(mode, (xsize, ysize)))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
import ImageFilter
        filter = getattr(ImageFilter, string.upper(self.do_pop()))
image = self.do_pop()
self.push(image.filter(filter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
self.push(image.offset(xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = string.upper(self.do_pop())
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
        self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
        self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
        size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
        enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
        enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
        enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print "Stack: " + `self.stack`
top = self.top()
if type(top) != type(""):
                continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
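# --- Hedged usage sketch (illustrative only; test.png is a placeholder) -------
# execute() consumes tokens right to left (Polish notation), so operands
# follow their operator exactly as in the module docstring examples.
def _pildriver_demo():
    driver = PILDriver()
    # equivalent of the docstring example: show crop 0 0 200 300 open test.png
    driver.execute("show crop 0 0 200 300 open test.png".split())
    return driver.stack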
if __name__ == '__main__':
import sys
try:
import readline
except ImportError:
pass # not available on all platforms
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print "PILDriver says hello."
while 1:
try:
                line = raw_input('pildriver> ')
except EOFError:
print "\nPILDriver says goodbye."
break
driver.execute(string.split(line))
print driver.stack
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common import password_gen
from heat.engine.clients.os import swift
from heat.engine.resources import stack_user
LOG = logging.getLogger(__name__)
SIGNAL_TYPES = (
WAITCONDITION, SIGNAL
) = (
'/waitcondition', '/signal'
)
SIGNAL_VERB = {WAITCONDITION: 'PUT',
SIGNAL: 'POST'}
class SignalResponder(stack_user.StackUser):
PROPERTIES = (
SIGNAL_TRANSPORT,
) = (
'signal_transport',
)
ATTRIBUTES = (
SIGNAL_ATTR,
) = (
'signal',
)
# Anything which subclasses this may trigger authenticated
# API operations as a consequence of handling a signal
requires_deferred_auth = True
def handle_delete(self):
self._delete_signals()
return super(SignalResponder, self).handle_delete()
def _delete_signals(self):
self._delete_ec2_signed_url()
self._delete_heat_signal_url()
self._delete_swift_signal_url()
self._delete_zaqar_signal_queue()
@property
def password(self):
return self.data().get('password')
@password.setter
def password(self, password):
if password is None:
self.data_delete('password')
else:
self.data_set('password', password, True)
def _signal_transport_cfn(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.CFN_SIGNAL
def _signal_transport_heat(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.HEAT_SIGNAL
def _signal_transport_none(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.NO_SIGNAL
def _signal_transport_temp_url(self):
return self.properties[
self.SIGNAL_TRANSPORT] == self.TEMP_URL_SIGNAL
def _signal_transport_zaqar(self):
return self.properties.get(
self.SIGNAL_TRANSPORT) == self.ZAQAR_SIGNAL
def _get_heat_signal_credentials(self):
"""Return OpenStack credentials that can be used to send a signal.
These credentials are for the user associated with this resource in
the heat stack user domain.
"""
if self._get_user_id() is None:
if self.password is None:
self.password = password_gen.generate_openstack_password()
self._create_user()
return {'auth_url': self.keystone().v3_endpoint,
'username': self.physical_resource_name(),
'user_id': self._get_user_id(),
'password': self.password,
'project_id': self.stack.stack_user_project_id,
'domain_id': self.keystone().stack_domain_id,
'region_name': (self.context.region_name or
cfg.CONF.region_name_for_services)}
def _get_ec2_signed_url(self, signal_type=SIGNAL):
"""Create properly formatted and pre-signed URL.
This uses the created user for the credentials.
See boto/auth.py::QuerySignatureV2AuthHandler
:param signal_type: either WAITCONDITION or SIGNAL.
"""
stored = self.data().get('ec2_signed_url')
if stored is not None:
return stored
access_key = self.data().get('access_key')
secret_key = self.data().get('secret_key')
if not access_key or not secret_key:
if self.id is None:
# it is too early
return
if self._get_user_id() is None:
self._create_user()
self._create_keypair()
access_key = self.data().get('access_key')
secret_key = self.data().get('secret_key')
if not access_key or not secret_key:
LOG.warning('Cannot generate signed url, '
'unable to create keypair')
return
config_url = cfg.CONF.heat_waitcondition_server_url
if config_url:
signal_url = config_url.replace('/waitcondition', signal_type)
else:
heat_client_plugin = self.stack.clients.client_plugin('heat')
endpoint = heat_client_plugin.get_heat_cfn_url()
signal_url = ''.join([endpoint, signal_type])
host_url = urlparse.urlparse(signal_url)
path = self.identifier().arn_url_path()
# Note the WSGI spec apparently means that the webob request we end up
# processing in the CFN API (ec2token.py) has an unquoted path, so we
# need to calculate the signature with the path component unquoted, but
# ensure the actual URL contains the quoted version...
unquoted_path = urlparse.unquote(host_url.path + path)
request = {'host': host_url.netloc.lower(),
'verb': SIGNAL_VERB[signal_type],
'path': unquoted_path,
'params': {'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AWSAccessKeyId': access_key,
'Timestamp':
self.created_time.strftime("%Y-%m-%dT%H:%M:%SZ")
}}
# Sign the request
signer = ec2_utils.Ec2Signer(secret_key)
request['params']['Signature'] = signer.generate(request)
qs = urlparse.urlencode(request['params'])
url = "%s%s?%s" % (signal_url.lower(),
path, qs)
self.data_set('ec2_signed_url', url)
return url
def _delete_ec2_signed_url(self):
self.data_delete('ec2_signed_url')
self._delete_keypair()
def _get_heat_signal_url(self, project_id=None):
"""Return a heat-api signal URL for this resource.
This URL is not pre-signed, valid user credentials are required.
If a project_id is provided, it is used in place of the original
project_id. This is useful to generate a signal URL that uses
the heat stack user project instead of the user's.
"""
stored = self.data().get('heat_signal_url')
if stored is not None:
return stored
if self.id is None:
# it is too early
return
url = self.client_plugin('heat').get_heat_url()
path = self.identifier().url_path()
if project_id is not None:
path = project_id + path[path.find('/'):]
url = urlparse.urljoin(url, '%s/signal' % path)
self.data_set('heat_signal_url', url)
return url
def _delete_heat_signal_url(self):
self.data_delete('heat_signal_url')
def _get_swift_signal_url(self, multiple_signals=False):
"""Create properly formatted and pre-signed Swift signal URL.
This uses a Swift pre-signed temp_url. If multiple_signals is
requested, the Swift object referenced by the returned URL will have
versioning enabled.
"""
put_url = self.data().get('swift_signal_url')
if put_url:
return put_url
if self.id is None:
# it is too early
return
container = self.stack.id
object_name = self.physical_resource_name()
self.client('swift').put_container(container)
if multiple_signals:
put_url = self.client_plugin('swift').get_signal_url(container,
object_name)
else:
put_url = self.client_plugin('swift').get_temp_url(container,
object_name)
self.client('swift').put_object(container, object_name, '')
self.data_set('swift_signal_url', put_url)
self.data_set('swift_signal_object_name', object_name)
return put_url
def _delete_swift_signal_url(self):
object_name = self.data().get('swift_signal_object_name')
if not object_name:
return
with self.client_plugin('swift').ignore_not_found:
container_name = self.stack.id
swift = self.client('swift')
# delete all versions of the object, in case there are some
# signals that are waiting to be handled
container = swift.get_container(container_name)
filtered = [obj for obj in container[1]
if object_name in obj['name']]
for obj in filtered:
# we delete the main object every time, swift takes
# care of restoring the previous version after each delete
swift.delete_object(container_name, object_name)
headers = swift.head_container(container_name)
if int(headers['x-container-object-count']) == 0:
swift.delete_container(container_name)
self.data_delete('swift_signal_object_name')
self.data_delete('swift_signal_url')
def _get_zaqar_signal_queue_id(self):
"""Return a zaqar queue_id for signaling this resource.
This uses the created user for the credentials.
"""
queue_id = self.data().get('zaqar_signal_queue_id')
if queue_id:
return queue_id
if self.id is None:
# it is too early
return
if self._get_user_id() is None:
if self.password is None:
self.password = password_gen.generate_openstack_password()
self._create_user()
queue_id = self.physical_resource_name()
zaqar_plugin = self.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(
self.stack.stack_user_project_id, self._user_token())
queue = zaqar.queue(queue_id)
signed_url_data = queue.signed_url(
['messages'], methods=['GET', 'DELETE'])
self.data_set('zaqar_queue_signed_url_data',
jsonutils.dumps(signed_url_data))
self.data_set('zaqar_signal_queue_id', queue_id)
return queue_id
def _delete_zaqar_signal_queue(self):
queue_id = self.data().get('zaqar_signal_queue_id')
if not queue_id:
return
zaqar_plugin = self.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(
self.stack.stack_user_project_id, self._user_token())
with zaqar_plugin.ignore_not_found:
zaqar.queue(queue_id).delete()
self.data_delete('zaqar_signal_queue_id')
def _get_signal(self, signal_type=SIGNAL, multiple_signals=False):
"""Return a dictionary with signal details.
Subclasses can invoke this method to retrieve information of the
resource signal for the specified transport.
"""
signal = None
if self._signal_transport_cfn():
signal = {'alarm_url': self._get_ec2_signed_url(
signal_type=signal_type)}
elif self._signal_transport_heat():
signal = self._get_heat_signal_credentials()
signal['alarm_url'] = self._get_heat_signal_url(
project_id=self.stack.stack_user_project_id)
elif self._signal_transport_temp_url():
signal = {'alarm_url': self._get_swift_signal_url(
multiple_signals=multiple_signals)}
elif self._signal_transport_zaqar():
signal = self._get_heat_signal_credentials()
signal['queue_id'] = self._get_zaqar_signal_queue_id()
elif self._signal_transport_none():
signal = {}
return signal
def _service_swift_signal(self):
swift_client = self.client('swift')
try:
container = swift_client.get_container(self.stack.id)
except Exception as exc:
self.client_plugin('swift').ignore_not_found(exc)
LOG.debug("Swift container %s was not found", self.stack.id)
return
index = container[1]
if not index: # Swift objects were deleted by user
LOG.debug("Swift objects in container %s were not found",
self.stack.id)
return
# Remove objects that are for other resources, given that
# multiple swift signals in the same stack share a container
object_name = self.physical_resource_name()
filtered = [obj for obj in index if object_name in obj['name']]
# Fetch objects from Swift and filter results
signal_names = []
for obj in filtered:
try:
signal = swift_client.get_object(self.stack.id, obj['name'])
except Exception as exc:
self.client_plugin('swift').ignore_not_found(exc)
continue
body = signal[1]
if body == swift.IN_PROGRESS: # Ignore the initial object
continue
signal_names.append(obj['name'])
if body == "":
self.signal(details={})
continue
try:
self.signal(details=jsonutils.loads(body))
except ValueError:
raise exception.Error(_("Failed to parse JSON data: %s") %
body)
# remove the signals that were consumed
for signal_name in signal_names:
if signal_name != object_name:
swift_client.delete_object(self.stack.id, signal_name)
if object_name in signal_names:
swift_client.delete_object(self.stack.id, object_name)
def _service_zaqar_signal(self):
zaqar_plugin = self.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(
self.stack.stack_user_project_id, self._user_token())
try:
queue = zaqar.queue(self._get_zaqar_signal_queue_id())
except Exception as ex:
self.client_plugin('zaqar').ignore_not_found(ex)
return
messages = list(queue.pop())
for message in messages:
self.signal(details=message.body)
def _service_signal(self):
"""Service the signal, when necessary.
This method must be called repeatedly by subclasses to update the
state of the signals that require polling, which are the ones based on
Swift temp URLs and Zaqar queues. The "NO_SIGNAL" case is also handled
here by triggering the signal once per call.
"""
if self._signal_transport_temp_url():
self._service_swift_signal()
elif self._signal_transport_zaqar():
self._service_zaqar_signal()
elif self._signal_transport_none():
self.signal(details={})
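# --- Hedged sketch (illustrative only; this is not an actual Heat resource) ---
# A subclass of SignalResponder would typically hand out the transport details
# returned by _get_signal() when it is created, and poll _service_signal() for
# the transports that need it (Swift temp URL, Zaqar queue, NO_SIGNAL). The
# class and method bodies below are schematic assumptions.
class _ExampleSignalConsumer(SignalResponder):
    def handle_create(self):
        # e.g. pass the alarm_url / credentials / queue id on to whatever
        # will eventually send the signal back
        return self._get_signal(signal_type=WAITCONDITION)
    def check_create_complete(self, create_data):
        # required for the poll-based transports
        self._service_signal()
        return True   # a real resource would test its completion condition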
|
|
from pymongo import MongoClient, ReadPreference, uri_parser
import six
from mongoengine.python_support import IS_PYMONGO_3
__all__ = ['MongoEngineConnectionError', 'connect', 'register_connection',
'DEFAULT_CONNECTION_NAME']
DEFAULT_CONNECTION_NAME = 'default'
if IS_PYMONGO_3:
READ_PREFERENCE = ReadPreference.PRIMARY
else:
from pymongo import MongoReplicaSetClient
READ_PREFERENCE = False
class MongoEngineConnectionError(Exception):
"""Error raised when the database connection can't be established or
when a connection with a requested alias can't be retrieved.
"""
pass
_connection_settings = {}
_connections = {}
_dbs = {}
def register_connection(alias, name=None, host=None, port=None,
read_preference=READ_PREFERENCE,
username=None, password=None,
authentication_source=None,
authentication_mechanism=None,
**kwargs):
"""Add a connection.
:param alias: the name that will be used to refer to this connection
throughout MongoEngine
:param name: the name of the specific database to use
:param host: the host name of the :program:`mongod` instance to connect to
:param port: the port that the :program:`mongod` instance is running on
:param read_preference: The read preference for the collection
** Added pymongo 2.1
:param username: username to authenticate with
:param password: password to authenticate with
:param authentication_source: database to authenticate against
:param authentication_mechanism: database authentication mechanisms.
By default, use SCRAM-SHA-1 with MongoDB 3.0 and later,
MONGODB-CR (MongoDB Challenge Response protocol) for older servers.
:param is_mock: explicitly use mongomock for this connection
(can also be done by using `mongomock://` as db host prefix)
:param kwargs: ad-hoc parameters to be passed into the pymongo driver,
for example maxpoolsize, tz_aware, etc. See the documentation
for pymongo's `MongoClient` for a full list.
.. versionchanged:: 0.10.6 - added mongomock support
"""
conn_settings = {
'name': name or 'test',
'host': host or 'localhost',
'port': port or 27017,
'read_preference': read_preference,
'username': username,
'password': password,
'authentication_source': authentication_source,
'authentication_mechanism': authentication_mechanism
}
conn_host = conn_settings['host']
# Host can be a list or a string, so if string, force to a list.
if isinstance(conn_host, six.string_types):
conn_host = [conn_host]
resolved_hosts = []
for entity in conn_host:
# Handle Mongomock
if entity.startswith('mongomock://'):
conn_settings['is_mock'] = True
# `mongomock://` is not a valid url prefix and must be replaced by `mongodb://`
resolved_hosts.append(entity.replace('mongomock://', 'mongodb://', 1))
# Handle URI style connections, only updating connection params which
# were explicitly specified in the URI.
elif '://' in entity:
uri_dict = uri_parser.parse_uri(entity)
resolved_hosts.append(entity)
if uri_dict.get('database'):
conn_settings['name'] = uri_dict.get('database')
for param in ('read_preference', 'username', 'password'):
if uri_dict.get(param):
conn_settings[param] = uri_dict[param]
uri_options = uri_dict['options']
if 'replicaset' in uri_options:
conn_settings['replicaSet'] = uri_options['replicaset']
if 'authsource' in uri_options:
conn_settings['authentication_source'] = uri_options['authsource']
if 'authmechanism' in uri_options:
conn_settings['authentication_mechanism'] = uri_options['authmechanism']
else:
resolved_hosts.append(entity)
conn_settings['host'] = resolved_hosts
# Deprecated parameters that should not be passed on
kwargs.pop('slaves', None)
kwargs.pop('is_slave', None)
conn_settings.update(kwargs)
_connection_settings[alias] = conn_settings
def disconnect(alias=DEFAULT_CONNECTION_NAME):
"""Close the connection with a given alias."""
if alias in _connections:
get_connection(alias=alias).close()
del _connections[alias]
if alias in _dbs:
del _dbs[alias]
def get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False):
"""Return a connection with a given alias."""
# Connect to the database if not already connected
if reconnect:
disconnect(alias)
# If the requested alias already exists in the _connections list, return
# it immediately.
if alias in _connections:
return _connections[alias]
# Validate that the requested alias exists in the _connection_settings.
# Raise MongoEngineConnectionError if it doesn't.
if alias not in _connection_settings:
if alias == DEFAULT_CONNECTION_NAME:
msg = 'You have not defined a default connection'
else:
msg = 'Connection with alias "%s" has not been defined' % alias
raise MongoEngineConnectionError(msg)
def _clean_settings(settings_dict):
irrelevant_fields = set([
'name', 'username', 'password', 'authentication_source',
'authentication_mechanism'
])
return {
k: v for k, v in settings_dict.items()
if k not in irrelevant_fields
}
# Retrieve a copy of the connection settings associated with the requested
# alias and remove the database name and authentication info (we don't
# care about them at this point).
conn_settings = _clean_settings(_connection_settings[alias].copy())
# Determine if we should use PyMongo's or mongomock's MongoClient.
is_mock = conn_settings.pop('is_mock', False)
if is_mock:
try:
import mongomock
except ImportError:
raise RuntimeError('You need mongomock installed to mock '
'MongoEngine.')
connection_class = mongomock.MongoClient
else:
connection_class = MongoClient
# For replica set connections with PyMongo 2.x, use
# MongoReplicaSetClient.
# TODO remove this once we stop supporting PyMongo 2.x.
if 'replicaSet' in conn_settings and not IS_PYMONGO_3:
connection_class = MongoReplicaSetClient
conn_settings['hosts_or_uri'] = conn_settings.pop('host', None)
# hosts_or_uri has to be a string, so if 'host' was provided
# as a list, join its parts and separate them by ','
if isinstance(conn_settings['hosts_or_uri'], list):
conn_settings['hosts_or_uri'] = ','.join(
conn_settings['hosts_or_uri'])
# Discard port since it can't be used on MongoReplicaSetClient
conn_settings.pop('port', None)
# Iterate over all of the connection settings and if a connection with
# the same parameters is already established, use it instead of creating
# a new one.
existing_connection = None
connection_settings_iterator = (
(db_alias, settings.copy())
for db_alias, settings in _connection_settings.items()
)
for db_alias, connection_settings in connection_settings_iterator:
connection_settings = _clean_settings(connection_settings)
if conn_settings == connection_settings and _connections.get(db_alias):
existing_connection = _connections[db_alias]
break
# If an existing connection was found, assign it to the new alias
if existing_connection:
_connections[alias] = existing_connection
else:
# Otherwise, create the new connection for this alias. Raise
# MongoEngineConnectionError if it can't be established.
try:
_connections[alias] = connection_class(**conn_settings)
except Exception as e:
raise MongoEngineConnectionError(
'Cannot connect to database %s :\n%s' % (alias, e))
return _connections[alias]
def get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False):
if reconnect:
disconnect(alias)
if alias not in _dbs:
conn = get_connection(alias)
conn_settings = _connection_settings[alias]
db = conn[conn_settings['name']]
auth_kwargs = {'source': conn_settings['authentication_source']}
if conn_settings['authentication_mechanism'] is not None:
auth_kwargs['mechanism'] = conn_settings['authentication_mechanism']
# Authenticate if necessary
if conn_settings['username'] and (conn_settings['password'] or
conn_settings['authentication_mechanism'] == 'MONGODB-X509'):
db.authenticate(conn_settings['username'], conn_settings['password'], **auth_kwargs)
_dbs[alias] = db
return _dbs[alias]
def connect(db=None, alias=DEFAULT_CONNECTION_NAME, **kwargs):
"""Connect to the database specified by the 'db' argument.
Connection settings may be provided here as well if the database is not
running on the default port on localhost. If authentication is needed,
provide username and password arguments as well.
Multiple databases are supported by using aliases. Provide a separate
`alias` to connect to a different instance of :program:`mongod`.
See the docstring for `register_connection` for more details about all
supported kwargs.
.. versionchanged:: 0.6 - added multiple database support.
"""
if alias not in _connections:
register_connection(alias, db, **kwargs)
return get_connection(alias)
# Support old naming convention
_get_connection = get_connection
_get_db = get_db
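# --- Hedged usage sketch (illustrative only; hosts and names are placeholders)
# connect() registers settings under an alias on first use and returns a
# (possibly shared) client; get_db() then resolves the database and, when
# credentials were given, authenticates against it. Requires a reachable
# mongod (or mongomock installed for the second alias).
def _connection_demo():
    connect('appdb')                                            # alias 'default'
    connect('testdb', alias='tests', host='mongomock://localhost')
    return get_db(), get_db('tests')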
|
|
# Module 'os2emxpath' -- common operations on OS/2 pathnames
"""Common pathname manipulations, OS/2 EMX version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import stat
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
altsep = '\\'
pathsep = ';'
defpath = '.;C:\\bin'
devnull = 'nul'
# Normalize the case of a pathname and map backslashes to slashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all altseps into seps."""
return s.replace('\\', '/').lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting sep as needed"""
path = a
for b in p:
if isabs(b):
path = b
elif path == '' or path[-1:] in '/\\:':
path = path + b
else:
path = path + '/' + b
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '/' * 2 or firstTwo == '\\' * 2:
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
        #           directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('/', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('/', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
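# NOTE: a small self-check sketch, not part of the original module, showing how
# splitdrive() and splitunc() decompose paths; the sample paths are arbitrary.
def _demo_split_helpers():
    """Sketch: drive-letter vs. UNC decomposition."""
    # Drive letter: everything up to and including ':' is the drive spec.
    assert splitdrive('C:/os2/bin') == ('C:', '/os2/bin')
    assert splitdrive('readme.txt') == ('', 'readme.txt')
    # UNC: '//host/mount' plays the role of the drive letter.
    assert splitunc('//server/share/dir/file') == ('//server/share', '/dir/file')
    # Paths with a drive letter never have a UNC part.
    assert splitunc('C:/os2/bin') == ('', 'C:/os2/bin')
    return True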
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end.
Return (root, ext), either part may be empty."""
root, ext = '', ''
for c in p:
if c in ['/','\\']:
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
n = min(len(s1), len(s2))
for i in xrange(n):
if s1[i] != s2[i]:
return s1[:i]
return s1[:n]
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()"""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()"""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()"""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the creation time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link. On OS/2 always returns false"""
return False
# Does a path exist?
# This is false for dangling symbolic links.
def exists(path):
"""Test whether a path exists"""
try:
st = os.stat(path)
except os.error:
return False
return True
lexists = exists
# Is a path a directory?
def isdir(path):
"""Test whether a path is a directory"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISDIR(st.st_mode)
# Is a path a regular file?
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path.
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk whth callback function.
walk(top, func, arg) calls func(arg, d, files) for each directory d
in the tree rooted at top (including top itself); files is a list
of all the files and subdirs in directory d."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if i == 1:
if 'HOME' in os.environ:
userhome = os.environ['HOME']
        elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
else:
return path
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - no escape character, except for '$$' which is translated into '$'
# - ${varname} is accepted.
# - varnames can be made out of letters, digits and the character '_'
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of form $var and ${var}.
Unknown variables are left unchanged."""
if '$' not in path:
return path
import string
varchars = string.letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
except ValueError:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
if c != '':
res = res + c
else:
res = res + c
index = index + 1
return res
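# NOTE: an illustrative sketch, not part of the original module, exercising the
# expansion rules documented above; 'DEMOVAR' is a made-up variable set locally.
def _demo_expandvars():
    """Sketch: $var, ${var}, $$ and single-quote behaviour."""
    os.environ['DEMOVAR'] = 'os2'
    assert expandvars('$DEMOVAR/bin') == 'os2/bin'
    assert expandvars('${DEMOVAR}/bin') == 'os2/bin'
    assert expandvars('100$$') == '100$'                # '$$' collapses to '$'
    assert expandvars("'$DEMOVAR' x") == "'$DEMOVAR' x" # no expansion in single quotes
    return True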
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
path = path.replace('\\', '/')
prefix, path = splitdrive(path)
while path[:1] == '/':
prefix = prefix + '/'
path = path[1:]
comps = path.split('/')
i = 0
while i < len(comps):
if comps[i] == '.':
del comps[i]
elif comps[i] == '..' and i > 0 and comps[i-1] not in ('', '..'):
del comps[i-1:i+1]
i = i - 1
elif comps[i] == '' and i > 0 and comps[i-1] != '':
del comps[i]
else:
i = i + 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append('.')
return prefix + '/'.join(comps)
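# NOTE: an illustrative sketch, not part of the original module, showing the
# collapses performed by normpath(); the sample paths are arbitrary.
def _demo_normpath():
    """Sketch: '.', '..' and duplicate-slash handling."""
    assert normpath('A//B') == 'A/B'
    assert normpath('A/./B') == 'A/B'
    assert normpath('A/foo/../B') == 'A/B'
    # Backslashes are mapped to forward slashes first.
    assert normpath('A\\B\\..\\C') == 'A/C'
    # A drive letter is kept as a prefix.
    assert normpath('C:\\A\\\\B') == 'C:/A/B'
    return True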
# Return an absolute path.
def abspath(path):
"""Return the absolute version of a path"""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
supports_unicode_filenames = False
|
|
from __future__ import absolute_import
from collections import OrderedDict
from modelcluster.models import get_all_child_relations
from taggit.managers import _TaggableManager
from django.db import models
from django.utils.encoding import force_text
from django.shortcuts import get_object_or_404
from django.conf.urls import url
from django.conf import settings
from wagtail.wagtailcore.models import Page
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtaildocs.models import Document
from wagtail.wagtailcore.utils import resolve_model_string
from wagtail.wagtailsearch.backends import get_search_backend
from wagtail.utils.compat import get_related_model
from .utils import BadRequestError
class URLPath(object):
"""
This class represents a URL path that should be converted to a full URL.
It is used when the domain that should be used is not known at the time
the URL was generated. It will get resolved to a full URL during
serialisation in api.py.
One example use case is the documents endpoint adding download URLs into
the JSON. The endpoint does not know the domain name to use at the time so
returns one of these instead.
"""
def __init__(self, path):
self.path = path
class ObjectDetailURL(object):
def __init__(self, model, pk):
self.model = model
self.pk = pk
def get_api_data(obj, fields):
# Find any child relations (pages only)
child_relations = {}
if isinstance(obj, Page):
child_relations = {
child_relation.field.rel.related_name: get_related_model(child_relation)
for child_relation in get_all_child_relations(type(obj))
}
# Loop through fields
for field_name in fields:
# Check child relations
if field_name in child_relations and hasattr(child_relations[field_name], 'api_fields'):
yield field_name, [
dict(get_api_data(child_object, child_relations[field_name].api_fields))
for child_object in getattr(obj, field_name).all()
]
continue
# Check django fields
try:
field = obj._meta.get_field(field_name)
if field.rel and isinstance(field.rel, models.ManyToOneRel):
# Foreign key
val = field._get_val_from_obj(obj)
if val:
yield field_name, OrderedDict([
('id', field._get_val_from_obj(obj)),
('meta', OrderedDict([
('type', field.rel.to._meta.app_label + '.' + field.rel.to.__name__),
('detail_url', ObjectDetailURL(field.rel.to, val)),
])),
])
else:
yield field_name, None
else:
yield field_name, field._get_val_from_obj(obj)
continue
except models.fields.FieldDoesNotExist:
pass
# Check attributes
if hasattr(obj, field_name):
value = getattr(obj, field_name)
yield field_name, force_text(value, strings_only=True)
continue
class BaseAPIEndpoint(object):
known_query_parameters = frozenset([
'limit',
'offset',
'fields',
'order',
'search',
])
def listing_view(self, request):
return NotImplemented
def detail_view(self, request, pk):
return NotImplemented
def get_api_fields(self, model):
"""
This returns a list of field names that are allowed to
be used in the API (excluding the id field).
"""
api_fields = []
if hasattr(model, 'api_fields'):
api_fields.extend(model.api_fields)
return api_fields
def serialize_object_metadata(self, request, obj, show_details=False):
"""
This returns a JSON-serialisable dict to use for the "meta"
        section of a particular object.
"""
data = OrderedDict()
# Add type
data['type'] = type(obj)._meta.app_label + '.' + type(obj).__name__
data['detail_url'] = ObjectDetailURL(type(obj), obj.pk)
return data
def serialize_object(self, request, obj, fields=frozenset(), extra_data=(), all_fields=False, show_details=False):
"""
This converts an object into JSON-serialisable dict so it can
be used in the API.
"""
data = [
('id', obj.id),
]
# Add meta
metadata = self.serialize_object_metadata(request, obj, show_details=show_details)
if metadata:
data.append(('meta', metadata))
# Add extra data
data.extend(extra_data)
# Add other fields
api_fields = self.get_api_fields(type(obj))
api_fields = list(OrderedDict.fromkeys(api_fields)) # Removes any duplicates in case the user put "title" in api_fields
if all_fields:
fields = api_fields
else:
unknown_fields = fields - set(api_fields)
if unknown_fields:
raise BadRequestError("unknown fields: %s" % ', '.join(sorted(unknown_fields)))
# Reorder fields so it matches the order of api_fields
fields = [field for field in api_fields if field in fields]
data.extend(get_api_data(obj, fields))
return OrderedDict(data)
def check_query_parameters(self, request, queryset):
query_parameters = set(request.GET.keys())
        # All query parameters must be either a field or an operation
allowed_query_parameters = set(self.get_api_fields(queryset.model)).union(self.known_query_parameters).union({'id'})
unknown_parameters = query_parameters - allowed_query_parameters
if unknown_parameters:
raise BadRequestError("query parameter is not an operation or a recognised field: %s" % ', '.join(sorted(unknown_parameters)))
def do_field_filtering(self, request, queryset):
"""
This performs field level filtering on the result set
Eg: ?title=James Joyce
"""
fields = set(self.get_api_fields(queryset.model)).union({'id'})
for field_name, value in request.GET.items():
if field_name in fields:
field = getattr(queryset.model, field_name, None)
if isinstance(field, _TaggableManager):
for tag in value.split(','):
queryset = queryset.filter(**{field_name + '__name': tag})
# Stick a message on the queryset to indicate that tag filtering has been performed
# This will let the do_search method know that it must raise an error as searching
# and tag filtering at the same time is not supported
queryset._filtered_by_tag = True
else:
queryset = queryset.filter(**{field_name: value})
return queryset
def do_ordering(self, request, queryset):
"""
This applies ordering to the result set
Eg: ?order=title
It also supports reverse ordering
Eg: ?order=-title
And random ordering
Eg: ?order=random
"""
if 'order' in request.GET:
# Prevent ordering while searching
if 'search' in request.GET:
raise BadRequestError("ordering with a search query is not supported")
order_by = request.GET['order']
# Random ordering
if order_by == 'random':
# Prevent ordering by random with offset
if 'offset' in request.GET:
raise BadRequestError("random ordering with offset is not supported")
return queryset.order_by('?')
# Check if reverse ordering is set
if order_by.startswith('-'):
reverse_order = True
order_by = order_by[1:]
else:
reverse_order = False
# Add ordering
if order_by == 'id' or order_by in self.get_api_fields(queryset.model):
queryset = queryset.order_by(order_by)
else:
# Unknown field
raise BadRequestError("cannot order by '%s' (unknown field)" % order_by)
# Reverse order
if reverse_order:
queryset = queryset.reverse()
return queryset
def do_search(self, request, queryset):
"""
This performs a full-text search on the result set
Eg: ?search=James Joyce
"""
search_enabled = getattr(settings, 'WAGTAILAPI_SEARCH_ENABLED', True)
if 'search' in request.GET:
if not search_enabled:
raise BadRequestError("search is disabled")
# Searching and filtering by tag at the same time is not supported
if getattr(queryset, '_filtered_by_tag', False):
raise BadRequestError("filtering by tag with a search query is not supported")
search_query = request.GET['search']
sb = get_search_backend()
queryset = sb.search(search_query, queryset)
return queryset
def do_pagination(self, request, queryset):
"""
This performs limit/offset based pagination on the result set
Eg: ?limit=10&offset=20 -- Returns 10 items starting at item 20
"""
limit_max = getattr(settings, 'WAGTAILAPI_LIMIT_MAX', 20)
try:
offset = int(request.GET.get('offset', 0))
assert offset >= 0
except (ValueError, AssertionError):
raise BadRequestError("offset must be a positive integer")
try:
limit = int(request.GET.get('limit', min(20, limit_max)))
if limit > limit_max:
raise BadRequestError("limit cannot be higher than %d" % limit_max)
assert limit >= 0
except (ValueError, AssertionError):
raise BadRequestError("limit must be a positive integer")
start = offset
stop = offset + limit
return queryset[start:stop]
def get_urlpatterns(self):
"""
This returns a list of URL patterns for the endpoint
"""
return [
url(r'^$', self.listing_view, name='listing'),
url(r'^(\d+)/$', self.detail_view, name='detail'),
]
def has_model(self, model):
return False
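# NOTE: a standalone sketch, not part of the original module, of the
# limit/offset arithmetic used by BaseAPIEndpoint.do_pagination above. It works
# on a plain list instead of a Django queryset and replaces the
# WAGTAILAPI_LIMIT_MAX setting with an explicit argument.
def _demo_limit_offset(items, limit=20, offset=0, limit_max=20):
    """Sketch: slice items the way ?limit=...&offset=... would."""
    if offset < 0:
        raise BadRequestError("offset must be a positive integer")
    if limit < 0:
        raise BadRequestError("limit must be a positive integer")
    if limit > limit_max:
        raise BadRequestError("limit cannot be higher than %d" % limit_max)
    return items[offset:offset + limit]
# e.g. _demo_limit_offset(range(100), limit=10, offset=20) returns items 20..29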
class PagesAPIEndpoint(BaseAPIEndpoint):
known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([
'type',
'child_of',
])
def get_queryset(self, request, model=Page):
# Get live pages that are not in a private section
queryset = model.objects.public().live()
# Filter by site
queryset = queryset.descendant_of(request.site.root_page, inclusive=True)
return queryset
def get_api_fields(self, model):
api_fields = ['title']
api_fields.extend(super(PagesAPIEndpoint, self).get_api_fields(model))
return api_fields
def serialize_object_metadata(self, request, page, show_details=False):
data = super(PagesAPIEndpoint, self).serialize_object_metadata(request, page, show_details=show_details)
# Add type
data['type'] = page.specific_class._meta.app_label + '.' + page.specific_class.__name__
return data
def serialize_object(self, request, page, fields=frozenset(), extra_data=(), all_fields=False, show_details=False):
# Add parent
if show_details:
parent = page.get_parent()
# Make sure the parent is visible in the API
if self.get_queryset(request).filter(id=parent.id).exists():
parent_class = parent.specific_class
extra_data += (
('parent', OrderedDict([
('id', parent.id),
('meta', OrderedDict([
('type', parent_class._meta.app_label + '.' + parent_class.__name__),
('detail_url', ObjectDetailURL(parent_class, parent.id)),
])),
])),
)
return super(PagesAPIEndpoint, self).serialize_object(request, page, fields=fields, extra_data=extra_data, all_fields=all_fields, show_details=show_details)
def get_model(self, request):
if 'type' not in request.GET:
return Page
model_name = request.GET['type']
try:
model = resolve_model_string(model_name)
if not issubclass(model, Page):
raise BadRequestError("type doesn't exist")
return model
except LookupError:
raise BadRequestError("type doesn't exist")
def do_child_of_filter(self, request, queryset):
if 'child_of' in request.GET:
try:
parent_page_id = int(request.GET['child_of'])
assert parent_page_id >= 0
except (ValueError, AssertionError):
raise BadRequestError("child_of must be a positive integer")
try:
parent_page = self.get_queryset(request).get(id=parent_page_id)
return queryset.child_of(parent_page)
except Page.DoesNotExist:
raise BadRequestError("parent page doesn't exist")
return queryset
def listing_view(self, request):
# Get model and queryset
model = self.get_model(request)
queryset = self.get_queryset(request, model=model)
        # Check query parameters
self.check_query_parameters(request, queryset)
# Filtering
queryset = self.do_field_filtering(request, queryset)
queryset = self.do_child_of_filter(request, queryset)
# Ordering
queryset = self.do_ordering(request, queryset)
# Search
queryset = self.do_search(request, queryset)
# Pagination
total_count = queryset.count()
queryset = self.do_pagination(request, queryset)
# Get list of fields to show in results
if 'fields' in request.GET:
fields = set(request.GET['fields'].split(','))
else:
fields = {'title'}
return OrderedDict([
('meta', OrderedDict([
('total_count', total_count),
])),
('pages', [
self.serialize_object(request, page, fields=fields)
for page in queryset
]),
])
def detail_view(self, request, pk):
page = get_object_or_404(self.get_queryset(request), pk=pk).specific
return self.serialize_object(request, page, all_fields=True, show_details=True)
def has_model(self, model):
return issubclass(model, Page)
class ImagesAPIEndpoint(BaseAPIEndpoint):
model = get_image_model()
def get_queryset(self, request):
return self.model.objects.all().order_by('id')
def get_api_fields(self, model):
api_fields = ['title', 'tags', 'width', 'height']
api_fields.extend(super(ImagesAPIEndpoint, self).get_api_fields(model))
return api_fields
def listing_view(self, request):
queryset = self.get_queryset(request)
        # Check query parameters
self.check_query_parameters(request, queryset)
# Filtering
queryset = self.do_field_filtering(request, queryset)
# Ordering
queryset = self.do_ordering(request, queryset)
# Search
queryset = self.do_search(request, queryset)
# Pagination
total_count = queryset.count()
queryset = self.do_pagination(request, queryset)
# Get list of fields to show in results
if 'fields' in request.GET:
fields = set(request.GET['fields'].split(','))
else:
fields = {'title'}
return OrderedDict([
('meta', OrderedDict([
('total_count', total_count),
])),
('images', [
self.serialize_object(request, image, fields=fields)
for image in queryset
]),
])
def detail_view(self, request, pk):
image = get_object_or_404(self.get_queryset(request), pk=pk)
return self.serialize_object(request, image, all_fields=True)
def has_model(self, model):
return model == self.model
class DocumentsAPIEndpoint(BaseAPIEndpoint):
def get_api_fields(self, model):
api_fields = ['title', 'tags']
api_fields.extend(super(DocumentsAPIEndpoint, self).get_api_fields(model))
return api_fields
def serialize_object_metadata(self, request, document, show_details=False):
data = super(DocumentsAPIEndpoint, self).serialize_object_metadata(request, document, show_details=show_details)
# Download URL
if show_details:
data['download_url'] = URLPath(document.url)
return data
def listing_view(self, request):
queryset = Document.objects.all().order_by('id')
        # Check query parameters
self.check_query_parameters(request, queryset)
# Filtering
queryset = self.do_field_filtering(request, queryset)
# Ordering
queryset = self.do_ordering(request, queryset)
# Search
queryset = self.do_search(request, queryset)
# Pagination
total_count = queryset.count()
queryset = self.do_pagination(request, queryset)
# Get list of fields to show in results
if 'fields' in request.GET:
fields = set(request.GET['fields'].split(','))
else:
fields = {'title'}
return OrderedDict([
('meta', OrderedDict([
('total_count', total_count),
])),
('documents', [
self.serialize_object(request, document, fields=fields)
for document in queryset
]),
])
def detail_view(self, request, pk):
document = get_object_or_404(Document, pk=pk)
return self.serialize_object(request, document, all_fields=True, show_details=True)
def has_model(self, model):
return model == Document
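# NOTE: a standalone sketch, not part of the original module, of how the
# ?fields= query parameter is interpreted: a comma-separated list that must be
# a subset of the model's api_fields, mirroring the unknown-fields check in
# BaseAPIEndpoint.serialize_object above. The field names here are illustrative.
def _demo_parse_fields(fields_param, api_fields=('title', 'tags', 'width', 'height')):
    """Sketch: split and validate a raw 'fields' value."""
    requested = set(fields_param.split(','))
    unknown = requested - set(api_fields)
    if unknown:
        raise BadRequestError("unknown fields: %s" % ', '.join(sorted(unknown)))
    # Preserve the canonical api_fields ordering, as serialize_object does.
    return [field for field in api_fields if field in requested]
# e.g. _demo_parse_fields('width,title') returns ['title', 'width']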
|
|
"""
Return/control aspects of the grains data
Grains set or altered with this module are stored in the 'grains'
file on the minions. By default, this file is located at: ``/etc/salt/grains``
.. Note::
This does **NOT** override any grains set in the minion config file.
"""
import collections
import logging
import math
import operator
import os
from collections.abc import Mapping
from functools import reduce # pylint: disable=redefined-builtin
import salt.utils.compat
import salt.utils.data
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.yaml
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import SaltException
__proxyenabled__ = ["*"]
# Seed the grains dict so cython will build
__grains__ = {}
# Change the default outputter to make it more readable
__outputter__ = {
"items": "nested",
"item": "nested",
"setval": "nested",
}
# http://stackoverflow.com/a/12414913/127816
_infinitedict = lambda: collections.defaultdict(_infinitedict)
_non_existent_key = "NonExistentValueMagicNumberSpK3hnufdHfeBUXCfqVK"
log = logging.getLogger(__name__)
def _serial_sanitizer(instr):
"""Replaces the last 1/4 of a string with X's"""
length = len(instr)
index = int(math.floor(length * 0.75))
return "{}{}".format(instr[:index], "X" * (length - index))
_FQDN_SANITIZER = lambda x: "MINION.DOMAINNAME"
_HOSTNAME_SANITIZER = lambda x: "MINION"
_DOMAINNAME_SANITIZER = lambda x: "DOMAINNAME"
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
"serialnumber": _serial_sanitizer,
"domain": _DOMAINNAME_SANITIZER,
"fqdn": _FQDN_SANITIZER,
"id": _FQDN_SANITIZER,
"host": _HOSTNAME_SANITIZER,
"localhost": _HOSTNAME_SANITIZER,
"nodename": _HOSTNAME_SANITIZER,
}
def get(key, default="", delimiter=DEFAULT_TARGET_DELIM, ordered=True):
"""
    Attempt to retrieve the named value from grains. If the named value is not
    available, return the passed default. The default return is an empty string.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict in grains looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
:param delimiter:
Specify an alternate delimiter to use when traversing a nested dict.
This is useful for when the desired key contains a colon. See CLI
example below for usage.
.. versionadded:: 2014.7.0
:param ordered:
Outputs an ordered dict if applicable (default: True)
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
salt '*' grains.get pkg:apache
salt '*' grains.get abc::def|ghi delimiter='|'
"""
if ordered is True:
grains = __grains__
else:
grains = salt.utils.json.loads(salt.utils.json.dumps(__grains__))
return salt.utils.data.traverse_dict_and_list(grains, key, default, delimiter)
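# NOTE: a simplified, dict-only sketch, not part of the original module, of the
# ":"-delimited traversal that get() delegates to
# salt.utils.data.traverse_dict_and_list; the real helper also descends into
# lists, which is omitted here.
def _demo_traverse(data, key, default="", delimiter=DEFAULT_TARGET_DELIM):
    """Sketch: walk nested dicts using a delimited key such as 'pkg:apache'."""
    ptr = data
    for part in key.split(delimiter):
        if isinstance(ptr, Mapping) and part in ptr:
            ptr = ptr[part]
        else:
            return default
    return ptr
# e.g. _demo_traverse({'pkg': {'apache': 'httpd'}}, 'pkg:apache') == 'httpd'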
def has_value(key):
"""
Determine whether a key exists in the grains dictionary.
Given a grains dictionary that contains the following structure::
{'pkg': {'apache': 'httpd'}}
One would determine if the apache key in the pkg dict exists by::
pkg:apache
CLI Example:
.. code-block:: bash
salt '*' grains.has_value pkg:apache
"""
return (
salt.utils.data.traverse_dict_and_list(__grains__, key, KeyError)
is not KeyError
)
def items(sanitize=False):
"""
Return all of the minion's grains
CLI Example:
.. code-block:: bash
salt '*' grains.items
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.items sanitize=True
"""
if salt.utils.data.is_true(sanitize):
out = dict(__grains__)
for key, func in _SANITIZERS.items():
if key in out:
out[key] = func(out[key])
return out
else:
return dict(__grains__)
def item(*args, **kwargs):
"""
Return one or more grains
CLI Example:
.. code-block:: bash
salt '*' grains.item os
salt '*' grains.item os osrelease oscodename
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.item host sanitize=True
"""
ret = {}
default = kwargs.get("default", "")
delimiter = kwargs.get("delimiter", DEFAULT_TARGET_DELIM)
try:
for arg in args:
ret[arg] = salt.utils.data.traverse_dict_and_list(
__grains__, arg, default, delimiter
)
except KeyError:
pass
if salt.utils.data.is_true(kwargs.get("sanitize")):
for arg, func in _SANITIZERS.items():
if arg in ret:
ret[arg] = func(ret[arg])
return ret
def setvals(grains, destructive=False, refresh_pillar=True):
"""
Set new grains values in the grains config file
destructive
If an operation results in a key being removed, delete the key, too.
Defaults to False.
refresh_pillar
Whether pillar will be refreshed.
Defaults to True.
CLI Example:
.. code-block:: bash
salt '*' grains.setvals "{'key1': 'val1', 'key2': 'val2'}"
"""
new_grains = grains
if not isinstance(new_grains, Mapping):
raise SaltException("setvals grains must be a dictionary.")
grains = {}
if os.path.isfile(__opts__["conf_file"]):
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__["conf_file"]),
"proxy.d",
__opts__["id"],
"grains",
)
else:
gfn = os.path.join(os.path.dirname(__opts__["conf_file"]), "grains")
elif os.path.isdir(__opts__["conf_file"]):
if salt.utils.platform.is_proxy():
gfn = os.path.join(
__opts__["conf_file"], "proxy.d", __opts__["id"], "grains"
)
else:
gfn = os.path.join(__opts__["conf_file"], "grains")
else:
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__["conf_file"]),
"proxy.d",
__opts__["id"],
"grains",
)
else:
gfn = os.path.join(os.path.dirname(__opts__["conf_file"]), "grains")
if os.path.isfile(gfn):
with salt.utils.files.fopen(gfn, "rb") as fp_:
try:
grains = salt.utils.yaml.safe_load(fp_)
except salt.utils.yaml.YAMLError as exc:
return "Unable to read existing grains file: {}".format(exc)
if not isinstance(grains, dict):
grains = {}
for key, val in new_grains.items():
if val is None and destructive is True:
if key in grains:
del grains[key]
if key in __grains__:
del __grains__[key]
else:
grains[key] = val
__grains__[key] = val
try:
with salt.utils.files.fopen(gfn, "w+", encoding="utf-8") as fp_:
salt.utils.yaml.safe_dump(grains, fp_, default_flow_style=False)
except OSError:
log.error("Unable to write to grains file at %s. Check permissions.", gfn)
fn_ = os.path.join(__opts__["cachedir"], "module_refresh")
try:
with salt.utils.files.flopen(fn_, "w+"):
pass
except OSError:
log.error("Unable to write to cache file %s. Check permissions.", fn_)
if not __opts__.get("local", False):
# Refresh the grains
__salt__["saltutil.refresh_grains"](refresh_pillar=refresh_pillar)
# Return the grains we just set to confirm everything was OK
return new_grains
def setval(key, val, destructive=False, refresh_pillar=True):
"""
Set a grains value in the grains config file
key
The grain key to be set.
val
The value to set the grain key to.
destructive
If an operation results in a key being removed, delete the key, too.
Defaults to False.
refresh_pillar
Whether pillar will be refreshed.
Defaults to True.
CLI Example:
.. code-block:: bash
salt '*' grains.setval key val
salt '*' grains.setval key "{'sub-key': 'val', 'sub-key2': 'val2'}"
"""
return setvals({key: val}, destructive, refresh_pillar=refresh_pillar)
def append(key, val, convert=False, delimiter=DEFAULT_TARGET_DELIM):
"""
.. versionadded:: 0.17.0
Append a value to a list in the grains config file. If the grain doesn't
exist, the grain key is added and the value is appended to the new grain
as a list item.
key
The grain key to be appended to
val
The value to append to the grain key
convert
If convert is True, convert non-list contents into a list.
If convert is False and the grain contains non-list contents, an error
is given. Defaults to False.
delimiter
The key can be a nested dict key. Use this parameter to
specify the delimiter you use, instead of the default ``:``.
You can now append values to a list in nested dictionary grains. If the
list doesn't exist at this level, it will be created.
.. versionadded:: 2014.7.6
CLI Example:
.. code-block:: bash
salt '*' grains.append key val
"""
grains = get(key, [], delimiter)
if convert:
if not isinstance(grains, list):
grains = [] if grains is None else [grains]
if not isinstance(grains, list):
return "The key {} is not a valid list".format(key)
if val in grains:
return "The val {} was already in the list {}".format(val, key)
if isinstance(val, list):
for item in val:
grains.append(item)
else:
grains.append(val)
while delimiter in key:
key, rest = key.rsplit(delimiter, 1)
_grain = get(key, _infinitedict(), delimiter)
if isinstance(_grain, dict):
_grain.update({rest: grains})
grains = _grain
return setval(key, grains)
def remove(key, val, delimiter=DEFAULT_TARGET_DELIM):
"""
.. versionadded:: 0.17.0
Remove a value from a list in the grains config file
key
The grain key to remove.
val
The value to remove.
delimiter
The key can be a nested dict key. Use this parameter to
specify the delimiter you use, instead of the default ``:``.
        You can now remove values from a list in nested dictionary grains.
.. versionadded:: 2015.8.2
CLI Example:
.. code-block:: bash
salt '*' grains.remove key val
"""
grains = get(key, [], delimiter)
if not isinstance(grains, list):
return "The key {} is not a valid list".format(key)
if val not in grains:
return "The val {} was not in the list {}".format(val, key)
grains.remove(val)
while delimiter in key:
key, rest = key.rsplit(delimiter, 1)
_grain = get(key, None, delimiter)
if isinstance(_grain, dict):
_grain.update({rest: grains})
grains = _grain
return setval(key, grains)
def delkey(key, force=False):
"""
.. versionadded:: 2017.7.0
    Remove a grain completely from the grain system. This will remove both
    the grain key and its value.
key
The grain key from which to delete the value.
force
Force remove the grain even when it is a mapped value.
Defaults to False
CLI Example:
.. code-block:: bash
salt '*' grains.delkey key
"""
return delval(key, destructive=True, force=force)
def delval(key, destructive=False, force=False):
"""
.. versionadded:: 0.17.0
Delete a grain value from the grains config file. This will just set the
grain value to ``None``. To completely remove the grain, run ``grains.delkey``
or pass ``destructive=True`` to ``grains.delval``.
key
The grain key from which to delete the value.
destructive
Delete the key, too. Defaults to False.
force
Force remove the grain even when it is a mapped value.
Defaults to False
CLI Example:
.. code-block:: bash
salt '*' grains.delval key
"""
return set(key, None, destructive=destructive, force=force)
def ls(): # pylint: disable=C0103
"""
Return a list of all available grains
CLI Example:
.. code-block:: bash
salt '*' grains.ls
"""
return sorted(__grains__)
def filter_by(lookup_dict, grain="os_family", merge=None, default="default", base=None):
"""
.. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
}, default='Debian') %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
.. versionchanged:: 2016.11.0
The dictionary key could be a globbing pattern. The function will
            return the corresponding ``lookup_dict`` value where the grain value
matches the pattern. For example:
.. code-block:: bash
            # this will render 'got some salt' if Minion ID begins with 'salt'
salt '*' grains.filter_by '{salt*: got some salt, default: salt is not here}' id
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
.. versionchanged:: 2016.11.0
The grain value could be a list. The function will return the
            ``lookup_dict`` value for the first item in the list that matches
            one of the ``lookup_dict`` keys.
:param merge: A dictionary to merge with the results of the grain selection
from ``lookup_dict``. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
    :param default: default lookup_dict's key used if the grain does not exist
        or if the grain value has no match in lookup_dict. If unspecified
        the value is "default".
.. versionadded:: 2014.1.0
:param base: A lookup_dict key to use for a base dictionary. The
grain-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the grain
selection dictionary and the merge dictionary. Default is unset.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F, G: H}}}' 'xxx' '{D: {E: I}, J: K}' 'C'
# next one renders {A: {B: G}, D: J}
salt '*' grains.filter_by '{default: {A: {B: C}, D: E}, F: {A: {B: G}}, H: {D: I}}' 'xxx' '{D: J}' 'F' 'default'
# next same as above when default='H' instead of 'F' renders {A: {B: C}, D: J}
"""
return salt.utils.data.filter_by(
lookup_dict=lookup_dict,
lookup=grain,
traverse=__grains__,
merge=merge,
default=default,
base=base,
)
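# NOTE: a minimal sketch, not part of the original module, of the lookup
# pattern filter_by() implements via salt.utils.data.filter_by: select the
# entry keyed by the grain value, fall back to the default key, then overlay
# an optional merge dict. Globbing, list-valued grains and 'base' are omitted.
def _demo_filter_by(lookup_dict, grain_value, merge=None, default="default"):
    """Sketch: grain-keyed lookup with a default and an optional merge."""
    selected = lookup_dict.get(grain_value, lookup_dict.get(default))
    if isinstance(selected, dict):
        selected = dict(selected)  # copy before applying the merge overlay
        if merge:
            selected.update(merge)
    return selected
# e.g. _demo_filter_by({'Debian': {'pkg': 'apache2'}, 'RedHat': {'pkg': 'httpd'}},
#                      'RedHat', merge={'srv': 'httpd'})
#      returns {'pkg': 'httpd', 'srv': 'httpd'}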
def _dict_from_path(path, val, delimiter=DEFAULT_TARGET_DELIM):
"""
    Given a lookup string in the form of 'foo:bar:baz', return a nested
dictionary of the appropriate depth with the final segment as a value.
>>> _dict_from_path('foo:bar:baz', 'somevalue')
{"foo": {"bar": {"baz": "somevalue"}}
"""
nested_dict = _infinitedict()
keys = path.rsplit(delimiter)
lastplace = reduce(operator.getitem, keys[:-1], nested_dict)
lastplace[keys[-1]] = val
return nested_dict
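# NOTE: a short usage sketch of _dict_from_path(), not part of the original
# module; the value and lookup string come from the docstring example above.
def _demo_dict_from_path():
    """Sketch: build a nested dict from a delimited lookup string."""
    nested = _dict_from_path('foo:bar:baz', 'somevalue')
    assert nested['foo']['bar']['baz'] == 'somevalue'
    return nested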
def set(key, val="", force=False, destructive=False, delimiter=DEFAULT_TARGET_DELIM):
"""
Set a key to an arbitrary value. It is used like setval but works
with nested keys.
This function is conservative. It will only overwrite an entry if
its value and the given one are not a list or a dict. The ``force``
parameter is used to allow overwriting in all cases.
.. versionadded:: 2015.8.0
:param force: Force writing over existing entry if given or existing
values are list or dict. Defaults to False.
:param destructive: If an operation results in a key being removed,
delete the key, too. Defaults to False.
:param delimiter:
Specify an alternate delimiter to use when traversing a nested dict,
the default being ``:``
CLI Example:
.. code-block:: bash
salt '*' grains.set 'apps:myApp:port' 2209
salt '*' grains.set 'apps:myApp' '{port: 2209}'
"""
ret = {"comment": "", "changes": {}, "result": True}
# Get val type
_new_value_type = "simple"
if isinstance(val, dict):
_new_value_type = "complex"
elif isinstance(val, list):
_new_value_type = "complex"
_non_existent = object()
_existing_value = get(key, _non_existent, delimiter)
_value = _existing_value
_existing_value_type = "simple"
if _existing_value is _non_existent:
_existing_value_type = None
elif isinstance(_existing_value, dict):
_existing_value_type = "complex"
elif isinstance(_existing_value, list):
_existing_value_type = "complex"
if (
_existing_value_type is not None
and _existing_value == val
and (val is not None or destructive is not True)
):
ret["comment"] = "Grain is already set"
return ret
if _existing_value is not None and not force:
if _existing_value_type == "complex":
ret["comment"] = (
"The key '{}' exists but is a dict or a list. "
"Use 'force=True' to overwrite.".format(key)
)
ret["result"] = False
return ret
elif _new_value_type == "complex" and _existing_value_type is not None:
ret["comment"] = (
"The key '{}' exists and the given value is a dict or a "
"list. Use 'force=True' to overwrite.".format(key)
)
ret["result"] = False
return ret
else:
_value = val
else:
_value = val
# Process nested grains
while delimiter in key:
key, rest = key.rsplit(delimiter, 1)
_existing_value = get(key, {}, delimiter)
if isinstance(_existing_value, dict):
if _value is None and destructive:
if rest in _existing_value.keys():
_existing_value.pop(rest)
else:
_existing_value.update({rest: _value})
elif isinstance(_existing_value, list):
_list_updated = False
for _index, _item in enumerate(_existing_value):
if _item == rest:
_existing_value[_index] = {rest: _value}
_list_updated = True
elif isinstance(_item, dict) and rest in _item:
_item.update({rest: _value})
_list_updated = True
if not _list_updated:
_existing_value.append({rest: _value})
elif _existing_value == rest or force:
_existing_value = {rest: _value}
else:
ret["comment"] = (
"The key '{}' value is '{}', which is different from "
"the provided key '{}'. Use 'force=True' to overwrite.".format(
key, _existing_value, rest
)
)
ret["result"] = False
return ret
_value = _existing_value
_setval_ret = setval(key, _value, destructive=destructive)
if isinstance(_setval_ret, dict):
ret["changes"] = _setval_ret
else:
ret["comment"] = _setval_ret
ret["result"] = False
return ret
def equals(key, value):
"""
Used to make sure the minion's grain key/value matches.
Returns ``True`` if matches otherwise ``False``.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' grains.equals fqdn <expected_fqdn>
salt '*' grains.equals systemd:version 219
"""
return str(value) == str(get(key))
# Provide a jinja function call compatible get aliased as fetch
fetch = get
|
|
"""
Static site generator.
Copyright (c) 2009 Liam Cooke
Licensed under the terms of the MIT license.
"""
import codecs
import locale
import os
import re
import shutil
import sys
import time
from collections import defaultdict
from datetime import datetime
from itertools import izip
from os import path
import dateutil.parser
import PyRSS2Gen as rss2
import yaml
from mako.exceptions import MakoException
from mako.lookup import TemplateLookup
from pilcrow import pages, util
DEFAULT_CONFIG_FILE = 'site.yml'
DEFAULT_CONFIG = {
'domain': 'http://localhost/',
'root': '/',
'clean_urls': False,
'content_extensions': ('text', 'markdown', 'mkdn', 'md'),
'dirs': {
'content': 'content',
'files': 'files',
'templates': 'templates',
'deploy': 'deploy',
},
'feed': 'feed.rss',
'files_exclude': r'^[\._]|~$',
'files_include': r'^\.htaccess$',
'files_rename': {
'.less': '.css',
},
'lang': 'en',
}
class PageDatabase:
tags_by_count = lambda self: sorted(self.tags.values(), key=pages.Tag.sortkey_count)
tags_by_name = lambda self: sorted(self.tags.values(), key=pages.Tag.sortkey_tag)
def __init__(self, site):
self._site = site
self.pages, self.tags = {}, {}
tdir = self._site['dirs']['templates']
self.lookup = TemplateLookup(directories=[tdir], input_encoding='utf-8')
def __getitem__(self, id):
return self.pages[id]
def __iter__(self):
return iter(self.pages.values())
def add(self, page):
if page.id in self.pages:
util.die('duplicate page id: %s' % page.id)
self.pages[page.id] = page
if type(page) is pages.Tag:
self.tags[page.name] = page
elif 'tags' in page:
page_tags = {}
for tag_name in page.get('tags', []):
if tag_name in self.tags:
tag = self.tags[tag_name]
else:
tag = pages.Tag(self._site, tag_name)
self.add(tag)
tag.add(page)
page_tags[tag_name] = tag
page['tags'] = page_tags
def select(self, limit=None, dated=True, tag=None, chrono=False, sortby_origin=None):
if sortby_origin is None:
sortby_origin = bool(chrono)
sortkey = sortby_origin and pages.Page.sortkey_origin or pages.Page.sortkey_posted
results = sorted(self.pages.values(), key=sortkey, reverse=not chrono)
if dated:
results = [page for page in results if page.date]
if tag:
results = [page for page in results if tag in page.tags]
return tuple(results)[:limit]
def render(self):
for page in self:
t = page.template or self._site['default_template']
template = self.lookup.get_template('%s.html' % t)
print('%14s : /%s' % (t, page.id))
vars = dict(self._site, **page)
if vars['title']:
vars['head_title'] = vars['title_format'] % vars
#try:
if True:
html = template.render_unicode(**vars).strip()
fname = path.join(self._site['dirs']['deploy'], page.id) + '.html'
with open(fname, 'w') as f:
f.write(html.encode('utf-8'))
#except NameError:
# util.die('template error: undefined variable in', template.filename)
class Pilcrow(dict):
FILES_ACTIONS = {
'.less': lambda s, d: util.run_or_die('lessc %s %s' % (s, d)),
}
def __init__(self, site_path, config_file=DEFAULT_CONFIG_FILE):
try: os.chdir(site_path)
except OSError: util.die('invalid path:', site_path)
if not path.exists(config_file):
util.die('%s not found' % config_file)
dict.__init__(self, DEFAULT_CONFIG)
self.update(locale.localeconv())
with open(config_file) as f:
for k, v in yaml.load(f).items():
k = util.norm_key(k)
if type(v) is dict:
self[k] = dict(self.get(k, {}), **v)
else:
self[k] = v
def join_url(self, *parts, **kwargs):
ext = (kwargs.get('ext', 1) and not self['clean_urls']) and '.html' or ''
url = re.sub('//+', '/', '/'.join(str(s) for s in parts if s))
if ext and url.endswith(ext):
url = url[:-len(ext)]
return url + ext
def build(self, clean=False):
base_path = path.realpath(os.curdir)
deploy_path = path.realpath(self['dirs']['deploy'])
if clean:
shutil.rmtree(deploy_path, ignore_errors=True)
util.mkdir(deploy_path)
os.chdir(self['dirs']['files'])
excludes, includes = re.compile(self['files_exclude']), re.compile(self['files_include'])
for root, _, files in os.walk(os.curdir):
util.mkdir(path.normpath(path.join(deploy_path, root)))
for fname in files:
if excludes.match(fname) and not includes.match(fname):
continue
src, dest = path.join(root, fname), path.join(deploy_path, root, fname)
ext = path.splitext(fname)[1]
if ext in self['files_rename']:
dest = path.splitext(dest)[0] + self['files_rename'][ext]
if path.isfile(dest) and path.getmtime(src) <= path.getmtime(dest):
continue
self.FILES_ACTIONS.get(ext, shutil.copy2)(src, dest)
print('{0} => {1}'.format(path.relpath(src, base_path), path.relpath(dest, base_path)))
os.chdir(base_path)
db, years = PageDatabase(self), defaultdict(list)
for root, _, files in os.walk(self['dirs']['content']):
exts = ['.%s' % ext for ext in self['content_extensions']]
for file in [f for f in files if path.splitext(f)[1] in exts]:
with codecs.open(path.join(root, file), 'r', encoding='utf-8') as fp:
page = pages.Content(self, fp)
db.add(page)
if page.date:
years[page.date.year].append(page)
for year, posts in sorted(years.items()):
posts = sorted(posts, key=pages.Page.sortkey_origin)
db.add(pages.Year(self, posts, year))
for prevpost, post, nextpost in util.neighbours(posts):
post['prevpost'], post['nextpost'] = prevpost, nextpost
dirs = filter(bool, [os.path.dirname(p.id) for p in db])
for d in sorted(set(dirs)):
util.mkdir(os.path.join(deploy_path, d))
self.update({
'get': lambda id: db[str(id)],
'pages': db.select,
'domain': self['domain'].rstrip('/'),
'root': '/' + self.get('root', '').lstrip('/'),
'head_title': self.get('site_title', ''),
'site_tags': db.tags,
'join_url': self.join_url,
'tags_by_count': db.tags_by_count,
'tags_by_name': db.tags_by_name,
'years': sorted(years.keys()),
'default_template': self.get('default_template', 'page'),
})
self['home'] = self['domain'] + self['root']
db.render()
#try: db.render()
#except MakoException as e: util.die('template error:', e)
if self['feed']:
feed_posts = db.select(10)
feed_date = feed_posts[0].posted or feed_posts[0].date
feed = rss2.RSS2(items=[p.feed_item() for p in feed_posts],
title=self['site_title'], description=self.get('description', ''),
link=self['domain'] + self['root'], generator='Pilcrow',
language=self['lang'], lastBuildDate=feed_date)
with open(path.join(deploy_path, self['feed']), 'w') as f:
feed.write_xml(f, 'utf-8')
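# NOTE: a standalone sketch, not part of the original module, of the
# URL-building rule used by Pilcrow.join_url above: collapse duplicate slashes
# and append '.html' only when clean_urls is disabled. The sample segments are
# arbitrary and the 'ext' keyword handling of the real method is omitted.
def _demo_join_url(parts, clean_urls=False):
    """Sketch: join path segments into a page URL."""
    url = re.sub('//+', '/', '/'.join(str(p) for p in parts if p))
    ext = '' if clean_urls else '.html'
    if ext and url.endswith(ext):
        url = url[:-len(ext)]
    return url + ext
# e.g. _demo_join_url(['/', 'blog', 2009, 'hello']) returns '/blog/2009/hello.html'
#      _demo_join_url(['/', 'blog', 'hello'], clean_urls=True) returns '/blog/hello'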
|
|
from tqdm import tqdm
class Core(object):
"""
Implements the functions to run a generic algorithm.
"""
def __init__(self, agent, mdp, callbacks_fit=None, callback_step=None,
preprocessors=None):
"""
Constructor.
Args:
agent (Agent): the agent moving according to a policy;
mdp (Environment): the environment in which the agent moves;
callbacks_fit (list): list of callbacks to execute at the end of
each fit;
callback_step (Callback): callback to execute after each step;
preprocessors (list): list of state preprocessors to be
applied to state variables before feeding them to the
agent.
"""
self.agent = agent
self.mdp = mdp
self.callbacks_fit = callbacks_fit if callbacks_fit is not None else list()
self.callback_step = callback_step if callback_step is not None else lambda x: None
self._preprocessors = preprocessors if preprocessors is not None else list()
self._state = None
self._total_episodes_counter = 0
self._total_steps_counter = 0
self._current_episodes_counter = 0
self._current_steps_counter = 0
self._episode_steps = None
self._n_episodes = None
self._n_steps_per_fit = None
self._n_episodes_per_fit = None
def learn(self, n_steps=None, n_episodes=None, n_steps_per_fit=None,
n_episodes_per_fit=None, render=False, quiet=False):
"""
This function moves the agent in the environment and fits the policy
using the collected samples. The agent can be moved for a given number
of steps or a given number of episodes and, independently from this
choice, the policy can be fitted after a given number of steps or a
given number of episodes. By default, the environment is reset.
Args:
n_steps (int, None): number of steps to move the agent;
n_episodes (int, None): number of episodes to move the agent;
n_steps_per_fit (int, None): number of steps between each fit of the
policy;
n_episodes_per_fit (int, None): number of episodes between each fit
of the policy;
render (bool, False): whether to render the environment or not;
            quiet (bool, False): if True, do not show the progress bar.
"""
assert (n_episodes_per_fit is not None and n_steps_per_fit is None)\
or (n_episodes_per_fit is None and n_steps_per_fit is not None)
self._n_steps_per_fit = n_steps_per_fit
self._n_episodes_per_fit = n_episodes_per_fit
if n_steps_per_fit is not None:
fit_condition =\
lambda: self._current_steps_counter >= self._n_steps_per_fit
else:
fit_condition = lambda: self._current_episodes_counter\
>= self._n_episodes_per_fit
self._run(n_steps, n_episodes, fit_condition, render, quiet)
def evaluate(self, initial_states=None, n_steps=None, n_episodes=None,
render=False, quiet=False):
"""
This function moves the agent in the environment using its policy.
The agent is moved for a provided number of steps, episodes, or from
a set of initial states for the whole episode. By default, the
environment is reset.
Args:
initial_states (np.ndarray, None): the starting states of each
episode;
n_steps (int, None): number of steps to move the agent;
n_episodes (int, None): number of episodes to move the agent;
render (bool, False): whether to render the environment or not;
            quiet (bool, False): if True, do not show the progress bar.
"""
fit_condition = lambda: False
return self._run(n_steps, n_episodes, fit_condition, render, quiet,
initial_states)
def _run(self, n_steps, n_episodes, fit_condition, render, quiet,
initial_states=None):
assert n_episodes is not None and n_steps is None and initial_states is None\
or n_episodes is None and n_steps is not None and initial_states is None\
or n_episodes is None and n_steps is None and initial_states is not None
self._n_episodes = len(
initial_states) if initial_states is not None else n_episodes
if n_steps is not None:
move_condition =\
lambda: self._total_steps_counter < n_steps
steps_progress_bar = tqdm(total=n_steps,
dynamic_ncols=True, disable=quiet,
leave=False)
episodes_progress_bar = tqdm(disable=True)
else:
move_condition =\
lambda: self._total_episodes_counter < self._n_episodes
steps_progress_bar = tqdm(disable=True)
episodes_progress_bar = tqdm(total=self._n_episodes,
dynamic_ncols=True, disable=quiet,
leave=False)
return self._run_impl(move_condition, fit_condition, steps_progress_bar,
episodes_progress_bar, render, initial_states)
def _run_impl(self, move_condition, fit_condition, steps_progress_bar,
episodes_progress_bar, render, initial_states):
self._total_episodes_counter = 0
self._total_steps_counter = 0
self._current_episodes_counter = 0
self._current_steps_counter = 0
dataset = list()
last = True
while move_condition():
if last:
self.reset(initial_states)
sample = self._step(render)
self.callback_step([sample])
self._total_steps_counter += 1
self._current_steps_counter += 1
steps_progress_bar.update(1)
if sample[-1]:
self._total_episodes_counter += 1
self._current_episodes_counter += 1
episodes_progress_bar.update(1)
dataset.append(sample)
if fit_condition():
self.agent.fit(dataset)
self._current_episodes_counter = 0
self._current_steps_counter = 0
for c in self.callbacks_fit:
c(dataset)
dataset = list()
last = sample[-1]
self.agent.stop()
self.mdp.stop()
steps_progress_bar.close()
episodes_progress_bar.close()
return dataset
def _step(self, render):
"""
Single step.
Args:
render (bool): whether to render or not.
Returns:
A tuple containing the previous state, the action sampled by the
agent, the reward obtained, the reached state, the absorbing flag
of the reached state and the last step flag.
"""
action = self.agent.draw_action(self._state)
next_state, reward, absorbing, _ = self.mdp.step(action)
self._episode_steps += 1
if render:
self.mdp.render()
last = not(
self._episode_steps < self.mdp.info.horizon and not absorbing)
state = self._state
next_state = self._preprocess(next_state.copy())
self._state = next_state
return state, action, reward, next_state, absorbing, last
def reset(self, initial_states=None):
"""
Reset the state of the agent.
"""
if initial_states is None\
or self._total_episodes_counter == self._n_episodes:
initial_state = None
else:
initial_state = initial_states[self._total_episodes_counter]
self._state = self._preprocess(self.mdp.reset(initial_state).copy())
self.agent.episode_start()
self.agent.next_action = None
self._episode_steps = 0
def _preprocess(self, state):
"""
Method to apply state preprocessors.
Args:
state (np.ndarray): the state to be preprocessed.
Returns:
The preprocessed state.
"""
for p in self._preprocessors:
state = p(state)
return state
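# NOTE: a minimal usage sketch, not part of the original module. _ListAgent and
# _ListEnv are made-up stand-ins that implement only the methods Core calls
# (draw_action/fit/episode_start/stop, reset/step/render/stop and an
# info.horizon attribute); a real setup would pass proper Agent and Environment
# objects instead.
class _ListEnv(object):
    class _Info(object):
        horizon = 5
    info = _Info()
    def reset(self, state=None):
        return [0.0]
    def step(self, action):
        return [0.0], 0.0, False, {}
    def render(self):
        pass
    def stop(self):
        pass
class _ListAgent(object):
    next_action = None
    def draw_action(self, state):
        return 0
    def fit(self, dataset):
        pass
    def episode_start(self):
        pass
    def stop(self):
        pass
def _demo_core():
    """Sketch: run two evaluation episodes and collect the samples."""
    core = Core(_ListAgent(), _ListEnv())
    dataset = core.evaluate(n_episodes=2, quiet=True)
    # Each sample is (state, action, reward, next_state, absorbing, last);
    # with horizon=5 and no absorbing states, two episodes yield 10 samples.
    return len(dataset)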
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class FlavorsAdminV3Test(base.BaseV3ComputeAdminTest):
"""
Tests Flavors API Create and Delete that require admin privileges
"""
@classmethod
def setUpClass(cls):
super(FlavorsAdminV3Test, cls).setUpClass()
cls.client = cls.flavors_admin_client
cls.user_client = cls.flavors_client
cls.flavor_name_prefix = 'test_flavor_'
cls.ram = 512
cls.vcpus = 1
cls.disk = 10
cls.ephemeral = 10
cls.swap = 1024
cls.rxtx = 2
def flavor_clean_up(self, flavor_id):
resp, body = self.client.delete_flavor(flavor_id)
self.assertEqual(resp.status, 204)
self.client.wait_for_resource_deletion(flavor_id)
def _create_flavor(self, flavor_id):
# Create a flavor and ensure it is listed
# This operation requires the user to have 'admin' role
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
# Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx)
self.addCleanup(self.flavor_clean_up, flavor['id'])
self.assertEqual(201, resp.status)
self.assertEqual(flavor['name'], flavor_name)
self.assertEqual(flavor['vcpus'], self.vcpus)
self.assertEqual(flavor['disk'], self.disk)
self.assertEqual(flavor['ram'], self.ram)
self.assertEqual(flavor['swap'], self.swap)
if test.is_extension_enabled("os-flavor-rxtx", "compute_v3"):
self.assertEqual(flavor['os-flavor-rxtx:rxtx_factor'], self.rxtx)
self.assertEqual(flavor['ephemeral'],
self.ephemeral)
self.assertEqual(flavor['flavor-access:is_public'], True)
# Verify flavor is retrieved
resp, flavor = self.client.get_flavor_details(flavor['id'])
self.assertEqual(resp.status, 200)
self.assertEqual(flavor['name'], flavor_name)
return flavor['id']
@test.attr(type='gate')
def test_create_flavor_with_int_id(self):
flavor_id = data_utils.rand_int_id(start=1000)
new_flavor_id = self._create_flavor(flavor_id)
self.assertEqual(new_flavor_id, str(flavor_id))
@test.attr(type='gate')
def test_create_flavor_with_uuid_id(self):
flavor_id = str(uuid.uuid4())
new_flavor_id = self._create_flavor(flavor_id)
self.assertEqual(new_flavor_id, flavor_id)
@test.attr(type='gate')
def test_create_flavor_with_none_id(self):
        # If nova receives a request with None as the flavor_id,
        # nova generates a uuid as the flavor_id.
flavor_id = None
new_flavor_id = self._create_flavor(flavor_id)
self.assertEqual(new_flavor_id, str(uuid.UUID(new_flavor_id)))
@test.attr(type='gate')
def test_create_flavor_verify_entry_in_list_details(self):
        # Create a flavor and ensure its details are listed
# This operation requires the user to have 'admin' role
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
# Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx)
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
# Verify flavor is retrieved
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
if flavor['name'] == flavor_name:
flag = True
self.assertTrue(flag)
@test.attr(type='gate')
def test_create_list_flavor_without_extra_data(self):
# Create a flavor and ensure it is listed
# This operation requires the user to have 'admin' role
def verify_flavor_response_extension(flavor):
# check some extensions for the flavor create/show/detail response
self.assertEqual(flavor['swap'], 0)
if test.is_extension_enabled("os-flavor-rxtx", "compute_v3"):
self.assertEqual(int(flavor['os-flavor-rxtx:rxtx_factor']), 1)
self.assertEqual(int(flavor['ephemeral']), 0)
self.assertEqual(flavor['flavor-access:is_public'], True)
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
# Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id)
self.addCleanup(self.flavor_clean_up, flavor['id'])
self.assertEqual(201, resp.status)
self.assertEqual(flavor['name'], flavor_name)
self.assertEqual(flavor['ram'], self.ram)
self.assertEqual(flavor['vcpus'], self.vcpus)
self.assertEqual(flavor['disk'], self.disk)
self.assertEqual(int(flavor['id']), new_flavor_id)
verify_flavor_response_extension(flavor)
# Verify flavor is retrieved
resp, flavor = self.client.get_flavor_details(new_flavor_id)
self.assertEqual(resp.status, 200)
self.assertEqual(flavor['name'], flavor_name)
verify_flavor_response_extension(flavor)
# Check if flavor is present in list
resp, flavors = self.user_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
if flavor['name'] == flavor_name:
verify_flavor_response_extension(flavor)
flag = True
self.assertTrue(flag)
@test.attr(type='gate')
def test_list_non_public_flavor(self):
        # A flavor created with os-flavor-access:is_public set to False should
        # still be present in the admin's list_details response.
# This operation requires the user to have 'admin' role
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
# Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public="False")
self.addCleanup(self.flavor_clean_up, flavor['id'])
# Verify flavor is retrieved
flag = False
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
if flavor['name'] == flavor_name:
flag = True
self.assertTrue(flag)
        # Verify flavor is not retrieved by a non-admin user
flag = False
resp, flavors = self.user_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
if flavor['name'] == flavor_name:
flag = True
self.assertFalse(flag)
@test.attr(type='gate')
def test_create_server_with_non_public_flavor(self):
# Create a flavor with os-flavor-access:is_public false
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
# Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public="False")
self.addCleanup(self.flavor_clean_up, flavor['id'])
self.assertEqual(201, resp.status)
        # Verify that a non-admin user cannot boot a server with the non-public flavor
self.assertRaises(exceptions.BadRequest,
self.servers_client.create_server,
'test', self.image_ref, flavor['id'])
@test.attr(type='gate')
def test_list_public_flavor_with_other_user(self):
# Create a Flavor with public access.
# Try to List/Get flavor with another user
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
# Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public="True")
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
self.new_client = self.flavors_client
# Verify flavor is retrieved with new user
resp, flavors = self.new_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
if flavor['name'] == flavor_name:
flag = True
self.assertTrue(flag)
@test.attr(type='gate')
def test_is_public_string_variations(self):
flavor_id_not_public = data_utils.rand_int_id(start=1000)
flavor_name_not_public = data_utils.rand_name(self.flavor_name_prefix)
flavor_id_public = data_utils.rand_int_id(start=1000)
flavor_name_public = data_utils.rand_name(self.flavor_name_prefix)
# Create a non public flavor
resp, flavor = self.client.create_flavor(flavor_name_not_public,
self.ram, self.vcpus,
self.disk,
flavor_id_not_public,
is_public="False")
self.addCleanup(self.flavor_clean_up, flavor['id'])
# Create a public flavor
resp, flavor = self.client.create_flavor(flavor_name_public,
self.ram, self.vcpus,
self.disk,
flavor_id_public,
is_public="True")
self.addCleanup(self.flavor_clean_up, flavor['id'])
def _flavor_lookup(flavors, flavor_name):
for flavor in flavors:
if flavor['name'] == flavor_name:
return flavor
return None
def _test_string_variations(variations, flavor_name):
for string in variations:
params = {'is_public': string}
r, flavors = self.client.list_flavors_with_detail(params)
self.assertEqual(r.status, 200)
flavor = _flavor_lookup(flavors, flavor_name)
self.assertIsNotNone(flavor)
_test_string_variations(['f', 'false', 'no', '0'],
flavor_name_not_public)
_test_string_variations(['t', 'true', 'yes', '1'],
flavor_name_public)
@test.attr(type='gate')
def test_create_flavor_using_string_ram(self):
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
ram = " 1024 "
resp, flavor = self.client.create_flavor(flavor_name,
ram, self.vcpus,
self.disk,
new_flavor_id)
self.addCleanup(self.flavor_clean_up, flavor['id'])
self.assertEqual(201, resp.status)
self.assertEqual(flavor['name'], flavor_name)
self.assertEqual(flavor['vcpus'], self.vcpus)
self.assertEqual(flavor['disk'], self.disk)
self.assertEqual(flavor['ram'], int(ram))
self.assertEqual(int(flavor['id']), new_flavor_id)
|
|
import nose
from decorator import decorator
import claripy
from test_backend_smt import TestSMTLibBackend
# Using `decorator` instead of the usual functools.wraps pattern is important because nose2 inspects the
# argspec, and wraps does not preserve it!
@decorator
def if_installed(f, *args, **kwargs):
try:
return f(*args, **kwargs)
except claripy.errors.MissingSolverError:
raise nose.SkipTest()
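# Illustrative contrast (not part of the original tests): the usual
# functools.wraps pattern that the comment above warns against. wraps copies
# __name__/__doc__, but the wrapper's signature becomes (*args, **kwargs),
# which is what nose2's argspec inspection sees; the `decorator` package
# instead rebuilds a wrapper with the original function's signature.
import functools
def _if_installed_wraps(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except claripy.errors.MissingSolverError:
            raise nose.SkipTest()
    return wrapper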
KEEP_TEST_PERFORMANT = True
class SmtLibSolverTestBase(TestSMTLibBackend):
def get_solver(self):
raise nose.SkipTest()
# raise NotImplementedError
@if_installed
def test_concat(self):
str_concrete = claripy.StringV("conc")
str_symbol = claripy.StringS("symb_concat", 4, explicit_name=True)
solver = self.get_solver()
res = str_concrete + str_symbol
solver.add(res == claripy.StringV("concrete"))
self.assertTrue(solver.satisfiable())
result = solver.eval(str_symbol, 2)
self.assertEqual(1, len(result))
self.assertEqual("rete", result[0])
result = solver.eval_to_ast(str_symbol, 2)
self.assertEqual([claripy.StringV("rete")], list(result))
@if_installed
def test_concat_simplification(self):
solver = self.get_solver()
str_concrete = claripy.StringV("conc")
res = str_concrete + str_concrete + str_concrete
res2 = claripy.StrConcat(str_concrete, str_concrete)
res3 = claripy.StrConcat(res2, str_concrete)
solver.add(res == res3)
self.assertTrue(solver.satisfiable())
result = solver.eval(str_concrete, 2)
self.assertEqual(["conc"], list(result))
@if_installed
def test_substr(self):
str_symbol = claripy.StringS("symb_subst", 4, explicit_name=True)
solver = self.get_solver()
solver.add(claripy.StrSubstr(1, 2, str_symbol) == claripy.StringV('o'))
self.assertTrue(solver.satisfiable())
results = solver.eval(str_symbol, 2 if KEEP_TEST_PERFORMANT else 100)
self.assertEqual(len(results), 2 if KEEP_TEST_PERFORMANT else 100)
for s in results:
self.assertTrue(s[1:2] == 'o')
@if_installed
def test_substr_simplification(self):
str_concrete = claripy.StringV("concrete")
solver = self.get_solver()
# TODO: Make sure that semantics of Substr match the ones of SMTLib substr
solver.add(claripy.StrSubstr(1, 2, str_concrete) == claripy.StringV('on'))
self.assertTrue(solver.satisfiable())
result = solver.eval(str_concrete, 2)
self.assertEqual(list(result), ["concrete"])
@if_installed
def test_replace(self):
str_to_replace_symb = claripy.StringS("symb_repl", 4, explicit_name=True)
sub_str_to_repl = claripy.StringV("a")
replacement = claripy.StringV("b")
solver = self.get_solver()
repl_stringa = claripy.StrReplace(str_to_replace_symb, sub_str_to_repl, replacement)
solver.add(repl_stringa == claripy.StringV("cbne"))
self.assertTrue(solver.satisfiable())
result = solver.eval(repl_stringa, 2)
self.assertEqual(list(result), ["cbne"])
result = solver.eval(str_to_replace_symb, 2 if KEEP_TEST_PERFORMANT else 100)
self.assertEqual(set(result), {"cbne", "cane"})
@if_installed
def test_replace_simplification(self):
str_to_replace = claripy.StringV("cane")
sub_str_to_repl = claripy.StringV("a")
replacement = claripy.StringV("b")
repl_stringa = claripy.StrReplace(str_to_replace, sub_str_to_repl, replacement)
solver = self.get_solver()
solver.add(repl_stringa == claripy.StringV("cbne"))
self.assertTrue(solver.satisfiable())
result = solver.eval(repl_stringa, 2)
self.assertEqual(["cbne"], list(result))
result = solver.eval(str_to_replace, 2)
self.assertEqual(["cane"], list(result))
@if_installed
def test_ne(self):
str_symb = claripy.StringS("symb_ne", 12, explicit_name=True)
solver = self.get_solver()
solver.add(str_symb != claripy.StringV("concrete"))
self.assertTrue(solver.satisfiable())
result = solver.eval(str_symb, 4 if KEEP_TEST_PERFORMANT else 100)
self.assertTrue('concrete' not in result)
@if_installed
def test_length(self):
str_symb = claripy.StringS("symb_length", 12, explicit_name=True)
solver = self.get_solver()
# TODO: How do we want to deal with the size of a symbolic string?
solver.add(claripy.StrLen(str_symb, 32) == 14)
self.assertTrue(solver.satisfiable())
result = solver.eval(str_symb, 4 if KEEP_TEST_PERFORMANT else 100)
for r in result:
self.assertTrue(len(r) == 14)
@if_installed
def test_length_simplification(self):
str_concrete = claripy.StringV("concrete")
solver = self.get_solver()
solver.add(claripy.StrLen(str_concrete, 32) == 8)
self.assertTrue(solver.satisfiable())
result = solver.eval(str_concrete, 2)
self.assertEqual(['concrete'], list(result))
for r in result:
self.assertTrue(len(r) == 8)
@if_installed
def test_or(self):
str_symb = claripy.StringS("Symb_or", 4, explicit_name=True)
solver = self.get_solver()
res = claripy.Or((str_symb == claripy.StringV("abc")),
(str_symb == claripy.StringV("ciao")))
solver.add(res)
self.assertTrue(solver.satisfiable())
result = solver.eval(str_symb, 3 if KEEP_TEST_PERFORMANT else 100)
self.assertEqual({'ciao', 'abc'}, set(result))
@if_installed
def test_lt_etc(self):
str_symb = claripy.StringS("Symb_2", 4)
solver = self.get_solver()
c1 = claripy.StrLen(str_symb, 32) <= 4
c2 = claripy.StrLen(str_symb, 32) < 4
c3 = claripy.StrLen(str_symb, 32) >= 4
c4 = claripy.StrLen(str_symb, 32) > 4
solver.add(c1)
solver.add(c2)
solver.add(c3)
solver.add(c4)
self.assertFalse(solver.satisfiable())
@if_installed
def test_substr_BV_concrete_index(self):
str_symbol = claripy.StringS("symb_subst", 4, explicit_name=True)
solver = self.get_solver()
bv1 = claripy.BVV(1, 32)
bv2 = claripy.BVV(2, 32)
res = claripy.StrSubstr(bv1, bv2, str_symbol) == claripy.StringV('on')
solver.add(res)
self.assertTrue(solver.satisfiable())
self.assertEqual('on', solver.eval(str_symbol, 1)[0][1:3])
@if_installed
def test_substr_BV_symbolic_index(self):
str_symbol = claripy.StringS("symb_subst", 4, explicit_name=True)
solver = self.get_solver()
start = claripy.BVS("start_idx", 32)
count = claripy.BVS("count", 32)
res = claripy.StrSubstr(start, count, str_symbol) == claripy.StringV('on')
solver.add(res)
self.assertTrue(solver.satisfiable())
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 0, count == 2))[0][0:2])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 1, count == 2))[0][1:3])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 2, count == 2))[0][2:4])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 2, count == 3))[0][2:4])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 2, count == 4))[0][2:4])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 0, count == 3))[0])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 1, count == 4))[0][1:])
@if_installed
def test_substr_BV_mixed_index(self):
str_symbol = claripy.StringS("symb_subst", 4, explicit_name=True)
solver = self.get_solver()
start = claripy.BVS("symb_subst_start_idx", 32, explicit_name=True)
count = claripy.BVV(2, 32)
res = claripy.StrSubstr(start, count, str_symbol) == claripy.StringV('on')
solver.add(res)
self.assertTrue(solver.satisfiable())
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 0,))[0][0:2])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 1,))[0][1:3])
self.assertEqual('on', solver.eval(str_symbol, 1, extra_constraints=(start == 2,))[0][2:4])
@if_installed
def test_contains(self):
str_symb = claripy.StringS("symb_contains", 4, explicit_name=True)
res = claripy.StrContains(str_symb, claripy.StringV("an"))
solver = self.get_solver()
solver.add(res)
self.assertTrue(solver.satisfiable())
solutions = solver.eval(str_symb, 4 if KEEP_TEST_PERFORMANT else 100)
for sol in solutions:
self.assertTrue('an' in sol)
@if_installed
def test_contains_simplification(self):
str_concrete = claripy.StringV("concrete")
solver = self.get_solver()
res = claripy.StrContains(str_concrete, claripy.StringV("nc"))
solver.add(res)
self.assertTrue(solver.satisfiable())
self.assertEqual(tuple(), tuple(solver.constraints))
self.assertEqual(("concrete",), solver.eval(str_concrete, 2))
self.assertEqual((True,), solver.eval(res, 2))
@if_installed
def test_prefix(self):
str_symb = claripy.StringS("symb_prefix", 4, explicit_name=True)
res = claripy.StrPrefixOf(claripy.StringV("an"), str_symb)
solver = self.get_solver()
solver.add(res)
self.assertTrue(solver.satisfiable())
solutions = solver.eval(str_symb, 4 if KEEP_TEST_PERFORMANT else 100)
for sol in solutions:
self.assertTrue(sol.startswith('an'))
@if_installed
def test_suffix(self):
str_symb = claripy.StringS("symb_suffix", 4, explicit_name=True)
res = claripy.StrSuffixOf(claripy.StringV("an"), str_symb)
solver = self.get_solver()
solver.add(res)
self.assertTrue(solver.satisfiable())
solutions = solver.eval(str_symb, 4 if KEEP_TEST_PERFORMANT else 100)
for sol in solutions:
self.assertTrue(sol.endswith('an'))
@if_installed
def test_prefix_simplification(self):
str_concrete = claripy.StringV("concrete")
solver = self.get_solver()
res = claripy.StrPrefixOf(claripy.StringV("conc"), str_concrete)
solver.add(res)
self.assertTrue(solver.satisfiable())
self.assertEqual(tuple(), tuple(solver.constraints))
self.assertEqual(("concrete",), solver.eval(str_concrete, 2))
self.assertEqual((True,), solver.eval(res, 2))
@if_installed
def test_suffix_simplification(self):
str_concrete = claripy.StringV("concrete")
solver = self.get_solver()
res = claripy.StrSuffixOf(claripy.StringV("rete"), str_concrete)
solver.add(res)
self.assertTrue(solver.satisfiable())
self.assertEqual(tuple(), tuple(solver.constraints))
self.assertEqual(("concrete",), solver.eval(str_concrete, 2))
self.assertEqual((True,), solver.eval(res, 2))
@if_installed
def test_index_of(self):
str_symb = claripy.StringS("symb_suffix", 4, explicit_name=True)
res = claripy.StrIndexOf(str_symb, claripy.StringV("an"), 0, 32)
solver = self.get_solver()
target_idx = 4 if KEEP_TEST_PERFORMANT else 100
solver.add(res == target_idx)
self.assertTrue(solver.satisfiable())
solutions = solver.eval(str_symb, 4 if KEEP_TEST_PERFORMANT else 100)
for sol in solutions:
self.assertEqual('an', sol[target_idx:target_idx+2])
self.assertEqual((target_idx,), solver.eval(res, 2))
@if_installed
def test_index_of_simplification(self):
str_concrete = claripy.StringV("concrete")
solver = self.get_solver()
res = claripy.StrIndexOf(str_concrete, claripy.StringV("rete"), 0, 32)
target_idx = 4 if KEEP_TEST_PERFORMANT else 100
solver.add(res == target_idx)
self.assertTrue(solver.satisfiable())
self.assertEqual(tuple(), tuple(solver.constraints))
self.assertEqual((target_idx,), solver.eval(res, 2))
@if_installed
def test_index_of_symbolic_start_idx(self):
str_symb = claripy.StringS("symb_index_of", 4, explicit_name=True)
start_idx = claripy.BVS("symb_start_idx", 32, explicit_name=True)
solver = self.get_solver()
solver.add(start_idx > 32)
solver.add(start_idx < 35)
res = claripy.StrIndexOf(str_symb, claripy.StringV("an"), start_idx, 32)
solver.add(res != -1)
solver.add(res < 38)
self.assertTrue(solver.satisfiable())
self.assertEqual({33, 34, 35, 36, 37}, set(solver.eval(res, 10)))
strs = solver.eval(str_symb, 10 if KEEP_TEST_PERFORMANT else 100)
for s in strs:
self.assertTrue(32 < s.index('an') < 38)
@if_installed
def test_str_to_int(self):
str_symb = claripy.StringS("symb_strtoint", 4, explicit_name=True)
res = claripy.StrToInt(str_symb, 32)
solver = self.get_solver()
target_num = 12 if KEEP_TEST_PERFORMANT else 100000
solver.add(res == target_num)
self.assertTrue(solver.satisfiable())
solutions = solver.eval(str_symb, 2 if KEEP_TEST_PERFORMANT else 1000000)
for sol in solutions:
self.assertTrue(int(sol) == target_num)
@if_installed
def test_str_to_int_simplification(self):
target_num = 12 if not KEEP_TEST_PERFORMANT else 1000000
str_concrete = claripy.StringV(str(target_num))
solver = self.get_solver()
res = claripy.StrToInt(str_concrete, 32)
solver.add(res == target_num)
self.assertTrue(solver.satisfiable())
self.assertEqual(tuple(), tuple(solver.constraints))
self.assertEqual((target_num,), solver.eval(res, 2))
|
|
# Copyright 2020 Tom Caruso & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct as _struct
from geomet.util import (
as_bin_str as _as_bin_str,
take as _take,
endian_token as _endian_token
)
from geomet import wkb as _wkb
def dump(obj, dest_file, big_endian=True):
"""
Dump GeoJSON-like `dict` to GeoPackage binary
and write it to the `dest_file`.
:param dict obj:
        A GeoJSON-like dictionary. It must contain at least the keys 'type'
        and 'coordinates'.
:param dest_file:
Open and writable file-like object.
:param bool big_endian:
        specify the endianness of the dumped object.
:return:
"""
dest_file.write(dumps(obj, big_endian))
def load(source_file):
"""
    Load a GeoJSON `dict` object from a ``source_file`` containing
    GeoPackage binary data (as a byte string).
:param source_file:
Open and readable file-like object.
:return:
A GeoJSON `dict` representing the geometry read from the file.
"""
return loads(source_file.read())
def dumps(obj, big_endian=True):
"""
Dump a GeoJSON-like dict to a GeoPackage bytestring.
If the dict contains a top-level 'meta' key like so:
```
'meta': {'srid': 4326}
```
then the srid will be added to the geopackage header, but *not*
to the WKB geometry header.
If the dict contains a top-level 'bbox' key like so:
```
'bbox': [0, 0, 3, 3]
```
Then an envelope will be added to the geopackage header
with this information.
If the geometry's coordinates are empty (an empty list)
then the geopackage header's "empty" flag will be set,
denoting that this geometry has no coordinates.
Please note that while this library can parse geopackages
with a mixed byte-order in the header, it will only produce
blobs with consistent byte order (albeit properly marked as such).
    That means you cannot produce a geopackage with e.g. a little-endian
header and big-endian WKB geometry.
:param dict obj:
The geojson geometry to dump
:param bool big_endian:
if True, the geopackage binary will use big-endian
byte order, little-endian otherwise.
:return bytes:
bytestring representing the geometry in geopackage
format.
"""
header = _build_geopackage_header(obj, not big_endian)
result = _wkb._dumps(obj, big_endian, include_meta=False)
return header + result
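# Minimal usage sketch (illustrative; the geometry dict below is hypothetical):
# dumping a point with an SRID and a 2D bounding box, as described in the
# docstring above. The srid goes into the geopackage header only, and the bbox
# becomes the header envelope.
def _example_dump_point():
    point = {
        'type': 'Point',
        'coordinates': [1.0, 2.0],
        'meta': {'srid': 4326},         # written to the geopackage header
        'bbox': [1.0, 2.0, 1.0, 2.0],   # 4 values -> 2D envelope (indicator 1)
    }
    return dumps(point)                 # big-endian header + WKB by default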
def loads(string):
"""
    Construct a GeoJSON `dict` from a geopackage byte string.
This function strips the geopackage header from the
string and passes the remaining WKB geometry to the
`geomet.wkb.loads` function.
The envelope, if present, is added to the GeoJSON as
a key called 'bbox' as per the GeoJSON spec, [1].
If an SRID is specified in the geopackage header
AND the wkb header, the SRID in the geopackage header
will take precedence and will replace that SRID
in the returned dict.
[1] https://tools.ietf.org/html/rfc7946#section-5
:param bytes string:
geopackage byte string.
:return dict:
        A GeoJSON `dict` representing the parsed geopackage binary.
"""
string = iter(string)
header = _as_bin_str(_take(_GeoPackage.HEADER_LEN, string))
_check_is_valid(header)
g, p, version, empty, envelope_indicator, is_little_endian, srid = (
_parse_header(header)
)
wkb_offset = _get_wkb_offset(envelope_indicator)
left_to_take = (wkb_offset - _GeoPackage.HEADER_LEN)
envelope_data = _as_bin_str(_take(left_to_take, string))
if envelope_data:
envelope = _parse_envelope(
envelope_indicator, envelope_data, is_little_endian
)
result = _wkb.loads(string)
if srid:
result['meta'] = {'srid': int(srid)}
result['crs'] = {
'type': 'name',
'properties': {'name': 'EPSG%s' % srid},
}
if envelope_data:
result['bbox'] = envelope
return result
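# Illustrative round trip (hypothetical values): a geometry dumped with an SRID
# and bbox comes back with 'meta'/'crs' rebuilt from the geopackage header and
# the envelope restored under 'bbox', as described in the docstring above.
def _example_round_trip():
    geom = {
        'type': 'Point',
        'coordinates': [1.0, 2.0],
        'meta': {'srid': 4326},
        'bbox': [1.0, 2.0, 1.0, 2.0],
    }
    # Expected to contain 'coordinates', 'meta', 'crs' and 'bbox' keys.
    return loads(dumps(geom))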
class _GeoPackage:
"""
Much more information on geopackage structure
can be found here: http://www.geopackage.org/spec/#gpb_format
"""
# The ascii letter 'G'
MAGIC1 = 0x47
# The ascii letter 'P'
MAGIC2 = 0x50
VERSION1 = 0x00
HEADER_LEN = 8
HEADER_PACK_FMT = "BBBBI"
ENVELOPE_2D_LEN = 32
ENVELOPE_3D_LEN = 48
ENVELOPE_4D_LEN = 64
ENVELOPE_MASK = 0b00001111
EMPTY_GEOM_MASK = 0b00011111
ENDIANNESS_MASK = 0b00000001
# map the "envelope indicator" integer we get out of the geopackage header
# to the dimensionality of the envelope.
# more info here: http://www.geopackage.org/spec/#gpb_format
# in the "flags" section, bits 3, 2, 1.
_indicator_to_dim = {
0: 0,
1: 4,
2: 6,
3: 6,
4: 8,
}
# Map the dimensionality of our envelope to the indicator
# integer we will use in the geopackage binary header.
# because we have no way to tell between Z and M values,
# if the geometry has 3 dimensions we default to assume Z.
_dim_to_indicator = {
0: 0,
4: 1,
6: 2,
8: 4
}
def is_valid(data):
"""
Check if the data represents a valid geopackage
geometry. Input can be either the full geometry or
just the header.
:param bytes data:
bytes representing the geopackage binary.
:return (bool, str):
        Whether the geopackage is valid and, if not, a string describing why.
"""
g, p, version, _, envelope_indicator, _, _ = _parse_header(data[:8])
if (g != _GeoPackage.MAGIC1) or (p != _GeoPackage.MAGIC2):
return False, "Missing Geopackage header magic bytes"
if version != _GeoPackage.VERSION1:
return False, "Geopackage version must be 0"
if (envelope_indicator < 0) or (envelope_indicator > 4):
return False, "Envelope indicator must be between 0-4"
return True, ""
def _header_is_little_endian(header):
"""
Check to see if the header is encoded
as little endian or big endian.
Either the entire binary blob or
just the header can be passed in.
:param bytes header:
geopackage header or binary blob
:return bool: is the header little endian
"""
(flags,) = _struct.unpack("B", header[3:4])
return flags & _GeoPackage.ENDIANNESS_MASK
def _parse_header(header):
"""
Unpack all information from the geopackage
header, including "magic" GP bytes. Returns
all of them so we can confirm that this
geopackage is validly formed. Can also accept
the full binary blob.
:param header:
the header or the full geometry.
:return 7-tuple:
all attributes stored in the binary header.
"""
is_little_endian = _header_is_little_endian(header)
fmt = _endian_token(is_little_endian) + _GeoPackage.HEADER_PACK_FMT
g, p, version, flags, srid = _struct.unpack(
fmt, header[:_GeoPackage.HEADER_LEN]
)
empty, envelope_indicator, endianness = _parse_flags(flags)
return g, p, version, empty, envelope_indicator, endianness, srid
def _parse_flags(flags):
"""
Parse the bits in the "flags" byte
of the geopackage header to retrieve
useful information. We specifically parse
the endianness, the envelope indicator,
and the "empty" flag.
Much more info can be found in
the documentation [1].
[1] http://www.geopackage.org/spec/#gpb_format
:param byte flags:
The "flags" byte of a geopackage header.
:return tuple:
"""
endianness = flags & _GeoPackage.ENDIANNESS_MASK
envelope_indicator = (flags & _GeoPackage.ENVELOPE_MASK) >> 1
empty = (flags & _GeoPackage.EMPTY_GEOM_MASK) >> 4
return empty, envelope_indicator, endianness
def _build_flags(empty, envelope_indicator, is_little_endian=1):
"""
Create the "flags" byte which goes into
the geopackage header. Much more info
can be found in the documentation [1].
[1] http://www.geopackage.org/spec/#gpb_format
:param int empty:
0 or 1 indicating whether the geometry is empty.
True and False also work as expected.
:param int envelope_indicator:
indicates the dimensionality of the envelope.
:param int is_little_endian:
0 or 1 (or False / True) indicating
whether the header should be
little-endian encoded.
:return byte:
geopackage header flags
"""
flags = 0b0
if empty:
flags = (flags | 1) << 3
if envelope_indicator:
flags = flags | envelope_indicator
return (flags << 1) | is_little_endian
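# Worked example (values chosen for illustration): a non-empty geometry with a
# 2D envelope and a little-endian header gives flags 0b00000011, and an empty
# geometry with the same envelope gives 0b00010011 (bit 0 = endianness,
# bits 1-3 = envelope indicator, bit 4 = empty). _parse_flags() inverts this.
def _example_flags_round_trip():
    flags = _build_flags(empty=0, envelope_indicator=1, is_little_endian=1)
    assert flags == 0b00000011
    assert _parse_flags(flags) == (0, 1, 1)   # (empty, envelope_indicator, endianness)
    flags = _build_flags(empty=1, envelope_indicator=1, is_little_endian=1)
    assert flags == 0b00010011
    assert _parse_flags(flags) == (1, 1, 1)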
def _build_geopackage_header(obj, is_little_endian):
"""
Create the geopackage header for the input object.
Looks for a 'bbox' key on the geometry to use
for an envelope, and a 'meta' key with an
SRID to encode into the header.
:param dict obj:
a geojson object
:param bool is_little_endian:
which endianness to use when
encoding the data.
:return bytes: geopackage header.
"""
# Collect geometry metadata.
empty = 1 if len(obj['coordinates']) == 0 else 0
envelope = obj.get('bbox', [])
srid = obj.get('meta', {}).get('srid', 0)
try:
envelope_indicator = _dim_to_indicator[len(envelope)]
except KeyError:
raise ValueError("Bounding box must be of length 2*n where "
"n is the number of dimensions represented "
"in the contained geometries.")
pack_args = [
_GeoPackage.MAGIC1,
_GeoPackage.MAGIC2,
_GeoPackage.VERSION1,
# This looks funny, but _build_flags wants a 1 or 0 for
# "little endian" because it uses it to `or` with the bits.
# Conveniently, in Python, False == 0 and True == 1, so
# we can pass the boolean right in and it works as expected.
_build_flags(empty, envelope_indicator, is_little_endian),
srid
]
pack_fmt = _endian_token(is_little_endian) + _GeoPackage.HEADER_PACK_FMT
# This has no effect if we have a 0 envelope indicator.
pack_fmt += ('d' * _indicator_to_dim[envelope_indicator])
pack_args.extend(envelope)
return _struct.pack(pack_fmt, *pack_args)
def _check_is_valid(data):
"""
Raise if the header is not valid geopackage.
:param bytes data: Geopackage data or header.
:return None:
"""
valid, reason = is_valid(data)
if not valid:
raise ValueError("Could not read Geopackage geometry "
"because of errors: " + reason)
def _get_wkb_offset(envelope_indicator):
"""
Get the full byte offset at which the WKB geometry lies
in the geopackage geometry.
:param int envelope_indicator:
indicates the dimensionality of the envelope.
:return int:
number of bytes until the beginning of the
WKB geometry.
"""
base_len = _GeoPackage.HEADER_LEN
return (base_len * _indicator_to_dim[envelope_indicator]) + base_len
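# Worked example: with envelope indicator 1 (a 2D envelope of 4 doubles), the
# WKB geometry starts at byte (8 * 4) + 8 == 40, i.e. HEADER_LEN plus
# ENVELOPE_2D_LEN; with indicator 0 (no envelope) it starts right after the
# 8-byte header.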
def _parse_envelope(envelope_indicator, envelope, is_little_endian):
"""
Parse a geopackage envelope bytestring into an n-tuple
of floats.
:param int envelope_indicator:
indicates the dimensionality of the envelope.
:param bytes envelope:
Bytestring of the envelope values.
    :param bool is_little_endian:
        whether the envelope bytes are little-endian encoded.
:return tuple[float]: Geometry envelope.
"""
pack_fmt = _endian_token(is_little_endian)
pack_fmt += ('d' * _indicator_to_dim[envelope_indicator])
return _struct.unpack(pack_fmt, envelope)
|
|
from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import (
ARDIE,
ARDMediathekIE,
SportschauIE,
)
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbc import (
BBCCoUkIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
from .europa import EuropaIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import FKTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import (
ImgurIE,
ImgurAlbumIE,
)
from .ina import InaIE
from .indavideo import (
IndavideoIE,
IndavideoEmbedIE,
)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lecture2go import Lecture2GoIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .limelight import (
LimelightMediaIE,
LimelightChannelIE,
LimelightChannelListIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
MTVDEIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .muzu import MuzuTVIE
from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
MSNBCIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
VPROIE,
WNLIE
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .periscope import (
PeriscopeIE,
QuickscopeIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snagfilms import (
SnagFilmsIE,
SnagFilmsEmbedIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ultimedia import UltimediaIE
from .universalmusicfrance import UniversalMusicFranceIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vlive import VLiveIE
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
_ALL_CLASSES = [
klass
for name, klass in globals().items()
if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
return [klass() for klass in _ALL_CLASSES]
def list_extractors(age_limit):
"""
Return a list of extractors that are suitable for the given age,
sorted by extractor ID.
"""
return sorted(
filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
key=lambda ie: ie.IE_NAME.lower())
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name + 'IE']
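# Illustrative sketch (not part of the original module): a typical way these
# helpers are combined. The per-extractor API used here (ie.suitable(url) and
# ie.IE_NAME) follows youtube-dl conventions but is defined elsewhere, so
# treat it as an assumption.
def _example_find_extractor_name(url, age_limit=None):
    # Walk the extractors sorted by IE_NAME and return the name of the first
    # one whose suitable() check matches the URL, or None if nothing matches.
    for ie in list_extractors(age_limit):
        if ie.suitable(url):
            return ie.IE_NAME
    return None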
|
|
#!/usr/bin/env python
"""
Name : qconcurrency.widgets._dictmodelqcombobox_.py
Created : Apr 16, 2017
Author : Will Pittman
Contact : [email protected]
________________________________________________________________________________
Description : A QComboBox designed to work with a `DictModel`.
              Builtin QComboBoxes do not support nested items; this combobox
              is built to address that issue, and it is also kept up to date
              with the model as it changes.
________________________________________________________________________________
"""
#builtin
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict, MutableMapping
import functools
import uuid
#external
from Qt import QtWidgets, QtGui, QtCore
from Qt.QtWidgets import QSizePolicy
#internal
from qconcurrency.models import DictModel
__all__ = ['DictModelQComboBox']
#!TODO: test updates to model update qcombobox
#!TODO: allow different column for name/id for each different level
class DictModelQComboBox( QtWidgets.QComboBox ):
"""
ComboBox whose contents are determined by the contents of a
:py:obj:`qconcurrency.models.DictModel`. The widget's contents are
    updated whenever the model changes.
Example:
.. image:: ../media/qconcurrency.widgets.DictModelQComboBox.png
"""
def __init__(self, dictmodel, indexinfo={'id':'_id','name':'name'} ):
"""
Args:
dictmodel (DictModel):
The model you want this QComboBox to display.
indexinfo (dict, optional):
                Stores information on where to find the following information
                in the model:
* `name`: the name, as it appears in the QComboBox
* `id`: the id, generally a databaseId that corresponds with the name
This can either be a single dict, if the id/name
will be the same column for every level of the dictmodel,
or it can be a dictionary with the level, and a dict of id/name
for that specific level.
.. code-block:: python
# example 1:
# applies to all levels of table
indexinfo = { 'id':'_id', 'name':'name' }
# example 2:
# level-specific columns
indexinfo = {
'departmenttype':{'id':'_id', 'name':'departmenttype_name'},
'department' :{'id':'_id', 'name':'department_name'},
}
"""
QtWidgets.QComboBox.__init__(self)
if not isinstance( dictmodel, DictModel ):
raise TypeError(
'`dictmodel` argument expected to be of type DictModel. Received %s' % type(dictmodel)
)
self._dictmodel = dictmodel
self._indexinfo = indexinfo
self._modelindexes = {} # {combobox_index : QModelIndex} QModelIndex of each item in QComboBox
self._level_specific_columns = False # True if self._indexinfo is in the format of { level : {'_id':..., 'name':,..}, level : {...} }
# False if self._indexinfo is in the format of {'_id':..., 'name':...}
if len(dictmodel.keys()):
if isinstance( dictmodel[ dictmodel.keys()[0] ], MutableMapping ):
self._level_specific_columns = True
# Connections (update combobox every time model changes )
self._dictmodel.itemChanged.connect( self._handle_modelchange )
self.currentIndexChanged.connect(self.get_selected)
# Load
self._populate_combo()
def _handle_modelchange(self,*args,**kwds):
self._populate_combo()
def _populate_combo(self, _baseitem=None, _indent_lv=0):
"""
Rebuilds the available options
"""
if _baseitem is None:
_baseitem = self._dictmodel
selitem = self.get_selected()
self.clear()
for key in _baseitem.keys():
modelitem = _baseitem[ key ]
self._modelindexes[ self.count() ] = modelitem.index()
if self._level_specific_columns:
name = self._indexinfo[ modelitem.level() ]['name']
else:
name = self._indexinfo['name']
self.addItem(
' '*(3*_indent_lv)
+ modelitem.columnval( name )
)
# if modelitem has children
if modelitem.rowCount():
self._populate_combo( _baseitem=modelitem, _indent_lv=_indent_lv+1 )
# TODO:
# if this was the first item
# restore the user's selection if possible
if _baseitem is self._dictmodel:
pass
def get_selected(self):
"""
Returns the modelitem corresponding to the user's selection.
From this object, you can find any information from the model.
Returns:
The corresponding :py:obj:`DictModelRow` to the selected item.
See Also:
* :py:meth:`DictModelRow.columnvals`
* :py:meth:`DictModelRow.columnval`
* :py:meth:`DictModelRow.id`
"""
# if there are items in the list
if self.currentIndex() != -1:
modelitem = self._dictmodel.itemFromIndex(
self._modelindexes[ self.currentIndex() ]
)
return modelitem
def set_selected(self, modelindex):
"""
Sets the selected item in the QComboBox
"""
for combo_index in self._modelindexes:
if self._modelindexes[ combo_index ] == modelindex:
self.setCurrentIndex( combo_index )
return
raise KeyError(
            'Cannot find item in model with a modelindex of %s' % repr(modelindex)
)
def get_item_indexinfo(self, modelitem):
"""
Returns all keys from `indexinfo` for a particular item.
(taking into account item's nested-table-level, if `indexinfo` requires it)
Returns:
.. code-block:: python
# if modelitem exists in model
{
'id': 123,
'name': 'test name',
}
# if modelitem does not exist in model
{}
"""
indexinfo = {}
if self._level_specific_columns:
for key in self._indexinfo[ modelitem.level() ]:
indexinfo[ key ] = modelitem.columnval( self._indexinfo[ modelitem.level() ][ key ] )
else:
for key in self._indexinfo:
indexinfo[ key ] = modelitem.columnval( self._indexinfo[ key ] )
return indexinfo
def get_modelindex_from_index(self, index):
"""
Returns the :py:obj:`QtCore.QModelIndex` corresponding
to the item with a :py:obj:`QtWidgets.QComboBox` index.
"""
return self._modelindexes[ index ]
if __name__ == '__main__':
#external
from Qt import QtWidgets
#internal
from qconcurrency import QApplication
from qconcurrency.models import DictModel
with QApplication():
model = DictModel( columns=['name'] )
model.add_row( 1, {'name':'one'} )
model.add_row( 2, {'name':'two'} )
model.add_row( 3, {'name':'three'} )
model[1].add_child( 10, {'name':'one-a'} )
model[1].add_child( 11, {'name':'one-b'} )
# This QComboBox can display a hierarchy (with indentation)
combo = DictModelQComboBox( model, indexinfo={'id':'_id', 'name':'name'} )
combo.show()
combo.set_selected( model[1][11].index() )
print( combo.get_selected().columnvals() )
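        # Hypothetical follow-up (not part of the original demo): `get_item_indexinfo`
        # resolves the configured id/name columns for the current selection, assuming
        # the DictModel exposes an '_id' column as the default `indexinfo` implies.
        print( combo.get_item_indexinfo( combo.get_selected() ) )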
|
|
# ~~ Email:Notifications~~
from flask import url_for
import json
from portality import models, app_email, constants
from portality.core import app
from portality.dao import Facetview2
from portality.ui.messages import Messages
from portality.lib import dates
def send_admin_ready_email(application, editor_id):
""" send email to the managing editors when an application is ready """
journal_name = application.bibjson().title
url_root = app.config.get("BASE_URL")
query_for_id = Facetview2.make_query(query_string=application.id)
string_id_query = json.dumps(query_for_id).replace(' ', '') # Avoid '+' being added to URLs by removing spaces
if application.application_type == constants.APPLICATION_TYPE_NEW_APPLICATION:
url_for_application = url_root + url_for("admin.suggestions", source=string_id_query)
else:
url_for_application = url_root + url_for("admin.update_requests", source=string_id_query)
# This is to the managing editor email list
to = [app.config.get('MANAGING_EDITOR_EMAIL', '[email protected]')]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - application ready"
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/admin_application_ready.jinja2",
application_title=journal_name,
editor=editor_id,
url_for_application=url_for_application)
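# Note on the .replace(' ', '') above: json.dumps() puts a space after ':' and ','
# by default, and those spaces would be encoded as '+' once the query is embedded in
# the URL. A more targeted alternative is json.dumps(query, separators=(',', ':')),
# which drops only the separator spaces, not spaces inside values.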
def send_editor_group_email(obj):
""" Send an email to the editor of a group """
if type(obj) is models.Suggestion:
template = "email/editor_application_assigned_group.jinja2"
subject = app.config.get("SERVICE_NAME", "") + " - new application assigned to your group"
elif type(obj) is models.Journal:
template = "email/editor_journal_assigned_group.jinja2"
subject = app.config.get("SERVICE_NAME", "") + " - new journal assigned to your group"
else:
app.logger.error("Attempted to send editor group email for something that's not an Application or Journal")
return
eg = models.EditorGroup.pull_by_key("name", obj.editor_group)
if eg is None:
return
editor = eg.get_editor_account()
url_root = app.config.get("BASE_URL")
to = [editor.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name=template,
editor=editor.id,
journal_name=obj.bibjson().title,
url_root=url_root)
def send_assoc_editor_email(obj):
""" Inform an associate editor that a journal or application has been assigned to them """
if type(obj) is models.Suggestion:
template = "email/assoc_editor_application_assigned.jinja2"
subject = app.config.get("SERVICE_NAME", "") + " - new application assigned to you"
elif type(obj) is models.Journal:
template = "email/assoc_editor_journal_assigned.jinja2"
subject = app.config.get("SERVICE_NAME", "") + " - new journal assigned to you"
else:
app.logger.error("Attempted to send email to editors for something that's not an Application or Journal")
return
if obj.editor is None:
return
assoc_editor = models.Account.pull(obj.editor)
eg = models.EditorGroup.pull_by_key("name", obj.editor_group)
if assoc_editor is None or eg is None:
return
url_root = app.config.get("BASE_URL")
to = [assoc_editor.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name=template,
associate_editor=assoc_editor.id,
journal_name=obj.bibjson().title,
group_name=eg.name,
url_root=url_root)
def send_publisher_update_request_editor_assigned_email(application):
""" Send email to publisher informing them an editor has been assigned """
owner = models.Account.pull(application.owner)
send_list = [
{
"owner" : owner,
"name" : owner.name,
"email" : owner.email,
"sent_alert" : Messages.SENT_PUBLISHER_ASSIGNED_EMAIL,
"not_sent_alert" : Messages.NOT_SENT_PUBLISHER_ASSIGNED_EMAIL
}
]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME","") + " - your update request has been assigned an editor for review"
alerts = []
for instructions in send_list:
to = [instructions["email"]]
try:
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_update_request_editor_assigned.jinja2",
owner=instructions["owner"],
application=application)
alerts.append(instructions["sent_alert"])
except app_email.EmailException:
alerts.append(instructions["not_sent_alert"])
return alerts
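# `alerts` holds one Messages constant per recipient, recording whether the email was
# sent; callers typically flash these messages to the UI. The same pattern is used by
# the other publisher-facing senders below.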
def send_publisher_application_editor_assigned_email(application):
""" Send email to publisher informing them an editor has been assigned """
send_list = []
owner = models.Account.pull(application.owner)
if owner is not None:
send_list.append(
{
"owner" : owner,
"name" : owner.name,
"email" : owner.email,
"sent_alert" : Messages.SENT_PUBLISHER_ASSIGNED_EMAIL,
"not_sent_alert" : Messages.NOT_SENT_PUBLISHER_ASSIGNED_EMAIL
}
)
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME","") + " - your application has been assigned an editor for review"
alerts = []
for instructions in send_list:
to = [instructions["email"]]
try:
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_application_editor_assigned.jinja2",
application=application,
owner=instructions["owner"])
alerts.append(instructions["sent_alert"])
except app_email.EmailException:
alerts.append(instructions["not_sent_alert"])
return alerts
def send_editor_inprogress_email(application):
""" Inform editor in charge of an application that the status is has been reverted from ready by a ManEd """
journal_name = application.bibjson().title
url_root = app.config.get("BASE_URL")
query_for_id = Facetview2.make_query(query_string=application.id)
string_id_query = json.dumps(query_for_id).replace(' ', '') # Avoid '+' being added to URLs by removing spaces
url_for_application = url_root + url_for("editor.group_suggestions", source=string_id_query)
# This is to the editor in charge of this AssEd's group
editor_group_name = application.editor_group
editor_group_id = models.EditorGroup.group_exists_by_name(name=editor_group_name)
try:
editor_group = models.EditorGroup.pull(editor_group_id)
editor_acc = editor_group.get_editor_account()
editor_id = editor_acc.id
to = [editor_acc.email]
except AttributeError:
raise
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - Application reverted to 'In Progress' by Managing Editor"
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/editor_application_inprogress.jinja2",
editor=editor_id,
application_title=journal_name,
url_for_application=url_for_application)
def send_assoc_editor_inprogress_email(application):
""" Inform the associate editor assigned to application that the status has been reverted by an Ed or ManEd """
journal_name = application.bibjson().title
url_root = app.config.get("BASE_URL")
query_for_id = Facetview2.make_query(query_string=application.id)
string_id_query = json.dumps(query_for_id).replace(' ', '') # Avoid '+' being added to URLs by removing spaces
url_for_application = url_root + url_for("editor.associate_suggestions", source=string_id_query)
# This is to the associate editor assigned to this application
assoc_editor = models.Account.pull(application.editor)
to = [assoc_editor.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - an application assigned to you has not passed review."
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/assoc_editor_application_inprogress.jinja2",
assoc_editor=assoc_editor.id,
application_title=journal_name,
url_for_application=url_for_application)
def send_editor_completed_email(application):
""" inform the editor in charge of an application that it has been completed by an associate editor """
journal_name = application.bibjson().title
url_root = app.config.get("BASE_URL")
query_for_id = Facetview2.make_query(query_string=application.id)
string_id_query = json.dumps(query_for_id).replace(' ', '') # Avoid '+' being added to URLs by removing spaces
url_for_application = url_root + url_for("editor.group_suggestions", source=string_id_query)
# This is to the editor in charge of this application's assigned editor group
editor_group_name = application.editor_group
editor_group_id = models.EditorGroup.group_exists_by_name(name=editor_group_name)
editor_group = models.EditorGroup.pull(editor_group_id)
editor_acc = editor_group.get_editor_account()
editor_id = editor_acc.id
to = [editor_acc.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - application marked 'completed'"
# The status change will have come from the associate editor assigned to the journal
assoc_id = application.editor
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/editor_application_completed.jinja2",
editor=editor_id,
associate_editor=assoc_id,
application_title=journal_name,
url_for_application=url_for_application)
def send_publisher_update_request_inprogress_email(application):
"""Tell the publisher the UR is underway"""
owner = models.Account.pull(application.owner)
send_list = [
{
"owner" : owner,
"name" : owner.name,
"email" : owner.email,
"sent_alert" : Messages.SENT_PUBLISHER_IN_PROGRESS_EMAIL,
"not_sent_alert" : Messages.NOT_SENT_PUBLISHER_IN_PROGRESS_EMAIL
}
]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - your update request is under review"
alerts = []
for instructions in send_list:
to = [instructions["email"]]
try:
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_update_request_inprogress.jinja2",
owner=instructions["owner"],
application=application)
alerts.append(instructions["sent_alert"])
except app_email.EmailException:
alerts.append(instructions["not_sent_alert"])
return alerts
def send_publisher_application_inprogress_email(application):
"""Tell the publisher the application is underway"""
send_list = []
owner = models.Account.pull(application.owner)
if owner is not None:
send_list.append(
{
"owner" : owner,
"name" : owner.name,
"email" : owner.email,
"sent_alert" : Messages.SENT_PUBLISHER_IN_PROGRESS_EMAIL,
"not_sent_alert" : Messages.NOT_SENT_PUBLISHER_IN_PROGRESS_EMAIL
}
)
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - your application is under review"
alerts = []
for instructions in send_list:
to = [instructions["email"]]
try:
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_application_inprogress.jinja2",
owner=instructions["owner"],
application=application)
alerts.append(instructions["sent_alert"])
except app_email.EmailException:
alerts.append(instructions["not_sent_alert"])
return alerts
def send_received_email(application):
""" Email the publisher when an application is received """
owner = models.Account.pull(application.owner)
to = [owner.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - your application to DOAJ has been received"
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_application_received.jinja2",
owner=owner,
application=application)
def send_publisher_update_request_revisions_required(application):
"""Tell the publisher their update request requires revisions"""
journal_title = application.bibjson().title
owner = models.Account.pull(application.owner)
if owner is None:
raise app_email.EmailException("Application {x} does not have an owner, cannot send email".format(x=application.id))
# This is to the publisher contact on the application
publisher_name = owner.name
publisher_email = owner.email
to = [publisher_email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - your update request requires revisions"
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_update_request_revisions.jinja2",
application=application,
owner=owner)
def send_publisher_reject_email(application, note=None, update_request=False):
"""Tell the publisher their application was rejected"""
send_instructions = []
owner = models.Account.pull(application.owner)
if owner is not None:
send_instructions.append({
"owner" : owner,
"name" : owner.name,
"email" : owner.email,
"type" : "owner"
})
if len(send_instructions) == 0:
raise app_email.EmailException("Application {x} does not have an owner or suggester, cannot send email".format(x=application.id))
# determine if this is an application or an update request
app_type = "application" if update_request is False else "update"
for instructions in send_instructions:
to = [instructions["email"]]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - your " + app_type + " was rejected"
if update_request:
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_update_request_rejected.jinja2",
owner=instructions["owner"],
application=application,
note=note)
else:
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/publisher_application_rejected.jinja2",
owner=instructions["owner"],
application=application,
note=note)
return send_instructions
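# Illustrative shape of the returned send_instructions (hypothetical values):
#   [{"owner": <Account>, "name": "Jane Doe", "email": "[email protected]", "type": "owner"}]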
def send_account_created_email(account):
reset_url = url_for('account.reset', reset_token=account.reset_token, _external=True)
forgot_pw_url = url_for('account.forgot', _external=True)
password_create_timeout_seconds = int(
app.config.get("PASSWORD_CREATE_TIMEOUT", app.config.get('PASSWORD_RESET_TIMEOUT', 86400) * 14))
password_create_timeout_days = password_create_timeout_seconds / (60 * 60 * 24)
to = [account.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', '[email protected]')
subject = app.config.get("SERVICE_NAME", "") + " - account created, please verify your email address"
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/account_created.jinja2",
reset_url=reset_url,
email=account.email,
timeout_days=password_create_timeout_days,
forgot_pw_url=forgot_pw_url
)
def send_account_password_reset_email(account):
reset_url = url_for('account.reset', reset_token=account.reset_token, _external=True)
to = [account.email]
fro = app.config.get('SYSTEM_EMAIL_FROM', app.config['ADMIN_EMAIL'])
subject = app.config.get("SERVICE_NAME", "") + " - password reset"
app_email.send_mail(to=to,
fro=fro,
subject=subject,
template_name="email/account_password_reset.jinja2",
email=account.email,
reset_url=reset_url,
forgot_pw_url=url_for('account.forgot', _external=True)
)
|
|
# coding: utf-8
# flake8: noqa
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from swagger_client.models.add_appointment_request import AddAppointmentRequest
from swagger_client.models.add_appointment_response import AddAppointmentResponse
from swagger_client.models.add_arrival_request import AddArrivalRequest
from swagger_client.models.add_arrival_response import AddArrivalResponse
from swagger_client.models.add_client_request import AddClientRequest
from swagger_client.models.add_client_response import AddClientResponse
from swagger_client.models.add_client_to_class_request import AddClientToClassRequest
from swagger_client.models.add_client_to_class_response import AddClientToClassResponse
from swagger_client.models.add_client_to_class_visit import AddClientToClassVisit
from swagger_client.models.add_client_to_enrollment_request import AddClientToEnrollmentRequest
from swagger_client.models.add_contact_log_request import AddContactLogRequest
from swagger_client.models.add_contact_log_type import AddContactLogType
from swagger_client.models.amenity import Amenity
from swagger_client.models.appointment import Appointment
from swagger_client.models.appointment_option import AppointmentOption
from swagger_client.models.assigned_client_index import AssignedClientIndex
from swagger_client.models.autopay_schedule import AutopaySchedule
from swagger_client.models.availability import Availability
from swagger_client.models.booking_window import BookingWindow
from swagger_client.models.checkout_appointment_booking_request import CheckoutAppointmentBookingRequest
from swagger_client.models.checkout_item import CheckoutItem
from swagger_client.models.checkout_item_wrapper import CheckoutItemWrapper
from swagger_client.models.checkout_payment_info import CheckoutPaymentInfo
from swagger_client.models.checkout_shopping_cart_request import CheckoutShoppingCartRequest
from swagger_client.models.class_description import ClassDescription
from swagger_client.models.class_payroll_event import ClassPayrollEvent
from swagger_client.models.class_schedule import ClassSchedule
from swagger_client.models.client import Client
from swagger_client.models.client_contract import ClientContract
from swagger_client.models.client_credit_card import ClientCreditCard
from swagger_client.models.client_document import ClientDocument
from swagger_client.models.client_index import ClientIndex
from swagger_client.models.client_index_value import ClientIndexValue
from swagger_client.models.client_membership import ClientMembership
from swagger_client.models.client_purchase_record import ClientPurchaseRecord
from swagger_client.models.client_relationship import ClientRelationship
from swagger_client.models.client_service import ClientService
from swagger_client.models.color import Color
from swagger_client.models.contact_log import ContactLog
from swagger_client.models.contact_log_comment import ContactLogComment
from swagger_client.models.contact_log_sub_type import ContactLogSubType
from swagger_client.models.contact_log_type import ContactLogType
from swagger_client.models.contract import Contract
from swagger_client.models.contract_item import ContractItem
from swagger_client.models.course import Course
from swagger_client.models.credit_card_info import CreditCardInfo
from swagger_client.models.cross_regional_client_association import CrossRegionalClientAssociation
from swagger_client.models.custom_client_field import CustomClientField
from swagger_client.models.custom_client_field_value import CustomClientFieldValue
from swagger_client.models.custom_payment_method import CustomPaymentMethod
from swagger_client.models.formula_note import FormulaNote
from swagger_client.models.get_activation_code_response import GetActivationCodeResponse
from swagger_client.models.get_active_client_memberships_request import GetActiveClientMembershipsRequest
from swagger_client.models.get_active_client_memberships_response import GetActiveClientMembershipsResponse
from swagger_client.models.get_active_session_times_request import GetActiveSessionTimesRequest
from swagger_client.models.get_active_session_times_response import GetActiveSessionTimesResponse
from swagger_client.models.get_appointment_options_response import GetAppointmentOptionsResponse
from swagger_client.models.get_bookable_items_request import GetBookableItemsRequest
from swagger_client.models.get_bookable_items_response import GetBookableItemsResponse
from swagger_client.models.get_class_descriptions_request import GetClassDescriptionsRequest
from swagger_client.models.get_class_descriptions_response import GetClassDescriptionsResponse
from swagger_client.models.get_class_payroll_request import GetClassPayrollRequest
from swagger_client.models.get_class_payroll_response import GetClassPayrollResponse
from swagger_client.models.get_class_schedules_request import GetClassSchedulesRequest
from swagger_client.models.get_class_schedules_response import GetClassSchedulesResponse
from swagger_client.models.get_class_visits_request import GetClassVisitsRequest
from swagger_client.models.get_class_visits_response import GetClassVisitsResponse
from swagger_client.models.get_classes_request import GetClassesRequest
from swagger_client.models.get_classes_response import GetClassesResponse
from swagger_client.models.get_client_account_balances_request import GetClientAccountBalancesRequest
from swagger_client.models.get_client_account_balances_response import GetClientAccountBalancesResponse
from swagger_client.models.get_client_contracts_request import GetClientContractsRequest
from swagger_client.models.get_client_contracts_response import GetClientContractsResponse
from swagger_client.models.get_client_formula_notes_request import GetClientFormulaNotesRequest
from swagger_client.models.get_client_formula_notes_response import GetClientFormulaNotesResponse
from swagger_client.models.get_client_indexes_request import GetClientIndexesRequest
from swagger_client.models.get_client_indexes_response import GetClientIndexesResponse
from swagger_client.models.get_client_purchases_request import GetClientPurchasesRequest
from swagger_client.models.get_client_purchases_response import GetClientPurchasesResponse
from swagger_client.models.get_client_referral_types_request import GetClientReferralTypesRequest
from swagger_client.models.get_client_referral_types_response import GetClientReferralTypesResponse
from swagger_client.models.get_client_services_request import GetClientServicesRequest
from swagger_client.models.get_client_services_response import GetClientServicesResponse
from swagger_client.models.get_client_visits_request import GetClientVisitsRequest
from swagger_client.models.get_client_visits_response import GetClientVisitsResponse
from swagger_client.models.get_clients_request import GetClientsRequest
from swagger_client.models.get_clients_response import GetClientsResponse
from swagger_client.models.get_contact_logs_request import GetContactLogsRequest
from swagger_client.models.get_contact_logs_response import GetContactLogsResponse
from swagger_client.models.get_contracts_request import GetContractsRequest
from swagger_client.models.get_contracts_response import GetContractsResponse
from swagger_client.models.get_cross_regional_client_associations_request import GetCrossRegionalClientAssociationsRequest
from swagger_client.models.get_cross_regional_client_associations_response import GetCrossRegionalClientAssociationsResponse
from swagger_client.models.get_custom_client_fields_request import GetCustomClientFieldsRequest
from swagger_client.models.get_custom_client_fields_response import GetCustomClientFieldsResponse
from swagger_client.models.get_custom_payment_methods_request import GetCustomPaymentMethodsRequest
from swagger_client.models.get_custom_payment_methods_response import GetCustomPaymentMethodsResponse
from swagger_client.models.get_enrollments_request import GetEnrollmentsRequest
from swagger_client.models.get_enrollments_response import GetEnrollmentsResponse
from swagger_client.models.get_gift_card_response import GetGiftCardResponse
from swagger_client.models.get_gift_cards_request import GetGiftCardsRequest
from swagger_client.models.get_locations_request import GetLocationsRequest
from swagger_client.models.get_locations_response import GetLocationsResponse
from swagger_client.models.get_packages_request import GetPackagesRequest
from swagger_client.models.get_packages_response import GetPackagesResponse
from swagger_client.models.get_products_request import GetProductsRequest
from swagger_client.models.get_products_response import GetProductsResponse
from swagger_client.models.get_programs_request import GetProgramsRequest
from swagger_client.models.get_programs_response import GetProgramsResponse
from swagger_client.models.get_required_client_fields_response import GetRequiredClientFieldsResponse
from swagger_client.models.get_resources_request import GetResourcesRequest
from swagger_client.models.get_resources_response import GetResourcesResponse
from swagger_client.models.get_sales_request import GetSalesRequest
from swagger_client.models.get_sales_response import GetSalesResponse
from swagger_client.models.get_schedule_items_request import GetScheduleItemsRequest
from swagger_client.models.get_schedule_items_response import GetScheduleItemsResponse
from swagger_client.models.get_services_request import GetServicesRequest
from swagger_client.models.get_services_response import GetServicesResponse
from swagger_client.models.get_session_types_request import GetSessionTypesRequest
from swagger_client.models.get_session_types_response import GetSessionTypesResponse
from swagger_client.models.get_sites_request import GetSitesRequest
from swagger_client.models.get_sites_response import GetSitesResponse
from swagger_client.models.get_staff_appointments_request import GetStaffAppointmentsRequest
from swagger_client.models.get_staff_appointments_response import GetStaffAppointmentsResponse
from swagger_client.models.get_staff_permissions_request import GetStaffPermissionsRequest
from swagger_client.models.get_staff_permissions_response import GetStaffPermissionsResponse
from swagger_client.models.get_staff_request import GetStaffRequest
from swagger_client.models.get_staff_response import GetStaffResponse
from swagger_client.models.get_time_clock_request import GetTimeClockRequest
from swagger_client.models.get_time_clock_response import GetTimeClockResponse
from swagger_client.models.get_waitlist_entries_request import GetWaitlistEntriesRequest
from swagger_client.models.get_waitlist_entries_response import GetWaitlistEntriesResponse
from swagger_client.models.gift_card import GiftCard
from swagger_client.models.gift_card_layout import GiftCardLayout
from swagger_client.models.issue_request import IssueRequest
from swagger_client.models.issue_response import IssueResponse
from swagger_client.models.level import Level
from swagger_client.models.liability import Liability
from swagger_client.models.location import Location
from swagger_client.models.membership_type_restriction import MembershipTypeRestriction
from swagger_client.models.model_class import ModelClass
from swagger_client.models.package import Package
from swagger_client.models.pagination_response import PaginationResponse
from swagger_client.models.product import Product
from swagger_client.models.program import Program
from swagger_client.models.prospect_stage import ProspectStage
from swagger_client.models.purchase_contract_request import PurchaseContractRequest
from swagger_client.models.purchase_contract_response import PurchaseContractResponse
from swagger_client.models.purchase_gift_card_request import PurchaseGiftCardRequest
from swagger_client.models.purchase_gift_card_response import PurchaseGiftCardResponse
from swagger_client.models.purchased_item import PurchasedItem
from swagger_client.models.relationship import Relationship
from swagger_client.models.remove_client_from_class_request import RemoveClientFromClassRequest
from swagger_client.models.remove_client_from_class_response import RemoveClientFromClassResponse
from swagger_client.models.remove_from_waitlist_request import RemoveFromWaitlistRequest
from swagger_client.models.remove_from_waitlist_response import RemoveFromWaitlistResponse
from swagger_client.models.resource import Resource
from swagger_client.models.sale import Sale
from swagger_client.models.sale_payment import SalePayment
from swagger_client.models.sales_rep import SalesRep
from swagger_client.models.send_password_reset_email_request import SendPasswordResetEmailRequest
from swagger_client.models.service import Service
from swagger_client.models.session_type import SessionType
from swagger_client.models.site import Site
from swagger_client.models.size import Size
from swagger_client.models.staff import Staff
from swagger_client.models.staff_permission_group import StaffPermissionGroup
from swagger_client.models.stored_card_info import StoredCardInfo
from swagger_client.models.substitute_class_teacher_request import SubstituteClassTeacherRequest
from swagger_client.models.substitute_class_teacher_response import SubstituteClassTeacherResponse
from swagger_client.models.substitute_teacher_class import SubstituteTeacherClass
from swagger_client.models.time_card_event import TimeCardEvent
from swagger_client.models.time_clock_report import TimeClockReport
from swagger_client.models.unavailability import Unavailability
from swagger_client.models.upcoming_autopay_event import UpcomingAutopayEvent
from swagger_client.models.update_appointment_request import UpdateAppointmentRequest
from swagger_client.models.update_appointment_response import UpdateAppointmentResponse
from swagger_client.models.update_client_request import UpdateClientRequest
from swagger_client.models.update_client_response import UpdateClientResponse
from swagger_client.models.update_client_service_request import UpdateClientServiceRequest
from swagger_client.models.update_client_service_response import UpdateClientServiceResponse
from swagger_client.models.update_client_visit_request import UpdateClientVisitRequest
from swagger_client.models.update_client_visit_response import UpdateClientVisitResponse
from swagger_client.models.update_contact_log_comment import UpdateContactLogComment
from swagger_client.models.update_contact_log_request import UpdateContactLogRequest
from swagger_client.models.update_contact_log_type import UpdateContactLogType
from swagger_client.models.upload_client_document_request import UploadClientDocumentRequest
from swagger_client.models.upload_client_document_response import UploadClientDocumentResponse
from swagger_client.models.upload_client_photo_request import UploadClientPhotoRequest
from swagger_client.models.upload_client_photo_response import UploadClientPhotoResponse
from swagger_client.models.user import User
from swagger_client.models.visit import Visit
from swagger_client.models.waitlist_entry import WaitlistEntry
|
|
import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from osipkd.models import (
DBSession
)
from osipkd.models.eis import (
Eis
)
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
SESS_ADD_FAILED = 'Tambah carousel gagal'
SESS_EDIT_FAILED = 'Edit carousel gagal'
rek_widget = widget.AutocompleteInputWidget(
size=60,
values = '/rekening/act/headof',
min_length=1)
class Amount(colander.Schema):
amt_tahun = colander.SchemaNode(
colander.Integer(),
default = 0,
missing = 0)
amt_bulan = colander.SchemaNode(
colander.Integer(),
default = 0,
missing = 0)
amt_minggu = colander.SchemaNode(
colander.Integer(),
default = 0,
missing = 0)
amt_hari = colander.SchemaNode(
colander.Integer(),
default = 0,
missing = 0)
class AddSchema(colander.Schema):
tahun = colander.SchemaNode(
colander.Integer())
kode = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=18),
oid='kode')
uraian = colander.SchemaNode(
colander.String(),
widget = rek_widget,
validator=colander.Length(max=128),
oid = 'uraian')
order_id = colander.SchemaNode(
colander.Integer(),
default = 0,
missing = 0)
is_aktif = colander.SchemaNode(
colander.Boolean())
amount = Amount()
class EditSchema(AddSchema):
id = colander.SchemaNode(colander.String(),
missing=colander.drop,
widget=widget.HiddenWidget(readonly=True))
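# Illustrative appstruct accepted by AddSchema (hypothetical values):
#   {'tahun': 2018, 'kode': '4.1.1.01', 'uraian': 'Pendapatan Pajak', 'order_id': 1,
#    'is_aktif': True,
#    'amount': {'amt_tahun': 0, 'amt_bulan': 0, 'amt_minggu': 0, 'amt_hari': 0}}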
class view_carousel(BaseViews):
########
# List #
########
@view_config(route_name='carousel', renderer='templates/eis-carousel/list.pt',
permission='read')
def view_list(self):
return dict(a={})
##########
# Action #
##########
@view_config(route_name='carousel-act', renderer='json',
permission='read')
def carousel_act(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
if url_dict['act']=='grid':
columns = []
columns.append(ColumnDT('id'))
columns.append(ColumnDT('tahun'))
columns.append(ColumnDT('kode'))
columns.append(ColumnDT('uraian'))
columns.append(ColumnDT('amt_tahun', filter=self._number_format))
columns.append(ColumnDT('amt_bulan', filter=self._number_format))
columns.append(ColumnDT('amt_minggu', filter=self._number_format))
columns.append(ColumnDT('amt_hari', filter=self._number_format))
columns.append(ColumnDT('order_id', filter=self._number_format))
columns.append(ColumnDT('is_aktif', filter=self._number_format))
query = DBSession.query(Eis)
rowTable = DataTables(req, Eis, query, columns)
return rowTable.output_result()
#######
# Add #
#######
def form_validator(self, form, value):
if 'id' in form.request.matchdict:
uid = form.request.matchdict['id']
q = DBSession.query(Eis).filter_by(id=uid)
row = q.first()
else:
row = None
def get_form(self, class_form, row=None):
schema = class_form(validator=self.form_validator)
schema = schema.bind()
schema.request = self.request
if row:
schema.deserialize(row)
return Form(schema, buttons=('simpan','batal'))
def save(self, values, user, row=None):
if not row:
row = Eis()
row.created = datetime.now()
row.create_uid = user.id
row.from_dict(values)
row.updated = datetime.now()
row.update_uid = user.id
row.is_aktif = 'is_aktif' in values and values['is_aktif'] and 1 or 0
DBSession.add(row)
DBSession.flush()
return row
def save_request(self, values, row=None):
if 'id' in self.request.matchdict:
values['id'] = self.request.matchdict['id']
row = self.save(values, self.request.user, row)
self.request.session.flash('Carousel sudah disimpan.')
def route_list(self):
return HTTPFound(location=self.request.route_url('carousel'))
def session_failed(self, session_name):
        r = dict(form=self.session[session_name])
        del self.session[session_name]
        return r
@view_config(route_name='carousel-add', renderer='templates/eis-carousel/add.pt',
permission='add')
def view_carousel_add(self):
req = self.request
ses = self.session
form = self.get_form(AddSchema)
if req.POST:
if 'simpan' in req.POST:
controls = req.POST.items()
try:
c = form.validate(controls)
except ValidationFailure, e:
#req.session[SESS_ADD_FAILED] = e.render()
#form.set_appstruct(rowd)
return dict(form=form)
#return HTTPFound(location=req.route_url('carousel-add'))
self.save_request(dict(controls))
return self.route_list()
elif SESS_ADD_FAILED in req.session:
return dict(form=form)
#return self.session_failed(SESS_ADD_FAILED)
return dict(form=form)
########
# Edit #
########
def query_id(self):
return DBSession.query(Eis).filter_by(id=self.request.matchdict['id'])
def id_not_found(self):
        msg = 'Carousel ID %s Tidak Ditemukan.' % self.request.matchdict['id']
        self.request.session.flash(msg, 'error')
        return self.route_list()
@view_config(route_name='carousel-edit', renderer='templates/eis-carousel/add.pt',
permission='edit')
def view_carousel_edit(self):
request = self.request
row = self.query_id().first()
if not row:
            return self.id_not_found()
#values = row.to_dict()
rowd={}
rowd['id'] = row.id
rowd['tahun'] = row.tahun
rowd['kode'] = row.kode
rowd['uraian'] = row.uraian
rowd['order_id'] = row.order_id
rowd['is_aktif'] = row.is_aktif
rowd['amount'] = {}
rowd['amount']['amt_tahun'] = row.amt_tahun
rowd['amount']['amt_bulan'] = row.amt_bulan
rowd['amount']['amt_minggu'] = row.amt_minggu
rowd['amount']['amt_hari'] = row.amt_hari
form = self.get_form(EditSchema)
form.set_appstruct(rowd)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
print controls
try:
c = form.validate(controls)
except ValidationFailure, e:
return dict(form=form)
#request.session[SESS_EDIT_FAILED] = e.render()
#return HTTPFound(location=request.route_url('carousel-edit',
# id=row.id))
self.save_request(dict(controls), row)
return self.route_list()
elif SESS_EDIT_FAILED in request.session:
return self.session_failed(SESS_EDIT_FAILED)
return dict(form=form)
##########
# Delete #
##########
@view_config(route_name='carousel-delete', renderer='templates/eis-carousel/delete.pt',
permission='delete')
def view_carousel_delete(self):
request = self.request
q = self.query_id()
row = q.first()
if not row:
return self.id_not_found(request)
form = Form(colander.Schema(), buttons=('hapus','batal'))
if request.POST:
if 'hapus' in request.POST:
                msg = 'Carousel ID %d %s sudah dihapus.' % (row.id, row.uraian)
try:
q.delete()
DBSession.flush()
except:
                    msg = 'Carousel ID %d %s tidak dapat dihapus.' % (row.id, row.uraian)
request.session.flash(msg)
return self.route_list()
return dict(row=row,
form=form.render())
|
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.preassembler.sitemapper import default_mapper as sm, MappedStatement
from indra.statements import *
from indra.util import unicode_strs
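# Context for the assertions below: the "invalid" agents use the frequently cited
# positions T183/Y185 (MAPK1/ERK2) and T201/Y203 (MAPK3/ERK1); INDRA's curated
# site map remaps them to the human UniProt positions T185/Y187 and T202/Y204.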
def test_check_agent_mod():
mapk1_valid = Agent('MAPK1',
mods=[ModCondition('phosphorylation', 'T', '185'),
ModCondition('phosphorylation', 'Y', '187')],
db_refs={'UP': 'P28482'})
res_valid = sm._map_agent_sites(mapk1_valid)
assert len(res_valid) == 2
assert res_valid[0] == []
assert isinstance(res_valid[1], Agent)
assert res_valid[1].matches(mapk1_valid)
mapk1_invalid = Agent('MAPK1',
mods=[ModCondition('phosphorylation', 'T', '183'),
ModCondition('phosphorylation', 'Y', '185')],
db_refs={'UP': 'P28482'})
res_invalid = sm._map_agent_sites(mapk1_invalid)
assert len(res_invalid) == 2
assert isinstance(res_invalid[0], list)
assert isinstance(res_invalid[1], Agent)
invalid_sites = res_invalid[0]
assert len(invalid_sites) == 2
map183 = invalid_sites[0]
assert map183[0] == ('MAPK1', 'T', '183')
assert len(map183[1]) == 3
assert map183[1][0] == 'T'
assert map183[1][1] == '185'
map185 = invalid_sites[1]
assert map185[0] == ('MAPK1', 'Y', '185')
assert len(map185[1]) == 3
assert map185[1][0] == 'Y'
assert map185[1][1] == '187'
new_agent = res_invalid[1]
assert len(new_agent.mods) == 2
assert new_agent.mods[0].matches(ModCondition('phosphorylation',
'T', '185'))
assert new_agent.mods[1].matches(ModCondition('phosphorylation',
'Y', '187'))
assert unicode_strs((mapk1_valid, res_valid, mapk1_invalid, res_invalid,
invalid_sites, map183, map185, new_agent))
def test_site_map_modification():
mapk1_invalid = Agent('MAPK1',
mods=[ModCondition('phosphorylation', 'T', '183'),
ModCondition('phosphorylation', 'Y', '185')],
db_refs={'UP': 'P28482'})
mapk3_invalid = Agent('MAPK3',
mods=[ModCondition('phosphorylation', 'T', '201')],
db_refs={'UP': 'P27361'})
map2k1_invalid = Agent('MAP2K1',
mods=[ModCondition('phosphorylation', 'S', '217'),
ModCondition('phosphorylation', 'S', '221')],
db_refs={'UP': 'Q02750'})
st1 = Phosphorylation(mapk1_invalid, mapk3_invalid, 'Y', '203')
st2 = Phosphorylation(map2k1_invalid, mapk1_invalid, 'Y', '218')
res = sm.map_sites([st1, st2])
assert len(res) == 2
valid_stmts = res[0]
mapped_stmts = res[1]
assert isinstance(valid_stmts, list)
assert isinstance(mapped_stmts, list)
assert len(valid_stmts) == 0
assert len(mapped_stmts) == 2
# MAPK1 -> MAPK3
mapped_stmt1 = mapped_stmts[0]
assert isinstance(mapped_stmt1, MappedStatement)
assert mapped_stmt1.original_stmt == st1
assert isinstance(mapped_stmt1.mapped_mods, list)
assert len(mapped_stmt1.mapped_mods) == 4 # FIXME
ms = mapped_stmt1.mapped_stmt
assert isinstance(ms, Statement)
agent1 = ms.enz
agent2 = ms.sub
assert agent1.name == 'MAPK1'
assert len(agent1.mods) == 2
assert agent1.mods[0].matches(ModCondition('phosphorylation', 'T', '185'))
assert agent1.mods[1].matches(ModCondition('phosphorylation', 'Y', '187'))
assert agent2.mods[0].matches(ModCondition('phosphorylation', 'T', '202'))
assert ms.residue == 'Y'
assert ms.position == '204'
# MAP2K1 -> MAPK1
mapped_stmt2 = mapped_stmts[1]
assert isinstance(mapped_stmt2, MappedStatement)
assert mapped_stmt2.original_stmt == st2
assert isinstance(mapped_stmt2.mapped_mods, list)
assert len(mapped_stmt2.mapped_mods) == 5 # FIXME
ms = mapped_stmt2.mapped_stmt
assert isinstance(ms, Statement)
agent1 = ms.enz
agent2 = ms.sub
assert agent1.name == 'MAP2K1'
assert len(agent1.mods) == 2
assert agent1.mods[0].matches(ModCondition('phosphorylation', 'S', '218'))
assert agent1.mods[1].matches(ModCondition('phosphorylation', 'S', '222'))
assert len(agent2.mods) == 2
assert agent2.mods[0].matches(ModCondition('phosphorylation', 'T', '185'))
assert agent2.mods[1].matches(ModCondition('phosphorylation', 'Y', '187'))
# The incorrect phosphorylation residue is passed through to the new
# statement unchanged
assert ms.residue == 'Y'
assert ms.position == '218'
# Check for unicode
assert unicode_strs((mapk1_invalid, mapk3_invalid, map2k1_invalid, st1,
st2, res, valid_stmts, mapped_stmts))
def test_site_map_activity_modification():
mc = [ModCondition('phosphorylation', 'T', '183'),
ModCondition('phosphorylation', 'Y', '185')]
mapk1 = Agent('MAPK1', mods=mc, db_refs={'UP': 'P28482'})
st1 = ActiveForm(mapk1, 'kinase', True)
(valid, mapped) = sm.map_sites([st1])
assert len(valid) == 0
assert len(mapped) == 1
ms = mapped[0]
assert ms.mapped_mods[0][0] == ('MAPK1', 'T', '183')
assert ms.mapped_mods[0][1][0] == 'T'
assert ms.mapped_mods[0][1][1] == '185'
assert ms.mapped_mods[1][0] == ('MAPK1', 'Y', '185')
assert ms.mapped_mods[1][1][0] == 'Y'
assert ms.mapped_mods[1][1][1] == '187'
assert ms.original_stmt == st1
assert ms.mapped_stmt.agent.mods[0].matches(ModCondition('phosphorylation',
'T', '185'))
assert ms.mapped_stmt.agent.mods[1].matches(ModCondition('phosphorylation',
'Y', '187'))
assert unicode_strs((mc, mapk1, st1, valid, mapped))
def test_site_map_selfmodification():
mapk1_invalid = Agent('MAPK1',
mods=[ModCondition('phosphorylation', 'T', '183')],
db_refs={'UP': 'P28482'})
st1 = SelfModification(mapk1_invalid, 'Y', '185')
(valid, mapped) = sm.map_sites([st1])
assert len(valid) == 0
assert len(mapped) == 1
mapped_stmt = mapped[0]
assert mapped_stmt.mapped_mods[0][0] == ('MAPK1', 'T', '183')
assert mapped_stmt.mapped_mods[0][1][0] == 'T'
assert mapped_stmt.mapped_mods[0][1][1] == '185'
assert mapped_stmt.mapped_mods[1][0] == ('MAPK1', 'Y', '185')
assert mapped_stmt.mapped_mods[1][1][0] == 'Y'
assert mapped_stmt.mapped_mods[1][1][1] == '187'
assert mapped_stmt.original_stmt == st1
ms = mapped_stmt.mapped_stmt
agent1 = ms.enz
assert agent1.mods[0].matches(ModCondition('phosphorylation', 'T', '185'))
assert ms.residue == 'Y'
assert ms.position == '187'
assert unicode_strs((mapk1_invalid, st1, valid, mapped))
# The following Statements are all handled by the same block of code and hence
# can be tested in similar fashion
def test_site_map_complex():
(mapk1_invalid, mapk3_invalid) = get_invalid_mapks()
    st1 = Complex([mapk1_invalid, mapk3_invalid])
res = sm.map_sites([st1])
check_validated_mapks(res, st1)
def test_site_map_rasgef():
(mapk1_invalid, mapk3_invalid) = get_invalid_mapks()
st1 = RasGef(mapk1_invalid, mapk3_invalid)
res = sm.map_sites([st1])
check_validated_mapks(res, st1)
def test_site_map_rasgap():
(mapk1_invalid, mapk3_invalid) = get_invalid_mapks()
st1 = RasGap(mapk1_invalid, mapk3_invalid)
res = sm.map_sites([st1])
check_validated_mapks(res, st1)
def test_site_map_activation():
(mapk1_invalid, mapk3_invalid) = get_invalid_mapks()
st1 = Activation(mapk1_invalid, mapk3_invalid, 'kinase')
res = sm.map_sites([st1])
check_validated_mapks(res, st1)
def test_site_map_hgnc():
"""Make sure site mapping is done even if only HGNC ID is given."""
(mapk1_invalid, mapk3_invalid) = get_invalid_mapks()
mapk1_invalid.db_refs = {'HGNC': '6871'}
st1 = ActiveForm(mapk1_invalid, 'kinase', True)
(valid, mapped) = sm.map_sites([st1])
assert len(valid) == 0
assert len(mapped) == 1
def get_invalid_mapks():
"""A handy function for getting the invalid MAPK agents we want."""
mapk1_invalid = Agent('MAPK1',
mods=[ModCondition('phosphorylation', 'T', '183'),
ModCondition('phosphorylation', 'Y', '185')],
db_refs={'UP': 'P28482'})
mapk3_invalid = Agent('MAPK3',
mods=[ModCondition('phosphorylation', 'T', '201'),
ModCondition('phosphorylation', 'Y', '203')],
db_refs={'UP': 'P27361'})
assert unicode_strs((mapk1_invalid, mapk3_invalid))
return (mapk1_invalid, mapk3_invalid)
def check_validated_mapks(res, st1):
"""Validate that the invalid MAPKs have been fixed appropriately."""
assert len(res) == 2
valid_stmts = res[0]
mapped_stmts = res[1]
assert isinstance(valid_stmts, list)
assert isinstance(mapped_stmts, list)
assert len(valid_stmts) == 0
assert len(mapped_stmts) == 1
mapped_stmt = mapped_stmts[0]
assert isinstance(mapped_stmt, MappedStatement)
assert mapped_stmt.original_stmt == st1
assert isinstance(mapped_stmt.mapped_mods, list)
assert len(mapped_stmt.mapped_mods) == 4
ms = mapped_stmt.mapped_stmt
assert isinstance(ms, Statement)
agents = ms.agent_list()
assert len(agents) == 2
agent1 = agents[0]
agent2 = agents[1]
assert agent1.name == 'MAPK1'
assert len(agent1.mods) == 2
assert agent1.mods[0].matches(ModCondition('phosphorylation', 'T', '185'))
assert agent1.mods[1].matches(ModCondition('phosphorylation', 'Y', '187'))
assert agent2.mods[0].matches(ModCondition('phosphorylation', 'T', '202'))
assert agent2.mods[1].matches(ModCondition('phosphorylation', 'Y', '204'))
assert unicode_strs((res, st1))
|
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import logging
import os
from . import nxprocess
from ..visualization import scene
from ..visualization import scene_view
from ..visualization import defaultsettings
from ..utils import instance
from ..utils import units
from ..io import xiaedf
logger = logging.getLogger(__name__)
def scene_modifyaxes(sc, parameters):
if parameters.get("originzero", False):
sc.originzero()
if parameters.get("transpose", False):
sc.transpose(True)
if parameters.get("flipx", False):
sc.flipx(increasing=parameters.get("incx", None))
elif parameters.get("incx", False):
sc.increasingx(True)
if parameters.get("flipy", False):
sc.flipy(increasing=parameters.get("incy", None))
elif parameters.get("incy", False):
sc.increasingy(True)
if parameters.get("aspectratio", None) is not None:
sc.aspectratio = parameters["aspectratio"]
def scene_globalscaling(sc, parameters):
rlo = parameters.get("rlo", None)
rhi = parameters.get("rhi", None)
if rlo is not None or rhi is not None:
        vmin, vmax = sc.vminmax
        vmin0 = np.array(vmin)
        dv = np.array(vmax) - vmin0
        if rlo is None:
            vmin = None
        else:
            rlo, func = instance.asarrayf(rlo)
            vmin = vmin0 + dv * func(rlo)
        if rhi is None:
            vmax = None
        else:
            rhi, func = instance.asarrayf(rhi)
            # scale from the original minimum so the result does not depend on
            # whether a relative lower bound was applied (or cleared) above
            vmax = vmin0 + dv * func(rhi)
        sc.scale(vmin=vmin, vmax=vmax)
alo = parameters.get("alo", None)
ahi = parameters.get("ahi", None)
if alo is not None or ahi is not None:
sc.scale(vmin=alo, vmax=ahi)
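# Example of the two scaling modes (illustrative numbers): with sc.vminmax equal to
# ([0.0], [100.0]), rlo=0.1/rhi=0.9 rescales the displayed range to [10, 90],
# whereas alo/ahi set absolute lower/upper limits directly.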
def createwriter(parameters, filename):
filename, ext = os.path.splitext(filename)
filename = filename + ".xlsx"
if filename not in parameters["writers"]:
writer = pd.ExcelWriter(filename)
parameters["writers"][filename] = writer
return parameters["writers"][filename]
def savewriters(parameters):
for filename, writer in parameters["writers"].items():
writer.save()
logger.info("Saved {}".format(filename))
def savefigures(filename, parameters):
figs = [plt.figure(i) for i in plt.get_fignums()]
if len(figs) > 1:
filename, ext = os.path.splitext(filename)
filename = "{}_{{:02d}}{}".format(filename, ext)
for i, fig in enumerate(figs):
name = filename.format(i + 1)
if not os.path.exists(os.path.dirname(name)):
os.makedirs(os.path.dirname(name))
saveparams = parameters.get("saveparams", {})
fig.savefig(name, **saveparams)
logger.info("Saved {}".format(name))
def closefigures():
for i in plt.get_fignums():
plt.close(i)
def specname(info):
filename = "spec{:02d}".format(info["spec"])
radix = "_".join((info["sample"], filename))
filename = os.path.join(info["sample"], radix, "{}.dat".format(radix))
return os.path.join(info["root"], filename)
def extractname(item):
if instance.isarray(item):
return item[0]
else:
return item
def ctrfilenames(info):
radix = "_".join((info["sample"], info["dataset"]))
root = info["root"]
sample = info["sample"]
fformat = xiaedf.xiaformat_ctr(radix, info["num"])
return [
os.path.join(root, sample, radix, "zap", fformat.format(extractname(item)))
for item in info["items"]
]
def specfilename(info):
filename = "spec{:02d}".format(info["spec"])
radix = "_".join((info["sample"], filename))
filename = os.path.join(info["sample"], radix, "{}.dat".format(radix))
return os.path.join(info["root"], filename)
def createscene(parameters, dependencies=None, output=None):
parameters = parameters.copy()
objects = parameters.pop("objects")
if dependencies is None:
dependencies = []
# Create scene and connect it to axes
figsize = parameters.get("figsize", None)
if figsize is None:
figsize = defaultsettings.default("figure.figsize")
defaultsettings.adapttofigsize(figsize, **parameters)
f, ax = plt.subplots(figsize=figsize)
if parameters.get("noaxes", False):
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# ax.set_axis_off()
sc = scene.Scene(
unit0=parameters.get("unitfast", "mm"),
unit1=parameters.get("unitslow", "mm"),
title=parameters.get("title", None),
)
sc.setaxes(ax)
# Add objects to scene
dependency_ctr = -1
for info in objects:
plotparams = info.get("plotparams", {}).copy()
plotparams["scene"] = sc
dataparams = info.get("dataparams", {}).copy()
dataparams["instrument"] = parameters.get("instrument", None)
pmin = plotparams.pop("lo", [])
pmax = plotparams.pop("hi", [])
if "spec" in info:
filename = specname(info)
item = scene_view.XanesSpec(
filename, info.get("items", None), plotparams=plotparams, **dataparams
)
elif "sample" in info:
filenames = ctrfilenames(info)
item = scene_view.ZapRoiMap(
filenames, info.get("items", None), plotparams=plotparams, **dataparams
)
else:
dependency_ctr += 1
i = info.get("dependency", dependency_ctr)
uri = str(dependencies[i])
item = scene_view.Nexus(
uri, info.get("items", None), plotparams=plotparams, **dataparams
)
if pmin and pmax:
item.selfscale(pmin=pmin, pmax=pmax)
item.useaxesnames()
# Modify axes
scene_modifyaxes(sc, parameters)
# Global intensity scaling
scene_globalscaling(sc, parameters)
# Save interpolated data
for item in sc:
try:
item.interpolatesave()
except AttributeError:
pass
# Plot/save figure
sc.updateview()
if parameters.get("tight_layout", False):
plt.tight_layout()
if output is not None:
savefigures(output, parameters)
if parameters.get("plot", True):
# TODO: this hangs when another window is opened in the mean time:
# sc.ax.get_figure().canvas.mpl_connect('resize_event', lambda event: sc.updateview())
plt.show()
closefigures()
class Task(nxprocess.Task):
"""Create scene image"""
def _parameters_defaults(self):
super(Task, self)._parameters_defaults()
self.required_parameters |= {
"objects",
"instrument",
"figsize",
"noaxes",
"unitfast",
"unitslow",
"tight_layout",
"plot",
"originzero",
"transpose",
"flipx",
"flipy",
"incx",
"incy",
"aspectratio",
"rlo",
"rhi",
"alo",
"ahi",
"title",
"saveparams",
}
parameters = self.parameters
parameters["figsize"] = parameters.get("figsize", None)
parameters["noaxes"] = parameters.get("noaxes", False)
parameters["tight_layout"] = parameters.get("tight_layout", False)
parameters["plot"] = parameters.get("plot", False)
parameters["unitfast"] = parameters.get("unitfast", "mm")
parameters["unitslow"] = parameters.get("unitslow", "mm")
parameters["aspectratio"] = parameters.get("aspectratio", None)
parameters["originzero"] = parameters.get("originzero", False)
parameters["transpose"] = parameters.get("transpose", False)
parameters["flipx"] = parameters.get("flipx", False)
parameters["flipy"] = parameters.get("flipy", False)
parameters["incx"] = parameters.get("incx", False)
parameters["incy"] = parameters.get("incy", False)
parameters["rlo"] = parameters.get("rlo", None)
parameters["rhi"] = parameters.get("rhi", None)
parameters["alo"] = parameters.get("alo", None)
parameters["ahi"] = parameters.get("ahi", None)
parameters["title"] = parameters.get("title", None)
parameters["saveparams"] = parameters.get("saveparams", {})
def _execute(self):
createscene(
self.parameters,
output=self.temp_localpath.path,
dependencies=self.previous_outputs,
)
@property
def temp_localpath(self):
path = super(Task, self).temp_localpath
return path.parent[path.name + ".png"]
@property
def output_localpath(self):
path = super(Task, self).output_localpath
return path.parent[path.name + ".png"]
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from virtman.openstack.common.gettextutils import _
from virtman.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
message = _('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r') % {'description': description,
'cmd': cmd,
'exit_code': exit_code,
'stdout': stdout,
'stderr': stderr}
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these code.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be stdlib_logging.DEBUG or
stdlib_logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=_('Command requested root, but did not '
'specify a root helper.'))
cmd = shlex.split(root_helper) + list(cmd)
    cmd = [str(c) for c in cmd]
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, 'Running cmd (subprocess): %s',
' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
for _i in six.moves.range(20):
# NOTE(russellb) 20 is an arbitrary number of retries to
# prevent any chance of looping forever here.
try:
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EINTR):
continue
raise
break
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.log(loglevel, 'Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.log(loglevel, '%r failed. Retrying.', cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
    :param discard_warnings: True | False. Defaults to False. If set to True,
                             stderr is cleared for commands that succeed.
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug('Running cmd (SSH): %s', cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
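def _example_execute_usage():
    # Illustrative sketch, not part of the original module. It assumes a POSIX
    # host where 'ls' and 'true' are on the PATH, and simply exercises execute()
    # and trycmd() as documented above.
    #
    # execute() returns (stdout, stderr) and raises ProcessExecutionError when
    # the exit code is not in check_exit_code.
    out, err = execute('ls', '-l', '/tmp')
    # Allow up to three attempts, sleeping briefly between retries.
    execute('true', attempts=3, delay_on_retry=True)
    # trycmd() converts a failure into a non-empty err string instead of raising.
    out, err = trycmd('ls', '/nonexistent-path')
    if err:
        LOG.debug('command failed: %s', err)
    return out, err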
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rmake_test import rmakehelp
import StringIO
import unittest
testFrozenLead1 = '\xbe\xeb\xab\xba\x04\x00\x00\x00\00\00\00\00\00\x00\x00\x02\x00\x01\x05\x00\x04\x00\x01\xe2@\x06\x00\x04\x00\x12\xd6\x87'
testFrozenLead2 = 'this is a bad lead that will miserably fail'
testFrozenHeader1 = 'content-type: binary/xml\n'
testFrozenMessage1 = '\xbe\xeb\xab\xba\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x05\x00\x04\x00\x00\x00(\x06\x00\x04\x00\x00\x00\x16content-type: application/octect-stream\nThis is a test message'
testFrozenMessage2 = "\xbe\xeb\xab\xba\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x05\x00\x04\x00\x00\x00'\x06\x00\x04\x00\x00\x00\x17content-type: application/binary-stuff\nFirst line\nSecond line\n"
testPayload1 = "This is a test message"
class EnvelopeTest(rmakehelp.RmakeHelper):
def importPlugins(self):
global envelope
from rmake.messagebus import envelope
msgPayloadSize = 1234567
msgHeaderSize = 123456
def testLeadFreeze(self):
f = envelope.PLead()
f.msgPayloadSize.set(self.msgPayloadSize)
self.failUnlessEqual(f.msgPayloadSize(), self.msgPayloadSize)
f.msgHeaderSize.set(self.msgHeaderSize)
self.failUnlessEqual(f.msgHeaderSize(), self.msgHeaderSize)
data = f.freeze()
self.failUnlessEqual(data, testFrozenLead1)
def testHeaderFreeze(self):
f = envelope.PHeader()
f['content-type'] = "binary/xml"
data = f.freeze()
self.failUnlessEqual(data, testFrozenHeader1)
ff = envelope.PHeader()
ff.thawString(data)
self.failUnlessEqual(ff['content-type'], "binary/xml")
def testLeadChunkedThaw(self):
s1 = StringIO.StringIO(testFrozenLead1[:10])
s2 = StringIO.StringIO(testFrozenLead1[10:])
f = envelope.PLead()
self.failIf(f.thawFromStream(s1.read))
self.failUnless(f.thawFromStream(s2.read))
self.failUnlessEqual(f.msgHeaderSize(), self.msgHeaderSize)
self.failUnlessEqual(f.msgPayloadSize(), self.msgPayloadSize)
def testLeadThawBad(self):
f = envelope.PLead()
self.failUnlessRaises(envelope.BadMagicError, f.thawString, testFrozenLead2)
def testMessageThawLead(self):
m = envelope.Envelope()
s1 = StringIO.StringIO(testFrozenLead1[:10])
s2 = StringIO.StringIO(testFrozenLead1[10:])
self.failIf(m.thawLead(s1.read))
self.failUnless(m.thawLead(s2.read))
def testMessageThawHeader(self):
m = envelope.Envelope()
m.setHeaderSize(len(testFrozenHeader1))
s1 = StringIO.StringIO(testFrozenHeader1[:4])
s2 = StringIO.StringIO(testFrozenHeader1[4:])
self.failIf(m.thawHeader(s1.read))
self.failUnless(m.thawHeader(s2.read))
def testMessageFreezeBasic(self):
m = envelope.Envelope()
payloadStream = StringIO.StringIO(testPayload1)
m.setPayloadStream(payloadStream)
m.setPayloadSize(len(testPayload1))
payloadStream.seek(0)
m.setContentType('application/octect-stream')
data = m.freeze()
self.failUnlessEqual(data, testFrozenMessage1)
def testMessageThawBasic(self):
m = envelope.Envelope()
s = StringIO.StringIO(testFrozenMessage1)
        m.thawFromStream(s.read, blocking=True)
ss = m.getPayloadStream()
self.failUnlessEqual(ss.read(), testPayload1)
def testMessageThawChunked(self):
# Tests chunked reads from the stream
class ChunkedReadStream(object):
def __init__(self, stream):
self.stream = stream
self.chunkSize = 3
def read(self, size=None):
toread = self.chunkSize
if size:
toread = min(size, toread)
return self.stream.read(toread)
m = envelope.Envelope()
s = StringIO.StringIO(testFrozenMessage1)
cs = ChunkedReadStream(s)
m.thawFromStream(cs.read, blocking=True)
self.failUnlessEqual(m.readPayload(), testPayload1)
# Same test, with chunked reads for thawing
s.seek(0)
while not m.thawFromStream(cs.read):
pass
self.failUnlessEqual(m.readPayload(10), testPayload1[:10])
# Same test, testing hasComplete*
s.seek(0)
m.reset()
leadSize = envelope.PLead.frozenSize
bytesRead = 0
while bytesRead < leadSize - cs.chunkSize:
m.thawFromStream(cs.read)
self.failIf(m.hasCompleteLead())
bytesRead += cs.chunkSize
m.thawFromStream(cs.read)
self.failUnless(m.hasCompleteLead())
self.failIf(m.hasCompleteHeader())
# Header size
hs = m.getHeaderSize()
bytesRead = 0
while bytesRead < hs - cs.chunkSize:
m.thawFromStream(cs.read)
self.failIf(m.hasCompleteHeader())
bytesRead += cs.chunkSize
m.thawFromStream(cs.read)
self.failUnless(m.hasCompleteHeader())
self.failIf(m.hasCompletePayload())
        # As above, stop one chunk short so completion can be asserted on the final read
bytesRead = 0
while bytesRead < len(testPayload1) - cs.chunkSize:
m.thawFromStream(cs.read)
self.failIf(m.hasCompletePayload())
bytesRead += cs.chunkSize
m.thawFromStream(cs.read)
self.failUnless(m.hasCompletePayload())
def testMessageContentType(self):
m = envelope.Envelope()
m.setContentType("text/foobared")
stream = StringIO.StringIO()
m.freezeToStream(stream.write)
stream.seek(0)
m.thawFromStream(stream.read)
self.failUnlessEqual(m.getContentType(), "text/foobared")
def testMessageWrite(self):
m = envelope.Envelope()
ct = "application/binary-stuff"
m.setContentType(ct)
line1 = "First line\n"
line2 = "Second line\n"
m.write(line1)
m.write(line2)
data = m.freeze()
self.failUnlessEqual(data, testFrozenMessage2)
self.failUnlessEqual(m.getContentType(), ct)
def testMessageSeekTell(self):
m = envelope.Envelope()
ct = "application/binary-stuff"
m.setContentType(ct)
line1 = "First line\n"
line2 = "Second line\n"
m.write(line1)
self.failUnlessEqual(m.tell(), len(line1))
m.write(line2)
self.failUnlessEqual(m.tell(), len(line1) + len(line2))
m.seek(0)
self.failUnlessEqual(m.tell(), 0)
m.seek(0, 2)
self.failUnlessEqual(m.tell(), len(line1) + len(line2))
m.seek(0)
m.write(line2)
self.failUnlessEqual(m.tell(), len(line2))
# Truncate up to 15 bytes
m.truncate(15)
self.failUnlessEqual(m.tell(), 15)
# Seek to 10
m.seek(10)
m.truncate()
self.failUnlessEqual(m.tell(), 10)
def testSlowMessageRead(self):
m = envelope.Envelope()
ct = "application/binary-stuff"
m.setContentType(ct)
line1 = "First line\n"
line2 = "Second line\n"
m.write(line1)
m.write(line2)
data = m.freeze()
# read in bytes one at a time.
m = envelope.Envelope()
for i in range(0, len(data)):
stream = StringIO.StringIO()
stream.write(data[i])
stream.seek(0)
complete = m.thawFromStream(stream.read)
if complete:
break
assert(i == (len(data) - 1))
assert(complete)
assert(m.freeze() == data)
def testSlowMessageWrite(self):
m = envelope.Envelope()
ct = "application/binary-stuff"
m.setContentType(ct)
line1 = "First line\n"
line2 = "Second line\n"
m.write(line1)
m.write(line2)
data = m.freeze()
outStream = StringIO.StringIO()
def writeone(data):
outStream.write(data[0])
return 1
for i in range(0, len(data)):
complete = m.freezeToStream(writeone)
if complete:
break
assert(complete)
outStream.seek(0)
assert(outStream.read() == data)
def testWriteMessageToTwoSources(self):
# We need to be able to write the same message
# to two sources simultaneously.
# Having a writer object gives us a separate marker
# into the payload stream, etc.
m = envelope.Envelope()
ct = "application/binary-stuff"
m.setContentType(ct)
line1 = "First line\n"
line2 = "Second line\n"
m.write(line1)
m.write(line2)
writer1 = m.getWriter()
writer2 = m.getWriter()
data = m.freeze()
outStream1 = StringIO.StringIO()
outStream2 = StringIO.StringIO()
def writeone(data):
outStream1.write(data[0])
return 1
def writeone2(data):
outStream2.write(data[0])
return 1
writer1Complete = False
writer2Complete = False
while not writer1Complete and not writer2Complete:
writer1Complete = writer1(writeone)
writer2Complete = writer2(writeone2)
outStream1.seek(0)
assert(outStream1.read() == data)
outStream2.seek(0)
assert(outStream2.read() == data)
|
|
# Copyright 2015 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log
from oslo_utils import uuidutils
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.db import api as db_api
from neutron.i18n import _LE
from neutron import ipam
from neutron.ipam import driver as ipam_base
from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import subnet_alloc
from neutron.ipam import utils as ipam_utils
from neutron import manager
LOG = log.getLogger(__name__)
class NeutronDbSubnet(ipam_base.Subnet):
"""Manage IP addresses for Neutron DB IPAM driver.
This class implements the strategy for IP address allocation and
deallocation for the Neutron DB IPAM driver.
Allocation for IP addresses is based on the concept of availability
ranges, which were already used in Neutron's DB base class for handling
IPAM operations.
"""
@classmethod
def create_allocation_pools(cls, subnet_manager, session, pools):
for pool in pools:
subnet_manager.create_pool(
session,
netaddr.IPAddress(pool.first).format(),
netaddr.IPAddress(pool.last).format())
@classmethod
def create_from_subnet_request(cls, subnet_request, ctx):
ipam_subnet_id = uuidutils.generate_uuid()
subnet_manager = ipam_db_api.IpamSubnetManager(
ipam_subnet_id,
None)
# Create subnet resource
session = ctx.session
subnet_manager.create(session)
# If allocation pools are not specified, define them around
# the subnet's gateway IP
if not subnet_request.allocation_pools:
pools = ipam_utils.generate_pools(subnet_request.subnet_cidr,
subnet_request.gateway_ip)
else:
pools = subnet_request.allocation_pools
# Create IPAM allocation pools and availability ranges
cls.create_allocation_pools(subnet_manager, session, pools)
return cls(ipam_subnet_id,
ctx,
cidr=subnet_request.subnet_cidr,
allocation_pools=pools,
gateway_ip=subnet_request.gateway_ip,
tenant_id=subnet_request.tenant_id,
subnet_id=subnet_request.subnet_id,
subnet_id_not_set=True)
@classmethod
def load(cls, neutron_subnet_id, ctx):
"""Load an IPAM subnet from the database given its neutron ID.
:param neutron_subnet_id: neutron subnet identifier.
"""
ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id(
ctx.session, neutron_subnet_id)
if not ipam_subnet:
LOG.error(_LE("Unable to retrieve IPAM subnet as the referenced "
"Neutron subnet %s does not exist"),
neutron_subnet_id)
raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id)
pools = []
for pool in ipam_subnet.allocation_pools:
pools.append(netaddr.IPRange(pool['first_ip'], pool['last_ip']))
neutron_subnet = cls._fetch_subnet(ctx, neutron_subnet_id)
return cls(ipam_subnet['id'],
ctx,
cidr=neutron_subnet['cidr'],
allocation_pools=pools,
gateway_ip=neutron_subnet['gateway_ip'],
tenant_id=neutron_subnet['tenant_id'],
subnet_id=neutron_subnet_id)
@classmethod
def _fetch_subnet(cls, context, id):
plugin = manager.NeutronManager.get_plugin()
return plugin._get_subnet(context, id)
def __init__(self, internal_id, ctx, cidr=None,
allocation_pools=None, gateway_ip=None, tenant_id=None,
subnet_id=None, subnet_id_not_set=False):
# NOTE: In theory it could have been possible to grant the IPAM
# driver direct access to the database. While this is possible,
# it would have led to duplicate code and/or non-trivial
# refactorings in neutron.db.db_base_plugin_v2.
# This is because in the Neutron V2 plugin logic DB management is
# encapsulated within the plugin.
self._cidr = cidr
self._pools = allocation_pools
self._gateway_ip = gateway_ip
self._tenant_id = tenant_id
self._subnet_id = None if subnet_id_not_set else subnet_id
self.subnet_manager = ipam_db_api.IpamSubnetManager(internal_id,
self._subnet_id)
self._context = ctx
def _verify_ip(self, session, ip_address):
"""Verify whether IP address can be allocated on subnet.
:param session: database session
:param ip_address: String representing the IP address to verify
        :raises: IpAddressAlreadyAllocated, InvalidIpForSubnet
"""
        # Ensure that the IP address is not already allocated
if not self.subnet_manager.check_unique_allocation(session,
ip_address):
raise ipam_exc.IpAddressAlreadyAllocated(
subnet_id=self.subnet_manager.neutron_id,
ip=ip_address)
# Ensure that the IP is valid on the subnet
if not ipam_utils.check_subnet_ip(self._cidr, ip_address):
raise ipam_exc.InvalidIpForSubnet(
subnet_id=self.subnet_manager.neutron_id,
ip=ip_address)
def _allocate_specific_ip(self, session, ip_address,
allocation_pool_id=None):
"""Remove an IP address from subnet's availability ranges.
This method is supposed to be called from within a database
transaction, otherwise atomicity and integrity might not be
        enforced and the operation might result in inconsistent availability
ranges for the subnet.
:param session: database session
:param ip_address: ip address to mark as allocated
:param allocation_pool_id: identifier of the allocation pool from
which the ip address has been extracted. If not specified this
routine will scan all allocation pools.
:returns: list of IP ranges as instances of IPAvailabilityRange
"""
# Return immediately for EUI-64 addresses. For this
# class of subnets availability ranges do not apply
if ipv6_utils.is_eui64_address(ip_address):
return
LOG.debug("Removing %(ip_address)s from availability ranges for "
"subnet id:%(subnet_id)s",
{'ip_address': ip_address,
'subnet_id': self.subnet_manager.neutron_id})
# Netaddr's IPRange and IPSet objects work very well even with very
# large subnets, including IPv6 ones.
final_ranges = []
if allocation_pool_id:
av_ranges = self.subnet_manager.list_ranges_by_allocation_pool(
session, allocation_pool_id, locking=True)
else:
av_ranges = self.subnet_manager.list_ranges_by_subnet_id(
session, locking=True)
for db_range in av_ranges:
initial_ip_set = netaddr.IPSet(netaddr.IPRange(
db_range['first_ip'], db_range['last_ip']))
final_ip_set = initial_ip_set - netaddr.IPSet([ip_address])
if not final_ip_set:
# Range exhausted - bye bye
session.delete(db_range)
continue
if initial_ip_set == final_ip_set:
# IP address does not fall within the current range, move
# to the next one
final_ranges.append(db_range)
continue
for new_range in final_ip_set.iter_ipranges():
# store new range in database
# use netaddr.IPAddress format() method which is equivalent
# to str(...) but also enables us to use different
# representation formats (if needed) for IPv6.
first_ip = netaddr.IPAddress(new_range.first)
last_ip = netaddr.IPAddress(new_range.last)
if (db_range['first_ip'] == first_ip.format() or
db_range['last_ip'] == last_ip.format()):
db_range['first_ip'] = first_ip.format()
db_range['last_ip'] = last_ip.format()
LOG.debug("Adjusted availability range for pool %s",
db_range['allocation_pool_id'])
final_ranges.append(db_range)
else:
new_ip_range = self.subnet_manager.create_range(
session,
db_range['allocation_pool_id'],
first_ip.format(),
last_ip.format())
LOG.debug("Created availability range for pool %s",
new_ip_range['allocation_pool_id'])
final_ranges.append(new_ip_range)
# Most callers might ignore this return value, which is however
# useful for testing purposes
LOG.debug("Availability ranges for subnet id %(subnet_id)s "
"modified: %(new_ranges)s",
{'subnet_id': self.subnet_manager.neutron_id,
'new_ranges': ", ".join(["[%s; %s]" %
(r['first_ip'], r['last_ip']) for
r in final_ranges])})
return final_ranges
def _rebuild_availability_ranges(self, session):
"""Rebuild availability ranges.
This method should be called only when the availability ranges are
exhausted or when the subnet's allocation pools are updated,
which may trigger a deletion of the availability ranges.
For this operation to complete successfully, this method uses a
locking query to ensure that no IP is allocated while the regeneration
of availability ranges is in progress.
:param session: database session
"""
# List all currently allocated addresses, and prevent further
# allocations with a write-intent lock.
# NOTE: because of this driver's logic the write intent lock is
# probably unnecessary as this routine is called when the availability
# ranges for a subnet are exhausted and no further address can be
# allocated.
# TODO(salv-orlando): devise, if possible, a more efficient solution
# for building the IPSet to ensure decent performances even with very
# large subnets.
allocations = netaddr.IPSet(
[netaddr.IPAddress(allocation['ip_address']) for
allocation in self.subnet_manager.list_allocations(
session, locking=True)])
# There should be no need to set a write intent lock on the allocation
# pool table. Indeed it is not important for the correctness of this
# operation if the allocation pools are updated by another operation,
# which will result in the generation of new availability ranges.
# NOTE: it might be argued that an allocation pool update should in
# theory preempt rebuilding the availability range. This is an option
# to consider for future developments.
LOG.debug("Rebuilding availability ranges for subnet %s",
self.subnet_manager.neutron_id)
for pool in self.subnet_manager.list_pools(session):
# Create a set of all addresses in the pool
poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
pool['last_ip']))
# Use set difference to find free addresses in the pool
available = poolset - allocations
# Write the ranges to the db
for ip_range in available.iter_ipranges():
av_range = self.subnet_manager.create_range(
session,
pool['id'],
netaddr.IPAddress(ip_range.first).format(),
netaddr.IPAddress(ip_range.last).format())
session.add(av_range)
def _generate_ip(self, session):
try:
return self._try_generate_ip(session)
except ipam_exc.IpAddressGenerationFailure:
self._rebuild_availability_ranges(session)
return self._try_generate_ip(session)
def _try_generate_ip(self, session):
"""Generate an IP address from availability ranges."""
ip_range = self.subnet_manager.get_first_range(session, locking=True)
if not ip_range:
LOG.debug("All IPs from subnet %(subnet_id)s allocated",
{'subnet_id': self.subnet_manager.neutron_id})
raise ipam_exc.IpAddressGenerationFailure(
subnet_id=self.subnet_manager.neutron_id)
# A suitable range was found. Return IP address.
ip_address = ip_range['first_ip']
LOG.debug("Allocated IP - %(ip_address)s from range "
"[%(first_ip)s; %(last_ip)s]",
{'ip_address': ip_address,
'first_ip': ip_address,
'last_ip': ip_range['last_ip']})
return ip_address, ip_range['allocation_pool_id']
def allocate(self, address_request):
# NOTE(salv-orlando): Creating a new db session might be a rather
# dangerous thing to do, if executed from within another database
# transaction. Therefore the IPAM driver should never be
# called from within a database transaction, which is also good
# practice since in the general case these drivers may interact
# with remote backends
session = self._context.session
all_pool_id = None
# NOTE(salv-orlando): It would probably better to have a simpler
# model for address requests and just check whether there is a
# specific IP address specified in address_request
if isinstance(address_request, ipam.SpecificAddressRequest):
# This handles both specific and automatic address requests
# Check availability of requested IP
ip_address = str(address_request.address)
self._verify_ip(session, ip_address)
else:
ip_address, all_pool_id = self._generate_ip(session)
self._allocate_specific_ip(session, ip_address, all_pool_id)
# Create IP allocation request object
# The only defined status at this stage is 'ALLOCATED'.
# More states will be available in the future - e.g.: RECYCLABLE
self.subnet_manager.create_allocation(session, ip_address)
return ip_address
def deallocate(self, address):
# This is almost a no-op because the Neutron DB IPAM driver does not
# delete IPAllocation objects, neither rebuilds availability ranges
# at every deallocation. The only operation it performs is to delete
# an IPRequest entry.
session = self._context.session
count = self.subnet_manager.delete_allocation(
session, address)
# count can hardly be greater than 1, but it can be 0...
if not count:
raise ipam_exc.IpAddressAllocationNotFound(
subnet_id=self.subnet_manager.neutron_id,
ip_address=address)
def update_allocation_pools(self, pools):
# Pools have already been validated in the subnet request object which
# was sent to the subnet pool driver. Further validation should not be
# required.
session = db_api.get_session()
self.subnet_manager.delete_allocation_pools(session)
self.create_allocation_pools(self.subnet_manager, session, pools)
self._pools = pools
def get_details(self):
"""Return subnet data as a SpecificSubnetRequest"""
return ipam.SpecificSubnetRequest(
self._tenant_id, self.subnet_manager.neutron_id,
self._cidr, self._gateway_ip, self._pools)
def associate_neutron_subnet(self, subnet_id):
"""Set neutron identifier for this subnet"""
session = self._context.session
if self._subnet_id:
            # A Neutron subnet can be associated only once.
            raise RuntimeError("Neutron subnet already associated with this "
                               "IPAM subnet")
# IPAMSubnet does not have foreign key to Subnet,
# so need verify subnet existence.
NeutronDbSubnet._fetch_subnet(self._context, subnet_id)
self.subnet_manager.associate_neutron_id(session, subnet_id)
self._subnet_id = subnet_id
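def _example_availability_range_split():
    # Illustrative sketch, not part of the driver: the netaddr arithmetic that
    # NeutronDbSubnet._allocate_specific_ip uses above to carve an allocated IP
    # out of an availability range. No database rows are involved here; the
    # returned pairs correspond to the ranges the driver would keep or create.
    original = netaddr.IPRange('192.168.0.2', '192.168.0.10')
    allocated = '192.168.0.5'
    remaining = netaddr.IPSet(original) - netaddr.IPSet([allocated])
    # Two sub-ranges survive: [192.168.0.2; 192.168.0.4] and [192.168.0.6; 192.168.0.10].
    return [(netaddr.IPAddress(r.first).format(),
             netaddr.IPAddress(r.last).format())
            for r in remaining.iter_ipranges()]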
class NeutronDbPool(subnet_alloc.SubnetAllocator):
"""Subnet pools backed by Neutron Database.
    As this driver does not yet implement the subnet pool concept, most
operations are either trivial or no-ops.
"""
def get_subnet(self, subnet_id):
"""Retrieve an IPAM subnet.
:param subnet_id: Neutron subnet identifier
:returns: a NeutronDbSubnet instance
"""
return NeutronDbSubnet.load(subnet_id, self._context)
def allocate_subnet(self, subnet_request):
"""Create an IPAMSubnet object for the provided cidr.
This method does not actually do any operation in the driver, given
its simplified nature.
:param cidr: subnet's CIDR
:returns: a NeutronDbSubnet instance
"""
if self._subnetpool:
subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request)
subnet_request = subnet.get_details()
        # SubnetRequest must be an instance of SpecificSubnetRequest
if not isinstance(subnet_request, ipam.SpecificSubnetRequest):
raise ipam_exc.InvalidSubnetRequestType(
subnet_type=type(subnet_request))
return NeutronDbSubnet.create_from_subnet_request(subnet_request,
self._context)
def update_subnet(self, subnet_request):
"""Update subnet info the in the IPAM driver.
The only update subnet information the driver needs to be aware of
are allocation pools.
"""
if not subnet_request.subnet_id:
raise ipam_exc.InvalidSubnetRequest(
reason=("An identifier must be specified when updating "
"a subnet"))
if not subnet_request.allocation_pools:
LOG.debug("Update subnet request for subnet %s did not specify "
"new allocation pools, there is nothing to do",
subnet_request.subnet_id)
return
subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context)
subnet.update_allocation_pools(subnet_request.allocation_pools)
return subnet
def remove_subnet(self, subnet):
"""Remove data structures for a given subnet.
All the IPAM-related data are cleared when a subnet is deleted thanks
to cascaded foreign key relationships.
"""
pass
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Mapping, Set, AsyncIterator, Callable
from uuid import uuid4
from google.api_core.client_options import ClientOptions
from google.auth.credentials import Credentials
from google.cloud.pubsublite.cloudpubsub.reassignment_handler import (
ReassignmentHandler,
DefaultReassignmentHandler,
)
from google.cloud.pubsublite.cloudpubsub.message_transforms import (
to_cps_subscribe_message,
add_id_to_cps_subscribe_transformer,
)
from google.cloud.pubsublite.internal.wire.client_cache import ClientCache
from google.cloud.pubsublite.types import FlowControlSettings
from google.cloud.pubsublite.cloudpubsub.internal.ack_set_tracker_impl import (
AckSetTrackerImpl,
)
from google.cloud.pubsublite.cloudpubsub.internal.assigning_subscriber import (
PartitionSubscriberFactory,
AssigningSingleSubscriber,
)
from google.cloud.pubsublite.cloudpubsub.internal.single_partition_subscriber import (
SinglePartitionSingleSubscriber,
)
from google.cloud.pubsublite.cloudpubsub.message_transformer import MessageTransformer
from google.cloud.pubsublite.cloudpubsub.nack_handler import (
NackHandler,
DefaultNackHandler,
)
from google.cloud.pubsublite.cloudpubsub.internal.single_subscriber import (
AsyncSingleSubscriber,
)
from google.cloud.pubsublite.internal.endpoints import regional_endpoint
from google.cloud.pubsublite.internal.wire.assigner import Assigner
from google.cloud.pubsublite.internal.wire.assigner_impl import AssignerImpl
from google.cloud.pubsublite.internal.wire.committer_impl import CommitterImpl
from google.cloud.pubsublite.internal.wire.fixed_set_assigner import FixedSetAssigner
from google.cloud.pubsublite.internal.wire.gapic_connection import (
GapicConnectionFactory,
)
from google.cloud.pubsublite.internal.wire.merge_metadata import merge_metadata
from google.cloud.pubsublite.internal.wire.pubsub_context import pubsub_context
import google.cloud.pubsublite.internal.wire.subscriber_impl as wire_subscriber
from google.cloud.pubsublite.internal.wire.subscriber_reset_handler import (
SubscriberResetHandler,
)
from google.cloud.pubsublite.types import Partition, SubscriptionPath
from google.cloud.pubsublite.internal.routing_metadata import (
subscription_routing_metadata,
)
from google.cloud.pubsublite_v1 import (
SubscribeRequest,
InitialSubscribeRequest,
StreamingCommitCursorRequest,
PartitionAssignmentRequest,
InitialPartitionAssignmentRequest,
InitialCommitCursorRequest,
)
from google.cloud.pubsublite_v1.services.subscriber_service.async_client import (
SubscriberServiceAsyncClient,
)
from google.cloud.pubsublite_v1.services.partition_assignment_service.async_client import (
PartitionAssignmentServiceAsyncClient,
)
from google.cloud.pubsublite_v1.services.cursor_service.async_client import (
CursorServiceAsyncClient,
)
_DEFAULT_FLUSH_SECONDS = 0.1
def _make_dynamic_assigner(
subscription: SubscriptionPath,
transport: str,
client_options: ClientOptions,
credentials: Optional[Credentials],
base_metadata: Optional[Mapping[str, str]],
) -> Assigner:
if base_metadata is None:
base_metadata = {}
def assignment_connection_factory(
requests: AsyncIterator[PartitionAssignmentRequest],
):
assignment_client = PartitionAssignmentServiceAsyncClient(credentials=credentials, transport=transport, client_options=client_options) # type: ignore
return assignment_client.assign_partitions(
requests, metadata=list(base_metadata.items())
)
return AssignerImpl(
InitialPartitionAssignmentRequest(
subscription=str(subscription), client_id=uuid4().bytes
),
GapicConnectionFactory(assignment_connection_factory),
)
def _make_partition_subscriber_factory(
subscription: SubscriptionPath,
transport: str,
client_options: ClientOptions,
credentials: Optional[Credentials],
base_metadata: Optional[Mapping[str, str]],
flow_control_settings: FlowControlSettings,
nack_handler: NackHandler,
message_transformer: MessageTransformer,
) -> PartitionSubscriberFactory:
subscribe_client_cache = ClientCache(
lambda: SubscriberServiceAsyncClient(
credentials=credentials, transport=transport, client_options=client_options
)
)
cursor_client_cache = ClientCache(
lambda: CursorServiceAsyncClient(
credentials=credentials, transport=transport, client_options=client_options
)
)
def factory(partition: Partition) -> AsyncSingleSubscriber:
final_metadata = merge_metadata(
base_metadata, subscription_routing_metadata(subscription, partition)
)
def subscribe_connection_factory(requests: AsyncIterator[SubscribeRequest]):
return subscribe_client_cache.get().subscribe(
requests, metadata=list(final_metadata.items())
)
def cursor_connection_factory(
requests: AsyncIterator[StreamingCommitCursorRequest],
):
return cursor_client_cache.get().streaming_commit_cursor(
requests, metadata=list(final_metadata.items())
)
def subscriber_factory(reset_handler: SubscriberResetHandler):
return wire_subscriber.SubscriberImpl(
InitialSubscribeRequest(
subscription=str(subscription), partition=partition.value
),
_DEFAULT_FLUSH_SECONDS,
GapicConnectionFactory(subscribe_connection_factory),
reset_handler,
)
committer = CommitterImpl(
InitialCommitCursorRequest(
subscription=str(subscription), partition=partition.value
),
_DEFAULT_FLUSH_SECONDS,
GapicConnectionFactory(cursor_connection_factory),
)
ack_set_tracker = AckSetTrackerImpl(committer)
return SinglePartitionSingleSubscriber(
subscriber_factory,
flow_control_settings,
ack_set_tracker,
nack_handler,
add_id_to_cps_subscribe_transformer(partition, message_transformer),
)
return factory
def make_async_subscriber(
subscription: SubscriptionPath,
transport: str,
per_partition_flow_control_settings: FlowControlSettings,
nack_handler: Optional[NackHandler] = None,
reassignment_handler: Optional[ReassignmentHandler] = None,
message_transformer: Optional[MessageTransformer] = None,
fixed_partitions: Optional[Set[Partition]] = None,
credentials: Optional[Credentials] = None,
client_options: Optional[ClientOptions] = None,
metadata: Optional[Mapping[str, str]] = None,
) -> AsyncSingleSubscriber:
"""
Make a Pub/Sub Lite AsyncSubscriber.
Args:
subscription: The subscription to subscribe to.
transport: The transport type to use.
per_partition_flow_control_settings: The flow control settings for each partition subscribed to. Note that these
settings apply to each partition individually, not in aggregate.
        nack_handler: An optional handler for when nack() is called on a Message. The default will fail the client.
        reassignment_handler: An optional handler invoked when the set of partitions assigned to this subscriber changes. The default accepts the new assignment without further action.
message_transformer: An optional transformer from Pub/Sub Lite messages to Cloud Pub/Sub messages.
fixed_partitions: A fixed set of partitions to subscribe to. If not present, will instead use auto-assignment.
credentials: The credentials to use to connect. GOOGLE_DEFAULT_CREDENTIALS is used if None.
client_options: Other options to pass to the client. Note that if you pass any you must set api_endpoint.
metadata: Additional metadata to send with the RPC.
Returns:
A new AsyncSubscriber.
"""
metadata = merge_metadata(pubsub_context(framework="CLOUD_PUBSUB_SHIM"), metadata)
if client_options is None:
client_options = ClientOptions(
api_endpoint=regional_endpoint(subscription.location.region)
)
assigner_factory: Callable[[], Assigner]
if fixed_partitions:
assigner_factory = lambda: FixedSetAssigner(fixed_partitions) # noqa: E731
else:
assigner_factory = lambda: _make_dynamic_assigner( # noqa: E731
subscription, transport, client_options, credentials, metadata,
)
if nack_handler is None:
nack_handler = DefaultNackHandler()
if reassignment_handler is None:
reassignment_handler = DefaultReassignmentHandler()
if message_transformer is None:
message_transformer = MessageTransformer.of_callable(to_cps_subscribe_message)
partition_subscriber_factory = _make_partition_subscriber_factory(
subscription,
transport,
client_options,
credentials,
metadata,
per_partition_flow_control_settings,
nack_handler,
message_transformer,
)
return AssigningSingleSubscriber(
assigner_factory, partition_subscriber_factory, reassignment_handler
)
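def _example_make_subscriber(
    subscription_path: SubscriptionPath,
    flow_settings: FlowControlSettings,
) -> AsyncSingleSubscriber:
    # Illustrative sketch, not part of the original module. The SubscriptionPath
    # and FlowControlSettings arguments are placeholders the caller builds per the
    # library documentation (not shown here), and "grpc_asyncio" is assumed to be
    # the transport string accepted by the underlying async GAPIC clients.
    return make_async_subscriber(
        subscription=subscription_path,
        transport="grpc_asyncio",
        per_partition_flow_control_settings=flow_settings,
        # Pin the subscriber to partition 0 instead of using auto-assignment,
        # which exercises the FixedSetAssigner branch above.
        fixed_partitions={Partition(0)},
    )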
|
|
"""Listens to Treadmill cleanup events.
When a Treadmill app needs to be cleaned up, a symlink to the app is placed
in the cleanup directory. A cleanup app is then created to do the necessary
cleanup work:
<treadmillroot>/
cleanup/
<instance>
cleaning/
<instance> -> ../cleanup_apps/<instance>
cleanup_apps/
<instance>
Treadmill runs svscan process pointing to 'cleaning' scan directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import logging
import os
import shutil
import time
from treadmill import dirwatch
from treadmill import fs
from treadmill import logcontext as lc
from treadmill import runtime as app_runtime
from treadmill import subproc
from treadmill import supervisor
_LOGGER = logging.getLogger(__name__)
# FIXME: This extremely high timeout value comes from the fact that we
# have a very high watchdog value in runtime.
_WATCHDOG_HEARTBEAT_SEC = 5 * 60
# Maximum number of cleanup requests to process per cycle. Be careful of
# watchdog timeouts when increasing this value.
_MAX_REQUEST_PER_CYCLE = 1
_SERVICE_NAME = 'Cleanup'
def _islink(path):
"""
A local function wrap os.path.islink.
"""
# CAVEAT : Coverage uses islink internally to determine the scope to trace.
# While mocking islink in unit test, coverage uses moocked islink which may
# potentially cause StopIterationException.
return os.path.islink(path)
class Cleanup:
"""Orchestrate the cleanup of apps which are scheduled to be stopped and/or
removed.
"""
__slots__ = (
'tm_env',
)
def __init__(self, tm_env):
self.tm_env = tm_env
def _refresh_supervisor(self):
"""Notify the supervisor of new cleanup instances.
"""
_LOGGER.info('Refreshing svscan')
supervisor.control_svscan(self.tm_env.cleaning_dir, (
supervisor.SvscanControlAction.alarm,
supervisor.SvscanControlAction.nuke
))
def _add_cleanup_app(self, path):
"""Configure a new cleanup app.
"""
name = os.path.basename(path)
if name.startswith('.'):
_LOGGER.warning('Ignore %s', name)
return
cleaning_link = os.path.join(self.tm_env.cleaning_dir, name)
if _islink(cleaning_link):
_LOGGER.warning('Cleaning app already configured %s', name)
return
cleanup_link = os.path.join(self.tm_env.cleanup_dir, name)
if not _islink(cleanup_link):
_LOGGER.info('Ignore - not a link: %s', cleanup_link)
return
_LOGGER.info('Configure cleaning app: %s', name)
bin_name = 'scripts' if os.name == 'nt' else 'bin'
command = (
'{treadmill}/{bin}/treadmill sproc cleanup instance'
' --approot {tm_root}'
' {instance}'
).format(
treadmill=subproc.resolve('treadmill'),
bin=bin_name,
tm_root=self.tm_env.root,
instance=name
)
if os.name == 'posix':
command = 'exec ' + command
supervisor.create_service(
self.tm_env.cleanup_apps_dir,
name=name,
app_run_script=command,
userid='root',
monitor_policy={
'limit': 5,
'interval': 60,
'tombstone': {
'path': self.tm_env.cleanup_tombstone_dir,
'id': name,
},
'skip_path': os.path.join(self.tm_env.cleanup_dir, name)
},
log_run_script=None,
)
fs.symlink_safe(
cleaning_link,
os.path.join(self.tm_env.cleanup_apps_dir, name)
)
_LOGGER.debug('Cleanup app %s ready', name)
self._refresh_supervisor()
def _remove_cleanup_app(self, path):
"""Stop and remove a cleanup app.
"""
name = os.path.basename(path)
if name.startswith('.'):
_LOGGER.warning('Ignore %s', name)
return
cleaning_link = os.path.join(self.tm_env.cleaning_dir, name)
app_path = os.path.join(self.tm_env.cleanup_apps_dir, name)
_LOGGER.info('Removing cleanup app %s -> %s', cleaning_link, app_path)
if os.path.exists(cleaning_link):
_LOGGER.debug('Removing cleanup link %s', cleaning_link)
fs.rm_safe(cleaning_link)
self._refresh_supervisor()
_LOGGER.debug('Waiting on %s not being supervised', app_path)
supervisor.ensure_not_supervised(app_path)
else:
_LOGGER.debug('Cleanup link %s does not exist', cleaning_link)
_LOGGER.debug('Removing app directory %s', app_path)
fs.rmtree_safe(app_path)
def invoke(self, runtime, instance, runtime_param=None):
"""Actually do the cleanup of the instance.
"""
cleanup_link = os.path.join(self.tm_env.cleanup_dir, instance)
container_dir = os.readlink(cleanup_link)
_LOGGER.info('Cleanup: %s => %s', instance, container_dir)
if os.path.exists(container_dir):
with lc.LogContext(_LOGGER, os.path.basename(container_dir),
lc.ContainerAdapter) as log:
try:
app_runtime.get_runtime(
runtime, self.tm_env, container_dir, runtime_param
).finish()
except supervisor.InvalidServiceDirError:
log.info('Container dir is invalid, removing: %s',
container_dir)
shutil.rmtree(container_dir)
except Exception: # pylint: disable=W0703
if not os.path.exists(container_dir):
log.info('Container dir does not exist: %s',
container_dir)
else:
log.exception('Fatal error running finish %r.',
container_dir)
raise
else:
_LOGGER.info('Container dir does not exist: %r', container_dir)
fs.rm_safe(cleanup_link)
def _sync(self):
"""Synchronize cleanup to cleaning.
"""
cleanup_list = [
os.path.basename(filename)
for filename in glob.glob(os.path.join(self.tm_env.cleanup_dir,
'*'))
]
cleanup_apps = {
os.path.basename(filename)
for filename in glob.glob(
os.path.join(self.tm_env.cleanup_apps_dir, '*')
)
}
for instance in cleanup_list:
self._add_cleanup_app(instance)
cleanup_apps.discard(instance)
for instance in cleanup_apps:
self._remove_cleanup_app(instance)
def run(self):
"""Setup directories' watches and start the re-scan ticker.
"""
# Setup the watchdog
watchdog_lease = self.tm_env.watchdogs.create(
name='svc-{svc_name}'.format(svc_name=_SERVICE_NAME),
timeout='{hb:d}s'.format(hb=_WATCHDOG_HEARTBEAT_SEC),
content='Service {svc_name!r} failed'.format(
svc_name=_SERVICE_NAME),
)
# Wait on svscan starting up first to avoid race conditions with
# refreshing it later.
while True:
try:
self._refresh_supervisor()
_LOGGER.info('svscan is running.')
break
except subproc.CalledProcessError:
_LOGGER.info('Waiting on svscan running.')
time.sleep(0.2)
watcher = dirwatch.DirWatcher(self.tm_env.cleanup_dir)
watcher.on_created = self._add_cleanup_app
watcher.on_deleted = self._remove_cleanup_app
self._sync()
loop_timeout = _WATCHDOG_HEARTBEAT_SEC // 2
while True:
if watcher.wait_for_events(timeout=loop_timeout):
watcher.process_events(max_events=_MAX_REQUEST_PER_CYCLE)
# Heartbeat
watchdog_lease.heartbeat()
_LOGGER.info('Cleanup service shutdown.')
watchdog_lease.remove()
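def _example_cleanup_layout(root):
    # Illustrative sketch, not part of the service: the on-disk protocol from the
    # module docstring reproduced with plain stdlib calls. `root` stands in for
    # tm_env.root; in production Treadmill itself creates these directories and
    # symlinks.
    for sub in ('cleanup', 'cleaning', 'cleanup_apps', 'apps'):
        os.makedirs(os.path.join(root, sub))
    instance = 'proid.app#0000000001'
    container_dir = os.path.join(root, 'apps', instance)
    os.makedirs(container_dir)
    # A cleanup request is a symlink in cleanup/ pointing at the container dir.
    os.symlink(container_dir, os.path.join(root, 'cleanup', instance))
    # Cleanup._add_cleanup_app() then materialises the cleanup app and links it
    # into the 'cleaning' svscan directory; the resulting layout looks like this:
    app_dir = os.path.join(root, 'cleanup_apps', instance)
    os.makedirs(app_dir)
    os.symlink(app_dir, os.path.join(root, 'cleaning', instance))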
|
|
import requests, json, datetime, string, pytz
from django.conf import settings
from broLogTypes import broLogs
import logging
logger = logging.getLogger('elasticsearch_requests')
def getIndices():
"""Get a list of all bro indices
"""
result = Request(index="@bro-meta")._doRequest({"size": 65535})
indices = []
for hit in result["hits"]["hits"]:
if hit["es_source"]["name"].startswith(settings.ELASTICSEARCH_INDEX_PREFIX):
indices.append(hit["es_source"])
return indices
def indexNameToDatetime(indexName):
"""Convert a bro-201208121900 style-name to a datetime object.
"""
if indexName.startswith(settings.ELASTICSEARCH_INDEX_PREFIX) and not indexName.startswith(settings.ELASTICSEARCH_INDEX_PREFIX + "-"):
return pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.now())
indexTime = datetime.datetime.strptime(indexName.replace(settings.ELASTICSEARCH_INDEX_PREFIX + "-", ""), "%Y%m%d%H%M")
return pytz.timezone(settings.TIME_ZONE).localize(indexTime)
def indicesFromTime(startTime, indices):
"""Create a comma-separated list of the indices one needs to query for the given time window.
"""
endTime=pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.now())
if startTime == "all":
return [index["name"] for index in indices]
else:
number = ""
unit = ""
for i in range(len(startTime)):
if startTime[i] in string.ascii_letters:
unit = startTime[i:]
try:
number = int(number)
                except ValueError:
raise ValueError("Format of time: 1m, 2days, etc.")
break
elif startTime[i] in string.whitespace:
continue
elif startTime[i] in string.digits:
number += startTime[i]
else:
raise ValueError("Format of time: 1m, 2days, etc.")
if not number or not unit or number < 1:
raise ValueError("Format of time: 1m, 2days, etc.")
units = {"day": ["day", "days", "d"],
"hour": ["hour", "hours", "h"],
"minute": ["minute", "minutes", "m"],
"second": ["second", "seconds", "s"]}
if unit in units["day"]:
then = endTime - datetime.timedelta(days=number)
elif unit in units["hour"]:
then = endTime - datetime.timedelta(hours=number)
elif unit in units["minute"]:
then = endTime - datetime.timedelta(minutes=number)
elif unit in units["second"]:
then = endTime - datetime.timedelta(seconds=number)
else:
raise ValueError("Possible time units: " + units.keys())
chosenIndices = []
for index in indices:
indexStart = pytz.utc.localize(datetime.datetime.utcfromtimestamp(index["start"]))
indexEnd = pytz.utc.localize(datetime.datetime.utcfromtimestamp(index["end"]))
if ( ( indexStart >= endTime ) ^ ( indexEnd >= then ) ):
chosenIndices.append(index["name"])
return chosenIndices
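def _exampleIndicesFromTime():
    # Illustrative sketch, not part of the module: the index dicts mirror what
    # getIndices() returns, with hypothetical epoch timestamps, and Django
    # settings (TIME_ZONE, ELASTICSEARCH_INDEX_PREFIX) must be configured.
    indices = [{"name": settings.ELASTICSEARCH_INDEX_PREFIX + "-201208121900",
                "start": 1344798000, "end": 1344801600}]
    # "all" selects every index; a relative window such as "2h" or "30m" is
    # parsed by the digit/letter loop above and compared against each index's
    # start/end timestamps.
    return indicesFromTime("all", indices), indicesFromTime("2h", indices)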
def queryEscape(query):
"""Certain chars need to be escaped
"""
    bad_chars = [("&quot;", '"')]
for char, replacement in bad_chars:
query = query.replace(char, replacement)
return query
def getCounts(query, index="_all", type=None):
"""Using a facet of types, return dict of type and count.
"""
hits = []
data = {"query":
{"constant_score":
{"filter":
{"query":
{"query_string": {"query": query}}}}},
"facets": {"term": {"terms": {"field": "_type", "size": 50, "order": "term"}}},
"size": 0
}
result = Request(index=index, type=type)._doRequest(data=data)
for i in result["facets"]["term"]["terms"]:
count, type = i.itervalues()
if type not in settings.ELASTICSEARCH_IGNORE_TYPES:
hits.append({"type": type, "total": count})
return hits
def doQuery(query, index="_all", sort=None, type=None, start=0, facets={}, size=settings.PAGE_SIZE):
"""Short wrapper for simple queries.
"""
if not sort:
sort = {"ts": {"order": "DESC"}}
data = {"query":
{"constant_score":
{"filter":
{"query":
{"query_string": {"query": query}}}}},
"size": size,
"from": start,
"sort": sort,
"facets": facets,
}
result = Request(index=index, type=type)._doRequest(data=data)
return result
def resultToTable(result, type):
"""Convert JSON result to a dict for use in HTML table template.
"""
header = [(field.name, field.type, field.description) for field in broLogs[type] if field.name not in settings.ELASTICSEARCH_IGNORE_COLUMNS.get(type, [])]
content = []
if type in settings.ELASTICSEARCH_IGNORE_TYPES:
return {}
if "hits" not in result.keys():
return {}
if "hits" not in result["hits"].keys():
return {}
if len(result["hits"]["hits"]) == 0:
return {}
for hit in result["hits"]["hits"]:
row = []
for column, fType, desc in header:
row.append((column, fType, hit["es_source"].get(column, "")))
content.append(row)
if len(hit["es_source"].keys()) > len(row):
assert "WARNING: Some fields weren't properly accounted for."
assert "Type: %s;\nKnown fields: %s.\nRecvd fields: %s." % (type, hit["es_source"].keys(), [x[0] for x in row])
logger.debug(content)
return {"header": header, "content": content, "took": result["took"]}
def getHealth():
"""Return cluster health information.
"""
health = Request(index="_cluster")._doRequest(operation="health", verb="GET")
return health
def getShardInfo():
"""Return shard health information.
"""
shards = Request()._doRequest(operation="_stats", verb="GET", search_opts="clear=true")
return shards
def getNodeInfo():
"""Return node health information.
"""
nodes = Request(index="_nodes")._doRequest(operation="stats", verb="GET", search_opts="clear=true&os=true&fs=true")
return nodes['nodes']
class Request(object):
"""A single request to ElasticSearch
"""
def __init__(self, index="_all", type=None, url=settings.ELASTICSEARCH_SERVER):
path = "http://%s/" % url
if index:
path += index + "/"
if type:
path += type + "/"
self.path = path
self.data = {}
self.requests_config = {"max_retries": 0}
def _doRequest(self, data=None, operation="_search", search_opts="", verb="POST"):
if data:
self.data = dict(self.data.items() + data.items())
if verb == "POST":
logger.debug("POST " + self.path + operation + "?" + search_opts)
logger.debug(" " + json.dumps(self.data))
result = requests.post(self.path + operation + "?" + search_opts, data=json.dumps(self.data), config=self.requests_config).text
else:
logger.debug("GET " + self.path + operation + "?" + search_opts)
result = requests.get(self.path + operation + "?" + search_opts, config=self.requests_config).text
# ElasticSearch internal fields are prefixed with _. This causes some issues w/ Django, so we prefix with es_ instead.
self.result = json.loads(result.replace('"_', '"es_'))
if "error" in self.result.keys():
raise IOError(self.result["error"])
return self.result
queryAll = lambda self: self._doRequest({"size": settings.PAGE_SIZE})
query = lambda self, query: self._doRequest({"query": query, "size": settings.PAGE_SIZE})
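def _exampleQuery():
    # Illustrative sketch, not part of the module. It assumes a reachable
    # ElasticSearch server at settings.ELASTICSEARCH_SERVER; the query string,
    # index and type below are hypothetical.
    # Count matching documents per log type ...
    counts = getCounts('dns.example.com', index="_all")
    # ... then fetch the first page of 'dns' records, newest first, and shape
    # the hits for the HTML table template.
    result = doQuery('dns.example.com', index="_all", type="dns")
    return counts, resultToTable(result, "dns")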
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'SeverityEnum' : _MetaInfoEnum('SeverityEnum', 'ydk.models.ietf.ietf_syslog_types',
{
'emergency':'emergency',
'alert':'alert',
'critical':'critical',
'error':'error',
'warning':'warning',
'notice':'notice',
'info':'info',
'debug':'debug',
}, 'ietf-syslog-types', _yang_ns._namespaces['ietf-syslog-types']),
'SyslogFacilityIdentity' : {
'meta_info' : _MetaInfoClass('SyslogFacilityIdentity',
False,
[
],
'ietf-syslog-types',
'syslog-facility',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local3Identity' : {
'meta_info' : _MetaInfoClass('Local3Identity',
False,
[
],
'ietf-syslog-types',
'local3',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'DaemonIdentity' : {
'meta_info' : _MetaInfoClass('DaemonIdentity',
False,
[
],
'ietf-syslog-types',
'daemon',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local0Identity' : {
'meta_info' : _MetaInfoClass('Local0Identity',
False,
[
],
'ietf-syslog-types',
'local0',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'NtpIdentity' : {
'meta_info' : _MetaInfoClass('NtpIdentity',
False,
[
],
'ietf-syslog-types',
'ntp',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'CronIdentity' : {
'meta_info' : _MetaInfoClass('CronIdentity',
False,
[
],
'ietf-syslog-types',
'cron',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'AuditIdentity' : {
'meta_info' : _MetaInfoClass('AuditIdentity',
False,
[
],
'ietf-syslog-types',
'audit',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'KernIdentity' : {
'meta_info' : _MetaInfoClass('KernIdentity',
False,
[
],
'ietf-syslog-types',
'kern',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local4Identity' : {
'meta_info' : _MetaInfoClass('Local4Identity',
False,
[
],
'ietf-syslog-types',
'local4',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'MailIdentity' : {
'meta_info' : _MetaInfoClass('MailIdentity',
False,
[
],
'ietf-syslog-types',
'mail',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'UserIdentity' : {
'meta_info' : _MetaInfoClass('UserIdentity',
False,
[
],
'ietf-syslog-types',
'user',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'FtpIdentity' : {
'meta_info' : _MetaInfoClass('FtpIdentity',
False,
[
],
'ietf-syslog-types',
'ftp',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local6Identity' : {
'meta_info' : _MetaInfoClass('Local6Identity',
False,
[
],
'ietf-syslog-types',
'local6',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'ConsoleIdentity' : {
'meta_info' : _MetaInfoClass('ConsoleIdentity',
False,
[
],
'ietf-syslog-types',
'console',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'LprIdentity' : {
'meta_info' : _MetaInfoClass('LprIdentity',
False,
[
],
'ietf-syslog-types',
'lpr',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local1Identity' : {
'meta_info' : _MetaInfoClass('Local1Identity',
False,
[
],
'ietf-syslog-types',
'local1',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'AuthIdentity' : {
'meta_info' : _MetaInfoClass('AuthIdentity',
False,
[
],
'ietf-syslog-types',
'auth',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local2Identity' : {
'meta_info' : _MetaInfoClass('Local2Identity',
False,
[
],
'ietf-syslog-types',
'local2',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local5Identity' : {
'meta_info' : _MetaInfoClass('Local5Identity',
False,
[
],
'ietf-syslog-types',
'local5',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Local7Identity' : {
'meta_info' : _MetaInfoClass('Local7Identity',
False,
[
],
'ietf-syslog-types',
'local7',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'UucpIdentity' : {
'meta_info' : _MetaInfoClass('UucpIdentity',
False,
[
],
'ietf-syslog-types',
'uucp',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'NewsIdentity' : {
'meta_info' : _MetaInfoClass('NewsIdentity',
False,
[
],
'ietf-syslog-types',
'news',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'AuthprivIdentity' : {
'meta_info' : _MetaInfoClass('AuthprivIdentity',
False,
[
],
'ietf-syslog-types',
'authpriv',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'Cron2Identity' : {
'meta_info' : _MetaInfoClass('Cron2Identity',
False,
[
],
'ietf-syslog-types',
'cron2',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
'SyslogIdentity' : {
'meta_info' : _MetaInfoClass('SyslogIdentity',
False,
[
],
'ietf-syslog-types',
'syslog',
_yang_ns._namespaces['ietf-syslog-types'],
'ydk.models.ietf.ietf_syslog_types'
),
},
}
|
|
from __future__ import unicode_literals
import re
from datetime import date, datetime
from decimal import Decimal
from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.formats import number_format
from django.utils.translation import pgettext, ungettext, ugettext as _
from django.utils.timezone import is_aware, utc
register = template.Library()
@register.filter(is_safe=True)
def ordinal(value):
"""
Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
suffixes = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th'))
if value % 100 in (11, 12, 13): # special case
return "%d%s" % (value, suffixes[0])
return "%d%s" % (value, suffixes[value % 10])
@register.filter(is_safe=True)
def intcomma(value, use_l10n=True):
"""
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
if settings.USE_L10N and use_l10n:
try:
if not isinstance(value, (float, Decimal)):
value = int(value)
except (TypeError, ValueError):
return intcomma(value, False)
else:
return number_format(value, force_grouping=True)
orig = force_text(value)
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new, use_l10n)
# A tuple of standard large number to their converters
intword_converters = (
(6, lambda number: (
ungettext('%(value).1f million', '%(value).1f million', number),
ungettext('%(value)s million', '%(value)s million', number),
)),
(9, lambda number: (
ungettext('%(value).1f billion', '%(value).1f billion', number),
ungettext('%(value)s billion', '%(value)s billion', number),
)),
(12, lambda number: (
ungettext('%(value).1f trillion', '%(value).1f trillion', number),
ungettext('%(value)s trillion', '%(value)s trillion', number),
)),
(15, lambda number: (
ungettext('%(value).1f quadrillion', '%(value).1f quadrillion', number),
ungettext('%(value)s quadrillion', '%(value)s quadrillion', number),
)),
(18, lambda number: (
ungettext('%(value).1f quintillion', '%(value).1f quintillion', number),
ungettext('%(value)s quintillion', '%(value)s quintillion', number),
)),
(21, lambda number: (
ungettext('%(value).1f sextillion', '%(value).1f sextillion', number),
ungettext('%(value)s sextillion', '%(value)s sextillion', number),
)),
(24, lambda number: (
ungettext('%(value).1f septillion', '%(value).1f septillion', number),
ungettext('%(value)s septillion', '%(value)s septillion', number),
)),
(27, lambda number: (
ungettext('%(value).1f octillion', '%(value).1f octillion', number),
ungettext('%(value)s octillion', '%(value)s octillion', number),
)),
(30, lambda number: (
ungettext('%(value).1f nonillion', '%(value).1f nonillion', number),
ungettext('%(value)s nonillion', '%(value)s nonillion', number),
)),
(33, lambda number: (
ungettext('%(value).1f decillion', '%(value).1f decillion', number),
ungettext('%(value)s decillion', '%(value)s decillion', number),
)),
(100, lambda number: (
ungettext('%(value).1f googol', '%(value).1f googol', number),
ungettext('%(value)s googol', '%(value)s googol', number),
)),
)
@register.filter(is_safe=False)
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best
for numbers over 1 million. For example, 1000000 becomes '1.0 million',
1200000 becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000000:
return value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
value = defaultfilters.floatformat(value, 1)
template = string_formatted
else:
template = float_formatted
return template % {'value': value}
for exponent, converters in intword_converters:
large_number = 10 ** exponent
if value < large_number * 1000:
new_value = value / float(large_number)
return _check_for_i18n(new_value, *converters(new_value))
return value
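# Illustrative sketch (not part of the original module): how a value flows
# through `intword_converters` above. For 1200000000 the loop stops at the
# exponent-9 entry, divides by 10**9 and formats the result, so (ignoring
# translation of the unit name) the filter returns roughly:
#
#   >>> intword(1200000000)
#   '1.2 billion'
#   >>> intword(4500)        # below one million, returned unchanged
#   4500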
@register.filter(is_safe=True)
def apnumber(value):
"""
For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return value
return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'), _('seven'), _('eight'), _('nine'))[value-1]
# Perform the comparison in the default time zone when USE_TZ = True
# (unless a specific time zone has been applied with the |timezone filter).
@register.filter(expects_localtime=True)
def naturalday(value, arg=None):
"""
    For date values that are tomorrow, today or yesterday compared to the
    present day, returns a representative string ('tomorrow', 'today' or
    'yesterday'). Otherwise, returns a string formatted according to
    settings.DATE_FORMAT.
"""
try:
tzinfo = getattr(value, 'tzinfo', None)
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object
return value
except ValueError:
# Date arguments out of range
return value
today = datetime.now(tzinfo).date()
delta = value - today
if delta.days == 0:
return _('today')
elif delta.days == 1:
return _('tomorrow')
elif delta.days == -1:
return _('yesterday')
return defaultfilters.date(value, arg)
# This filter doesn't require expects_localtime=True because it deals properly
# with both naive and aware datetimes. Therefore avoid the cost of conversion.
@register.filter
def naturaltime(value):
"""
    For date and time values, shows how many seconds, minutes or hours ago
    (or from now) the value is compared to the current timestamp, and
    returns the corresponding string.
"""
if not isinstance(value, date): # datetime is a subclass of date
return value
now = datetime.now(utc if is_aware(value) else None)
if value < now:
delta = now - value
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s ago'
) % {'delta': defaultfilters.timesince(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: \\u00a0 is non-breaking space
'a second ago', '%(count)s\u00a0seconds ago', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: \\u00a0 is non-breaking space
'a minute ago', '%(count)s\u00a0minutes ago', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: \\u00a0 is non-breaking space
'an hour ago', '%(count)s\u00a0hours ago', count
) % {'count': count}
else:
delta = value - now
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s from now'
) % {'delta': defaultfilters.timeuntil(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: \\u00a0 is non-breaking space
'a second from now', '%(count)s\u00a0seconds from now', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: \\u00a0 is non-breaking space
'a minute from now', '%(count)s\u00a0minutes from now', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: \\u00a0 is non-breaking space
'an hour from now', '%(count)s\u00a0hours from now', count
) % {'count': count}
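# Illustrative sketch (not part of the original module): rough behaviour of
# `naturaltime` for a few deltas. Actual strings depend on the active locale
# and use a non-breaking space between count and unit:
#
#   value == now                      -> 'now'
#   value 30 seconds in the past      -> '30 seconds ago'
#   value 5 minutes in the future     -> '5 minutes from now'
#   value more than a day in the past -> '<timesince(value)> ago'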
|
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
"""gGRC Collection REST services implementation. Common to all gGRC collection
resources.
"""
import datetime
import hashlib
import logging
import time
from exceptions import TypeError
from wsgiref.handlers import format_date_time
from urllib import urlencode
from blinker import Namespace
from flask import url_for, request, current_app, g, has_request_context
from flask.views import View
from sqlalchemy import and_, or_
from sqlalchemy.exc import IntegrityError
import sqlalchemy.orm.exc
from werkzeug.exceptions import BadRequest, Forbidden
from sqlalchemy.orm.properties import RelationshipProperty
import ggrc.builder.json
from flask.ext.sqlalchemy import Pagination
from ggrc import db, utils
from ggrc.utils import as_json, UnicodeSafeJsonWrapper, benchmark
from ggrc.fulltext import get_indexer
from ggrc.fulltext.recordbuilder import fts_record_for
from ggrc.login import get_current_user_id, get_current_user
from ggrc.models.cache import Cache
from ggrc.models.event import Event
from ggrc.models.revision import Revision
from ggrc.models.exceptions import ValidationError, translate_message
from ggrc.rbac import permissions, context_query_filter
from .attribute_query import AttributeQueryBuilder
from ggrc.models.background_task import BackgroundTask, create_task
from ggrc import settings
CACHE_EXPIRY_COLLECTION = 60
def get_oauth_credentials():
from flask import session
  if 'oauth_credentials' in session:
return session['oauth_credentials']
else:
return None
def _get_cache_manager():
from ggrc.cache import CacheManager, MemCache
cache_manager = CacheManager()
cache_manager.initialize(MemCache())
return cache_manager
def get_cache_key(obj, type=None, id=None):
"""Returns a string identifier for the specified object or stub.
`obj` can be:
<db.Model> -- declarative model instance
(type, id) -- tuple
{ 'type': type, 'id': id } -- dict
"""
if isinstance(obj, tuple):
type, id = obj
elif isinstance(obj, dict):
type = obj.get('type', None)
id = obj.get('id', None)
if isinstance(type, (str, unicode)):
model = ggrc.models.get_model(type)
assert model is not None, "Invalid model name: {}".format(type)
type = ggrc.models.get_model(type)._inflector.table_plural
if not isinstance(obj, (tuple, dict)):
if type is None:
type = obj._inflector.table_plural
if id is None:
id = obj.id
return 'collection:{type}:{id}'.format(type=type, id=id)
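# Illustrative sketch (not part of the original module): the three accepted
# argument forms produce the same key. `Control` / `controls` are only a
# hypothetical model name and plural table name here.
#
#   get_cache_key(('Control', 42))               -> 'collection:controls:42'
#   get_cache_key({'type': 'Control', 'id': 42}) -> 'collection:controls:42'
#   get_cache_key(control_instance)              -> 'collection:controls:42'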
def get_cache_class(obj):
return obj.__class__.__name__
def get_related_keys_for_expiration(context, o):
cls = get_cache_class(o)
keys = []
mappings = context.cache_manager.supported_mappings.get(cls, [])
if len(mappings) > 0:
for (cls, attr, polymorph) in mappings:
if polymorph:
key = get_cache_key(
None,
type=getattr(o, '{0}_type'.format(attr)),
id=getattr(o, '{0}_id'.format(attr)))
keys.append(key)
else:
obj = getattr(o, attr, None)
if obj:
if isinstance(obj, list):
for inner_o in obj:
key = get_cache_key(inner_o)
keys.append(key)
else:
key = get_cache_key(obj)
keys.append(key)
return keys
def set_ids_for_new_custom_attribute_values(objects, obj):
"""
When we are creating custom attribute values for
POST requests, obj.id is not yet defined. This is why we update
custom attribute values at this point and set the correct attributable_id
Args:
    objects: newly created objects (we update only the ones that are CustomAttributeValue)
obj: parent object to be set as attributable
Returns:
None
"""
from ggrc.models.custom_attribute_value import CustomAttributeValue
for object in objects:
if not isinstance(object, CustomAttributeValue):
continue
object.attributable_id = obj.id
    # Disable state updating so that a newly created object doesn't go straight
# from Draft to Modified.
if hasattr(object, '_skip_os_state_update'):
object.skip_os_state_update()
db.session.add(object)
db.session.flush()
def update_memcache_before_commit(context, modified_objects, expiry_time):
"""
  Prepare the memcache entries to be updated before the DB commit.
  Also mark each affected entry's status as 'InProgress' in memcache while
  waiting for the DB commit.
  Raises an exception on failure, in which case the DB commit cannot proceed.
  Args:
    context: POST/PUT/DELETE HTTP request or import Converter contextual object
    modified_objects: objects in cache maintained prior to committing to DB
expiry_time: Expiry time specified for memcache ADD and DELETE
Returns:
None
"""
if getattr(settings, 'MEMCACHE_MECHANISM', False) is False:
return
context.cache_manager = _get_cache_manager()
if len(modified_objects.new) > 0:
items_to_add = modified_objects.new.items()
for o, json_obj in items_to_add:
cls = get_cache_class(o)
if context.cache_manager.supported_classes.has_key(cls):
key = get_cache_key(o)
context.cache_manager.marked_for_delete.append(key)
context.cache_manager.marked_for_delete.extend(
get_related_keys_for_expiration(context, o))
if len(modified_objects.dirty) > 0:
items_to_update = modified_objects.dirty.items()
for o, json_obj in items_to_update:
cls = get_cache_class(o)
if context.cache_manager.supported_classes.has_key(cls):
key = get_cache_key(o)
context.cache_manager.marked_for_delete.append(key)
context.cache_manager.marked_for_delete.extend(
get_related_keys_for_expiration(context, o))
if len(modified_objects.deleted) > 0:
items_to_delete = modified_objects.deleted.items()
for o, json_obj in items_to_delete:
cls = get_cache_class(o)
if context.cache_manager.supported_classes.has_key(cls):
# FIXME: is explicit `id=...` *required* here to avoid querying the
# database for a possibly-deleted object?
key = get_cache_key(o)#, id=json_obj['id'])
context.cache_manager.marked_for_delete.append(key)
context.cache_manager.marked_for_delete.extend(
get_related_keys_for_expiration(context, o))
status_entries = {}
for key in context.cache_manager.marked_for_delete:
build_cache_status(status_entries, 'DeleteOp:' + key, expiry_time, 'InProgress')
if len(status_entries) > 0:
current_app.logger.info("CACHE: status entries: " + str(status_entries))
ret = context.cache_manager.bulk_add(status_entries, expiry_time)
if ret is not None and len(ret) == 0:
pass
else:
current_app.logger.error('CACHE: Unable to add status for newly created entries in memcache ' + str(ret))
def update_memcache_after_commit(context):
"""
  Update the memcache entries after the DB commit.
  Logs an error if updating any entry in the cache fails.
  Args:
    context: POST/PUT/DELETE HTTP request or import Converter contextual object
Returns:
None
"""
if getattr(settings, 'MEMCACHE_MECHANISM', False) is False:
return
if context.cache_manager is None:
current_app.logger.error("CACHE: Error in initiaizing cache manager")
return
cache_manager = context.cache_manager
# TODO(dan): check for duplicates in marked_for_delete
if len(cache_manager.marked_for_delete) > 0:
#current_app.logger.info("CACHE: Bulk Delete: " + str(cache_manager.marked_for_delete))
delete_result = cache_manager.bulk_delete(cache_manager.marked_for_delete, 0)
# TODO(dan): handling failure including network errors, currently we log errors
if delete_result is not True:
current_app.logger.error("CACHE: Failed to remoe collection from cache")
status_entries = []
for key in cache_manager.marked_for_delete:
status_entries.append('DeleteOp:' + str(key))
if len(status_entries) > 0:
delete_result = cache_manager.bulk_delete(status_entries, 0)
# TODO(dan): handling failure including network errors, currently we log errors
if delete_result is not True:
current_app.logger.error("CACHE: Failed to remove status entries from cache")
cache_manager.clear_cache()
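# Illustrative sketch (not part of the original module): the two memcache hooks
# above are used by the request handlers further down in this file
# (Resource.put / delete / collection_post) in the following order:
#
#   modified_objects = get_modified_objects(db.session)
#   update_memcache_before_commit(request, modified_objects, CACHE_EXPIRY_COLLECTION)
#   db.session.commit()
#   update_memcache_after_commit(request)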
def build_cache_status(data, key, expiry_timeout, status):
"""
Build the dictionary for storing operational status of cache
Args:
data: dictionary to update
key: key to dictionary
expiry_timeout: timeout for expiry cache
    status: update status entry, e.g. 'InProgress'
Returns:
None
"""
data[key] = {'expiry': expiry_timeout, 'status': status}
def inclusion_filter(obj):
return permissions.is_allowed_read(obj.__class__.__name__, obj.id, obj.context_id)
def get_cache(create=False):
"""
Retrieves the cache from the Flask global object. The create arg
indicates if a new cache should be created if none exists. If we
are not in a request context, no cache is created (return None).
"""
if has_request_context():
cache = getattr(g, 'cache', None)
if cache is None and create:
cache = g.cache = Cache()
return cache
else:
logging.warning("No request context - no cache created")
return None
def get_modified_objects(session):
session.flush()
cache = get_cache()
if cache:
return cache.copy()
else:
return None
def update_index(session, cache):
if cache:
indexer = get_indexer()
for obj in cache.new:
indexer.create_record(fts_record_for(obj), commit=False)
for obj in cache.dirty:
indexer.update_record(fts_record_for(obj), commit=False)
for obj in cache.deleted:
indexer.delete_record(obj.id, obj.__class__.__name__, commit=False)
session.commit()
def log_event(session, obj=None, current_user_id=None):
revisions = []
session.flush()
if current_user_id is None:
current_user_id = get_current_user_id()
cache = get_cache()
for o, log_json in cache.dirty.items():
revision = Revision(o, current_user_id, 'modified', log_json)
revisions.append(revision)
for o, log_json in cache.deleted.items():
revision = Revision(o, current_user_id, 'deleted', log_json)
revisions.append(revision)
for o, log_json in cache.new.items():
revision = Revision(o, current_user_id, 'created', log_json)
revisions.append(revision)
if obj is None:
resource_id = 0
resource_type = None
action = 'IMPORT'
context_id = 0
else:
resource_id = obj.id
resource_type = str(obj.__class__.__name__)
action = request.method
context_id = obj.context_id
if revisions:
event = Event(
modified_by_id=current_user_id,
action=action,
resource_id=resource_id,
resource_type=resource_type,
context_id=context_id)
event.revisions = revisions
session.add(event)
class ModelView(View):
DEFAULT_PAGE_SIZE = 20
MAX_PAGE_SIZE = 100
pk = 'id'
pk_type = 'int'
_model = None
# Simple accessor properties
@property
def request(self):
return request
@property
def model(self):
return self._model
@property
def modified_attr_name(self):
return 'updated_at'
@property
def modified_attr(self):
"""Return the model attribute to be used for Last-Modified header and
sorting collection elements.
"""
return getattr(self.model, self.modified_attr_name)
def modified_at(self, obj):
return getattr(obj, self.modified_attr_name)
def _get_type_select_column(self, model):
mapper = model._sa_class_manager.mapper
if mapper.polymorphic_on is None:
#if len(mapper.self_and_descendants) == 1:
type_column = sqlalchemy.literal(mapper.class_.__name__)
else:
# Handle polymorphic types with CASE
type_column = sqlalchemy.case(
value=mapper.polymorphic_on,
whens={
val: m.class_.__name__
for val, m in mapper.polymorphic_map.items()
})
return type_column
def _get_type_where_clause(self, model):
mapper = model._sa_class_manager.mapper
if mapper.polymorphic_on is None:
return True
else:
mappers = list(mapper.self_and_descendants)
polymorphic_on_values = list(
val
for val, m in mapper.polymorphic_map.items()
if m in mappers)
return mapper.polymorphic_on.in_(polymorphic_on_values)
#def _get_polymorphic_column(self, model):
# mapper = model._sa_class_manager.mapper
# if len(mapper.self_and_descendants) > 1:
# return mapper.polymorphic_on
# else:
# return sqlalchemy.literal(mapper.class_.__name__)
def _get_matching_types(self, model):
mapper = model._sa_class_manager.mapper
if len(list(mapper.self_and_descendants)) == 1:
return mapper.class_.__name__
else:
# FIXME: Actually needs to use 'self_and_descendants'
return [m.class_.__name__ for m in mapper.self_and_descendants]
def get_match_columns(self, model):
mapper = model._sa_class_manager.mapper
columns = []
columns.append(mapper.primary_key[0].label('id'))
#columns.append(model.id.label('id'))
columns.append(self._get_type_select_column(model).label('type'))
if hasattr(mapper.c, 'context_id'):
columns.append(mapper.c.context_id.label('context_id'))
if hasattr(mapper.c, 'updated_at'):
columns.append(mapper.c.updated_at.label('updated_at'))
#columns.append(self._get_polymorphic_column(model))
return columns
def get_collection_matches(self, model, filter_by_contexts=True):
columns = self.get_match_columns(self.model)
query = db.session.query(*columns).filter(
self._get_type_where_clause(model))
return self.filter_query_by_request(
query, filter_by_contexts=filter_by_contexts)
def get_resource_match_query(self, model, id):
columns = self.get_match_columns(model)
query = db.session.query(*columns).filter(
and_(
self._get_type_where_clause(model),
columns[0] == id))
return query
# Default model/DB helpers
def get_collection(self, filter_by_contexts=True):
if '__stubs_only' not in request.args and \
hasattr(self.model, 'eager_query'):
query = self.model.eager_query()
else:
query = db.session.query(self.model)
return self.filter_query_by_request(
query, filter_by_contexts=filter_by_contexts)
def filter_query_by_request(self, query, filter_by_contexts=True):
joinlist = []
if request.args:
querybuilder = AttributeQueryBuilder(self.model)
filter, joinlist, options = querybuilder.collection_filters(request.args)
if filter is not None:
for j in joinlist:
query = query.join(j)
query = query.filter(filter)
if filter_by_contexts:
contexts = permissions.read_contexts_for(self.model.__name__)
resources = permissions.read_resources_for(self.model.__name__)
filter_expr = context_query_filter(self.model.context_id, contexts)
if resources:
filter_expr = or_(filter_expr, self.model.id.in_(resources))
query = query.filter(filter_expr)
for j in joinlist:
j_class = j.property.mapper.class_
j_contexts = permissions.read_contexts_for(j_class.__name__)
j_resources = permissions.read_resources_for(j_class.__name__)
if j_contexts is not None:
j_filter_expr = context_query_filter(j_class.context_id, j_contexts)
if resources:
j_filter_expr = or_(j_filter_expr, self.model.id.in_(j_resources))
query = query.filter(j_filter_expr)
elif resources:
query = query.filter(self.model.id.in_(resources))
if '__search' in request.args:
terms = request.args['__search']
types = self._get_matching_types(self.model)
indexer = get_indexer()
models = indexer._get_grouped_types(types)
search_query = indexer._get_type_query(models, 'read', None)
search_query = and_(search_query, indexer._get_filter_query(terms))
search_query = db.session.query(indexer.record_type.key).filter(search_query)
if '__mywork' in request.args:
search_query = indexer._add_owner_query(
search_query, models, get_current_user_id())
search_subquery = search_query.subquery()
query = query.filter(self.model.id.in_(search_subquery))
order_properties = []
if '__sort' in request.args:
sort_attrs = request.args['__sort'].split(",")
sort_desc = request.args.get('__sort_desc', False)
for sort_attr in sort_attrs:
attr_desc = sort_desc
if sort_attr.startswith('-'):
attr_desc = not sort_desc
sort_attr = sort_attr[1:]
order_property = getattr(self.model, sort_attr, None)
if order_property and hasattr(order_property, 'desc'):
if attr_desc:
order_property = order_property.desc()
order_properties.append(order_property)
else:
# Possibly throw an exception instead, if sorting by invalid attribute?
pass
order_properties.append(self.modified_attr.desc())
order_properties.append(self.model.id.desc())
query = query.order_by(*order_properties)
if '__limit' in request.args:
try:
limit = int(request.args['__limit'])
query = query.limit(limit)
except (TypeError, ValueError):
pass
query = query.distinct()
return query
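  # Illustrative sketch (not part of the original module): query-string
  # parameters handled above, combined in one hypothetical request (the URL
  # and attribute names are made up):
  #
  #   GET /api/controls?__search=security&__sort=-title&__limit=10&__mywork=1
  #
  # __search restricts ids via the fulltext index, __sort=-title orders by
  # title descending (falling back to updated_at and id), __limit caps the
  # number of rows, and __mywork additionally filters the fulltext query by
  # the current user.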
def get_object(self, id):
# This could also use `self.pk`
# .one() is required as long as any .eager_load() adds joins using
# 'contains_eager()' to the core query, because 'LIMIT 1' breaks up
# that JOIN result (e.g. Categorizable)
try:
return self.get_collection(filter_by_contexts=False)\
.filter(self.model.id == id).one()
except sqlalchemy.orm.exc.NoResultFound:
return None
def not_found_message(self):
return '{0} not found.'.format(self.model._inflector.title_singular)
def not_found_response(self):
return current_app.make_response((self.not_found_message(), 404, []))
def collection_last_modified(self):
"""Calculate the last time a member of the collection was modified. This
method relies on the fact that the collection table has an `updated_at` or
other column with a relevant timestamp; services for models that don't have
this field **MUST** override this method.
"""
result = db.session.query(
self.modified_attr).order_by(self.modified_attr.desc()).first()
if result is not None:
return self.modified_at(result)
return datetime.datetime.now()
# Routing helpers
@classmethod
def endpoint_name(cls):
return cls.__name__
@classmethod
def url_for_preserving_querystring(cls, *args, **kwargs):
url = cls.url_for(*args, **kwargs)
# preserve original query string
idx = request.url.find('?')
querystring = '' if idx < 0 else '?' + request.url[idx+1:]
return url + querystring
@classmethod
def base_url_for(cls, _memoized_base_url={}):
if cls not in _memoized_base_url:
_memoized_base_url[cls] = url_for(cls.endpoint_name())
return _memoized_base_url[cls]
@classmethod
def url_for(cls, *args, **kwargs):
url = cls.base_url_for()
if len(args) > 0:
arg = args[0]
id = arg if not isinstance(arg, db.Model) else arg.id
url = url + '/' + str(id)
if 'id' in kwargs:
url = url + '/' + str(kwargs['id'])
del kwargs['id']
if len(kwargs) > 0:
url = url + '?' + urlencode(kwargs)
return url
@classmethod
def decorate_view_func(cls, view_func, decorators):
if not isinstance(decorators, (list, tuple)):
decorators = (decorators,)
for decorator in reversed(decorators):
view_func = decorator(view_func)
return view_func
# View base class for Views handling
# - /resources (GET, POST)
# - /resources/<pk:pk_type> (GET, PUT, POST, DELETE)
class Resource(ModelView):
"""View base class for Views handling. Will typically be registered with an
application following a collection style for routes. Collection `GET` and
`POST` will have a route like `/resources` while collection member
  resource routes will have routes like `/resources/<pk:pk_type>`.
To register a Resource subclass FooCollection with a Flask application:
..
FooCollection.add_to(app, '/foos')
By default will only support the `application/json` content-type.
"""
signals = Namespace()
model_posted = signals.signal(
'Model POSTed',
"""
Indicates that a model object was received via POST and will be committed
to the database. The sender in the signal will be the model class of the
POSTed resource. The following arguments will be sent along with the
signal:
:obj: The model instance created from the POSTed JSON.
:src: The original POSTed JSON dictionary.
:service: The instance of Resource handling the POST request.
""",)
model_put = signals.signal(
'Model PUT',
"""
Indicates that a model object update was received via PUT and will be
updated in the database. The sender in the signal will be the model class
of the PUT resource. The following arguments will be sent along with the
signal:
:obj: The model instance updated from the PUT JSON.
:src: The original PUT JSON dictionary.
:service: The instance of Resource handling the PUT request.
""",)
model_deleted = signals.signal(
'Model DELETEd',
"""
Indicates that a model object was DELETEd and will be removed from the
      database. The sender in the signal will be the model class of the DELETEd
      resource. The following arguments will be sent along with the signal:
:obj: The model instance removed.
:service: The instance of Resource handling the DELETE request.
""",)
def dispatch_request(self, *args, **kwargs):
with benchmark("Dispatch request"):
with benchmark("dispatch_request > Check Headers"):
method = request.method
if method in ('POST', 'PUT', 'DELETE')\
and 'X-Requested-By' not in request.headers:
raise BadRequest('X-Requested-By header is REQUIRED.')
with benchmark("dispatch_request > Try"):
try:
if method == 'GET':
if self.pk in kwargs and kwargs[self.pk] is not None:
return self.get(*args, **kwargs)
else:
return self.collection_get()
elif method == 'POST':
if self.pk in kwargs and kwargs[self.pk] is not None:
return self.post(*args, **kwargs)
else:
return self.collection_post()
elif method == 'PUT':
return self.put(*args, **kwargs)
elif method == 'DELETE':
return self.delete(*args, **kwargs)
else:
raise NotImplementedError()
except (IntegrityError, ValidationError) as v:
message = translate_message(v)
current_app.logger.warn(message)
return (message, 403, [])
except Exception as e:
current_app.logger.exception(e)
raise
def post(*args, **kwargs):
raise NotImplementedError()
def get(self, id):
"""Default JSON request handlers"""
with benchmark("Query for object"):
obj = self.get_object(id)
if obj is None:
return self.not_found_response()
if 'Accept' in self.request.headers and \
'application/json' not in self.request.headers['Accept']:
return current_app.make_response((
'application/json', 406, [('Content-Type', 'text/plain')]))
with benchmark("Query read permissions"):
if not permissions.is_allowed_read(self.model.__name__, obj.id, obj.context_id):
raise Forbidden()
if not permissions.is_allowed_read_for(obj):
raise Forbidden()
with benchmark("Serialize object"):
object_for_json = self.object_for_json(obj)
if 'If-None-Match' in self.request.headers and \
self.request.headers['If-None-Match'] == etag(object_for_json):
with benchmark("Make response"):
return current_app.make_response(
('', 304, [('Etag', etag(object_for_json))]))
with benchmark("Make response"):
return self.json_success_response(
object_for_json, self.modified_at(obj))
def validate_headers_for_put_or_delete(self, obj):
"""rfc 6585 defines a new status code for missing required headers"""
required_headers = set(['If-Match', 'If-Unmodified-Since'])
missing_headers = required_headers.difference(set(self.request.headers.keys()))
if missing_headers:
return current_app.make_response(
('required headers: ' + ', '.join(missing_headers),
428, [('Content-Type', 'text/plain')]))
if request.headers['If-Match'] != etag(self.object_for_json(obj)) or \
request.headers['If-Unmodified-Since'] != \
self.http_timestamp(self.modified_at(obj)):
return current_app.make_response((
'The resource could not be updated due to a conflict with the '
'current state on the server. Please resolve the conflict by '
'refreshing the resource.',
409,
[('Content-Type', 'text/plain')]
))
return None
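  # Illustrative sketch (not part of the original module): headers a client is
  # expected to send on PUT/DELETE, echoing back what it received from a prior
  # GET of the same resource. Missing headers yield 428, stale values yield 409.
  #
  #   If-Match: "<etag from the GET response>"
  #   If-Unmodified-Since: <Last-Modified value from the GET response>
  #   X-Requested-By: <anything>   (checked separately in dispatch_request)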
def json_update(self, obj, src):
ggrc.builder.json.update(obj, src)
def put(self, id):
with benchmark("Query for object"):
obj = self.get_object(id)
if obj is None:
return self.not_found_response()
src = UnicodeSafeJsonWrapper(self.request.json)
with benchmark("Query update permissions"):
if not permissions.is_allowed_update(self.model.__name__, obj.id, obj.context_id):
raise Forbidden()
if not permissions.is_allowed_update_for(obj):
raise Forbidden()
new_context = self.get_context_id_from_json(src)
if new_context != obj.context_id \
and not permissions.is_allowed_update(self.model.__name__, obj.id, new_context):
raise Forbidden()
if self.request.mimetype != 'application/json':
return current_app.make_response(
('Content-Type must be application/json', 415, []))
header_error = self.validate_headers_for_put_or_delete(obj)
if header_error:
return header_error
root_attribute = self.model._inflector.table_singular
try:
src = src[root_attribute]
except KeyError, e:
return current_app.make_response((
'Required attribute "{0}" not found'.format(root_attribute), 400, []))
with benchmark("Deserialize object"):
self.json_update(obj, src)
obj.modified_by_id = get_current_user_id()
db.session.add(obj)
with benchmark("Send PUTed event"):
self.model_put.send(obj.__class__, obj=obj, src=src, service=self)
with benchmark("Get modified objects"):
modified_objects = get_modified_objects(db.session)
with benchmark("Log event"):
log_event(db.session, obj)
with benchmark("Update memcache before commit for resource collection PUT"):
update_memcache_before_commit(self.request, modified_objects, CACHE_EXPIRY_COLLECTION)
with benchmark("Commit"):
db.session.commit()
with benchmark("Query for object"):
obj = self.get_object(id)
with benchmark("Update index"):
update_index(db.session, modified_objects)
with benchmark("Update memcache after commit for resource collection PUT"):
update_memcache_after_commit(self.request)
with benchmark("Serialize collection"):
object_for_json = self.object_for_json(obj)
with benchmark("Make response"):
return self.json_success_response(
object_for_json, self.modified_at(obj))
def delete(self, id):
if 'X-Appengine-Taskname' not in request.headers:
task = create_task(request.method, request.full_path)
if getattr(settings, 'APP_ENGINE', False):
return self.json_success_response(
self.object_for_json(task, 'background_task'),
self.modified_at(task))
else:
task = BackgroundTask.query.get(request.args.get("task_id"))
task.start()
try:
with benchmark("Query for object"):
obj = self.get_object(id)
if obj is None:
return self.not_found_response()
with benchmark("Query delete permissions"):
if not permissions.is_allowed_delete(self.model.__name__, obj.id, obj.context_id):
raise Forbidden()
if not permissions.is_allowed_delete_for(obj):
raise Forbidden()
header_error = self.validate_headers_for_put_or_delete(obj)
if header_error:
return header_error
db.session.delete(obj)
with benchmark("Send DELETEd event"):
self.model_deleted.send(obj.__class__, obj=obj, service=self)
with benchmark("Get modified objects"):
modified_objects = get_modified_objects(db.session)
with benchmark("Log event"):
log_event(db.session, obj)
with benchmark("Update memcache before commit for resource collection DELETE"):
update_memcache_before_commit(self.request, modified_objects, CACHE_EXPIRY_COLLECTION)
with benchmark("Commit"):
db.session.commit()
with benchmark("Update index"):
update_index(db.session, modified_objects)
with benchmark("Update memcache after commit for resource collection DELETE"):
update_memcache_after_commit(self.request)
with benchmark("Query for object"):
object_for_json = self.object_for_json(obj)
with benchmark("Make response"):
result = self.json_success_response(
object_for_json, self.modified_at(obj))
except:
import traceback
task.finish("Failure", traceback.format_exc())
raise
task.finish("Success", result)
return result
def has_cache(self):
return getattr(settings, 'MEMCACHE_MECHANISM', False)
def apply_paging(self, matches_query):
page_size = min(
int(request.args.get('__page_size', self.DEFAULT_PAGE_SIZE)),
self.MAX_PAGE_SIZE)
if '__page_only' in request.args:
page_number = int(request.args.get('__page', 0))
matches = []
total = matches_query.count()
else:
page_number = int(request.args.get('__page', 1))
matches = matches_query\
.limit(page_size)\
.offset((page_number-1)*page_size)\
.all()
if page_number == 1 and len(matches) < page_size:
total = len(matches)
else:
total = matches_query.count()
page = Pagination(
matches_query, page_number, page_size, total, matches)
collection_extras = {
'paging': self.build_page_object_for_json(page)
}
return matches, collection_extras
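  # Illustrative sketch (not part of the original module): paging parameters
  # consumed by apply_paging above, for a hypothetical collection URL.
  #
  #   GET /api/controls?__page=2&__page_size=50   -> rows 51-100 plus a
  #       'paging' extra with first/prev/next/last links
  #   GET /api/controls?__page_only=true          -> no rows, only the total
  #       count and the paging links
  #
  # __page_size is capped at MAX_PAGE_SIZE (100).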
def get_matched_resources(self, matches):
cache_objs = {}
if self.has_cache():
self.request.cache_manager = _get_cache_manager()
with benchmark("Query cache for resources"):
cache_objs = self.get_resources_from_cache(matches)
database_matches = [m for m in matches if m not in cache_objs]
else:
database_matches = matches
database_objs = {}
if len(database_matches) > 0:
with benchmark("Query database for resources"):
database_objs = self.get_resources_from_database(matches)
if self.has_cache():
with benchmark("Add resources to cache"):
self.add_resources_to_cache(database_objs)
return cache_objs, database_objs
def collection_get(self):
with benchmark("dispatch_request > collection_get > Check headers"):
if 'Accept' in self.request.headers and \
'application/json' not in self.request.headers['Accept']:
return current_app.make_response((
'application/json', 406, [('Content-Type', 'text/plain')]))
with benchmark("dispatch_request > collection_get > Get collection matches"):
# We skip querying by contexts for Creator role and relationship objects,
# because it will filter out objects that the Creator can access.
# We are doing a special permissions check for these objects
# below in the filter_resource method.
filter_by_contexts = not (self.model.__name__ == "Relationship" and _is_creator())
matches_query = self.get_collection_matches(self.model, filter_by_contexts)
with benchmark("dispatch_request > collection_get > Query Data"):
if '__page' in request.args or '__page_only' in request.args:
with benchmark("Query matches with paging"):
matches, extras = self.apply_paging(matches_query)
else:
with benchmark("Query matches"):
matches = matches_query.all()
extras = {}
with benchmark("dispatch_request > collection_get > Get matched resources"):
cache_op = None
if '__stubs_only' in request.args:
objs = [{
'id': m[0],
'type': m[1],
'href': utils.url_for(m[1], id=m[0]),
'context_id': m[2]
} for m in matches]
else:
cache_objs, database_objs = self.get_matched_resources(matches)
objs = {}
objs.update(cache_objs)
objs.update(database_objs)
objs = [objs[m] for m in matches if m in objs]
with benchmark("Filter resources based on permissions"):
objs = filter_resource(objs)
cache_op = 'Hit' if len(cache_objs) > 0 else 'Miss'
with benchmark("dispatch_request > collection_get > Create Response"):
# Return custom fields specified via `__fields=id,title,description` etc.
      # TODO this can be optimized by filter_resource() not retrieving the other fields to begin with
if '__fields' in request.args:
custom_fields = request.args['__fields'].split(',')
objs = [
{f: o[f] for f in custom_fields if f in o}
for o in objs]
with benchmark("Serialize collection"):
collection = self.build_collection_representation(
objs, extras=extras)
if 'If-None-Match' in self.request.headers and \
self.request.headers['If-None-Match'] == etag(collection):
return current_app.make_response((
'', 304, [('Etag', etag(collection))]))
with benchmark("Make response"):
return self.json_success_response(
collection, self.collection_last_modified(), cache_op=cache_op)
def get_resources_from_cache(self, matches):
"""Get resources from cache for specified matches"""
resources = {}
# Skip right to memcache
memcache_client = self.request.cache_manager.cache_object.memcache_client
key_matches = {}
keys = []
for match in matches:
key = get_cache_key(None, id=match[0], type=match[1])
key_matches[key] = match
keys.append(key)
while len(keys) > 0:
slice_keys = keys[:32]
keys = keys[32:]
result = memcache_client.get_multi(slice_keys)
for key in result:
if 'selfLink' in result[key]:
resources[key_matches[key]] = result[key]
return resources
def add_resources_to_cache(self, match_obj_pairs):
"""Add resources to cache if they are not blocked by DeleteOp entries"""
# Skip right to memcache
memcache_client = self.request.cache_manager.cache_object.memcache_client
key_objs = {}
key_blockers = {}
keys = []
for match, obj in match_obj_pairs.items():
key = get_cache_key(None, id=match[0], type=match[1])
delete_op_key = "DeleteOp:{}".format(key)
keys.append(key)
key_objs[key] = obj
key_blockers[key] = delete_op_key
while len(keys) > 0:
slice_keys = keys[:32]
keys = keys[32:]
blocker_keys = [key_blockers[key] for key in slice_keys]
result = memcache_client.get_multi(blocker_keys)
# Reduce `slice_keys` to only unblocked keys
slice_keys = [
key for key in slice_keys if key_blockers[key] not in result]
memcache_client.add_multi(
{key: key_objs[key] for key in slice_keys})
def json_create(self, obj, src):
ggrc.builder.json.create(obj, src)
def get_context_id_from_json(self, src):
context = src.get('context', None)
if context:
context_id = context.get('id', None)
try:
return int(context_id)
except (ValueError, TypeError):
return None
return None
def handle_create(self, obj, src):
"""Do NOTHING by default"""
pass
def collection_post(self):
try:
if self.request.mimetype != 'application/json':
return current_app.make_response((
'Content-Type must be application/json', 415, []))
obj = self.model()
src = UnicodeSafeJsonWrapper(self.request.json)
root_attribute = self.model._inflector.table_singular
try:
src = src[root_attribute]
except KeyError, e:
return current_app.make_response((
'Required attribute "{0}" not found'.format(
root_attribute), 400, []))
with benchmark("Query create permissions"):
if not permissions.is_allowed_create(self.model.__name__, None,
self.get_context_id_from_json(src)):
raise Forbidden()
if src.get('private') == True and src.get('context') is not None \
and src['context'].get('id') is not None:
raise BadRequest(
'context MUST be "null" when creating a private resource.')
elif 'context' not in src:
raise BadRequest('context MUST be specified.')
with benchmark("Deserialize object"):
self.json_create(obj, src)
with benchmark("Query create permissions"):
if not permissions.is_allowed_create_for(obj):
# json_create sometimes adds objects to session, so we need to
# make sure the session is cleared
db.session.expunge_all()
raise Forbidden()
with benchmark("Send model POSTed event"):
self.model_posted.send(obj.__class__, obj=obj, src=src, service=self)
obj.modified_by_id = get_current_user_id()
db.session.add(obj)
with benchmark("Get modified objects"):
modified_objects = get_modified_objects(db.session)
with benchmark("Update custom attribute values"):
set_ids_for_new_custom_attribute_values(modified_objects.new, obj)
with benchmark("Log event"):
log_event(db.session, obj)
with benchmark("Update memcache before commit for resource collection POST"):
update_memcache_before_commit(self.request, modified_objects, CACHE_EXPIRY_COLLECTION)
with benchmark("Commit"):
db.session.commit()
with benchmark("Update index"):
update_index(db.session, modified_objects)
with benchmark("Update memcache after commit for resource collection POST"):
update_memcache_after_commit(self.request)
with benchmark("Serialize object"):
object_for_json = self.object_for_json(obj)
with benchmark("Make response"):
return self.json_success_response(
object_for_json, self.modified_at(obj), id=obj.id, status=201)
except IntegrityError as e:
msg = e.orig.args[1]
if obj.type == "Relationship" and \
msg.startswith("Duplicate entry") and \
msg.endswith("'uq_relationships'"):
R = obj.__class__
db.session.rollback()
obj = R.query.filter(
((R.source_type==obj.source_type) &
(R.source_id==obj.source_id) &
(R.destination_type==obj.destination_type) &
(R.destination_id==obj.destination_id)) |
((R.source_type==obj.destination_type) &
(R.source_id==obj.destination_id) &
(R.destination_type==obj.source_type) &
(R.destination_id==obj.source_id))
).first()
object_for_json = self.object_for_json(obj)
return self.json_success_response(object_for_json,
self.modified_at(obj),
id=obj.id, status=200)
raise e
@classmethod
def add_to(cls, app, url, model_class=None, decorators=()):
if model_class:
service_class = type(model_class.__name__, (cls,), {
'_model': model_class,
})
import ggrc.services
setattr(ggrc.services, model_class.__name__, service_class)
else:
service_class = cls
view_func = service_class.as_view(service_class.endpoint_name())
view_func = cls.decorate_view_func(view_func, decorators)
app.add_url_rule(
url,
defaults={cls.pk: None},
view_func=view_func,
methods=['GET', 'POST'])
app.add_url_rule(
'{url}/<{type}:{pk}>'.format(url=url, type=cls.pk_type, pk=cls.pk),
view_func=view_func,
methods=['GET', 'PUT', 'DELETE'])
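  # Illustrative sketch (not part of the original module): with the default
  # pk/pk_type, `FooCollection.add_to(app, '/foos')` registers two URL rules:
  #
  #   /foos           -> GET (collection), POST (create)
  #   /foos/<int:id>  -> GET, PUT, DELETE (single resource)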
# Response helpers
@classmethod
def as_json(cls, obj, **kwargs):
return as_json(obj, **kwargs)
def get_properties_to_include(self, inclusions):
#FIXME This needs to be improved to deal with branching paths... if that's
#desirable or needed.
if inclusions is not None:
if len(inclusions) == 0:
raise BadRequest(
'The __include query parameter requires at least one field to be '
'included.')
paths = inclusions.split(',')
inclusions = []
for p in paths:
path = p.split('.')
if len(path) == 1:
inclusions.append(tuple(path))
else:
inclusions.append((path[0], tuple(path[1:])))
else:
inclusions = ()
return inclusions
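  # Illustrative sketch (not part of the original module): how an `__include`
  # value is parsed by get_properties_to_include (the attribute names are
  # hypothetical):
  #
  #   '__include=owner,contact.email' -> [('owner',), ('contact', ('email',))]
  #   '__include=' (empty value)      -> 400 BadRequest
  #   no __include parameter (None)   -> ()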
def build_page_object_for_json(self, paging):
def page_args(next_num, per_page):
# coerce the values to be plain strings, rather than unicode
ret = dict([(k, unicode(v)) for k, v in request.args.items()])
ret['__page'] = next_num
if '__page_size' in ret:
ret['__page_size'] = per_page
return ret
paging_obj = {}
base_url = self.url_for()
page_url = lambda params:\
base_url + '?' + urlencode(utils.encoded_dict(params))
if paging.has_next:
paging_obj['next'] = page_url(
page_args(paging.next_num, paging.per_page))
if paging.has_prev:
paging_obj['prev'] = page_url(
page_args(paging.prev_num, paging.per_page))
paging_obj['first'] = page_url(page_args(1, paging.per_page))
paging_obj['last'] = page_url(page_args(paging.pages, paging.per_page))
paging_obj['count'] = paging.pages
paging_obj['total'] = paging.total
return paging_obj
def get_resources_from_database(self, matches):
# FIXME: This is cheating -- `matches` should be allowed to be any model
model = self.model
ids = {m[0]: m for m in matches}
query = model.eager_query()
objs = query.filter(model.id.in_(ids.keys()))
resources = {}
includes = self.get_properties_to_include(request.args.get('__include'))
for obj in objs:
resources[ids[obj.id]] = ggrc.builder.json.publish(obj, includes)
ggrc.builder.json.publish_representation(resources)
return resources
def build_collection_representation(self, objs, extras=None):
table_plural = self.model._inflector.table_plural
collection_name = '{0}_collection'.format(table_plural)
resource = {
collection_name: {
'selfLink': self.url_for_preserving_querystring(),
table_plural: objs,
}
}
if extras:
resource[collection_name].update(extras)
return resource
def object_for_json(self, obj, model_name=None, properties_to_include=None):
model_name = model_name or self.model._inflector.table_singular
json_obj = ggrc.builder.json.publish(
obj, properties_to_include or [], inclusion_filter)
ggrc.builder.json.publish_representation(json_obj)
if hasattr(obj, "_json_extras"):
json_obj["extras"] = obj._json_extras
return {model_name: json_obj}
def build_resource_representation(self, obj, extras=None):
table_singular = self.model._inflector.table_singular
resource = {
table_singular: obj,
}
if extras:
resource.update(extras)
return resource
def http_timestamp(self, timestamp):
return format_date_time(time.mktime(timestamp.utctimetuple()))
def json_success_response(
self, response_object, last_modified, status=200, id=None, cache_op=None):
headers = [
('Last-Modified', self.http_timestamp(last_modified)),
('Etag', etag(response_object)),
('Content-Type', 'application/json'),
]
if id is not None:
headers.append(('Location', self.url_for(id=id)))
if cache_op:
headers.append(('X-GGRC-Cache', cache_op))
return current_app.make_response(
(self.as_json(response_object), status, headers))
def getval(self, src, attr, *args):
if args:
return src.get(unicode(attr), *args)
return src.get(unicode(attr))
class ReadOnlyResource(Resource):
def dispatch_request(self, *args, **kwargs):
method = request.method
if method == 'GET':
return super(ReadOnlyResource, self).dispatch_request(*args, **kwargs)
else:
raise NotImplementedError()
def filter_resource(resource, depth=0, user_permissions=None):
"""
Returns:
The subset of resources which are readable based on user_permissions
"""
if user_permissions is None:
user_permissions = permissions.permissions_for(get_current_user())
if type(resource) in (list, tuple):
filtered = []
for sub_resource in resource:
filtered_sub_resource = filter_resource(
sub_resource, depth=depth+1, user_permissions=user_permissions)
if filtered_sub_resource is not None:
filtered.append(filtered_sub_resource)
return filtered
elif type(resource) is dict and 'type' in resource:
# First check current level
context_id = False
if 'context' in resource:
if resource['context'] is None:
context_id = None
else:
context_id = resource['context']['id']
elif 'context_id' in resource:
context_id = resource['context_id']
assert context_id is not False, "No context found for object"
# In order to avoid loading full instances and using is_allowed_read_for,
# we are making a special test for the Creator here. Creator can only
# see relationship objects where he has read access on both source and
    # destination. This is defined in Creator.py (line 220), but is_allowed_read
    # cannot check such conditions without the full instance.
if resource['type'] == "Relationship" and _is_creator():
# Make a check for relationship objects that are a special case
can_read = True
for t in ('source', 'destination'):
inst = resource[t]
contexts = permissions.read_contexts_for(inst['type']) or []
resources = permissions.read_resources_for(inst['type']) or []
if None in contexts or inst['context_id'] in contexts or inst['id'] in resources:
continue
can_read = False
if not can_read:
return None
if not user_permissions.is_allowed_read(resource['type'], resource['id'], context_id):
return None
else:
# Then, filter any typed keys
for key, value in resource.items():
if key == 'context':
# Explicitly allow `context` objects to pass through
pass
else:
# Apply filtering to sub-resources
if type(value) is dict and 'type' in value:
resource[key] = filter_resource(
value, depth=depth+1, user_permissions=user_permissions)
return resource
else:
assert False, "Non-object passed to filter_resource"
def _is_creator():
current_user = get_current_user()
return hasattr(current_user, 'system_wide_role') \
and current_user.system_wide_role == "Creator"
def etag(last_modified):
"""Generate the etag given a datetime for the last time the resource was
modified. This isn't as good as an etag generated off of a hash of the
representation, but, it doesn't require the representation in order to be
calculated. An alternative would be to keep an etag on the stored
representation, but this will do for now.
.. note::
Using the datetime implies the need for some care - the resolution of
the time object needs to be sufficient such that you don't end up with
the same etag due to two updates performed in rapid succession.
"""
return '"{0}"'.format(hashlib.sha1(str(last_modified)).hexdigest())
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Environment configuration object for Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat_internal
from tensorflow.python.util import function_utils
from tensorflow.python.util.tf_export import estimator_export
_USE_DEFAULT = object()
_VALID_DEVICE_FN_ARGS = set(['op'])
# A list of the property names in RunConfig that the user is allowed to change.
_DEFAULT_REPLACEABLE_LIST = [
'model_dir',
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
'log_step_count_steps',
'train_distribute',
'device_fn'
]
_SAVE_CKPT_ERR = (
'`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.'
)
_TF_CONFIG_ENV = 'TF_CONFIG'
_TASK_ENV_KEY = 'task'
_TASK_TYPE_KEY = 'type'
_TASK_ID_KEY = 'index'
_CLUSTER_KEY = 'cluster'
_SERVICE_KEY = 'service'
_SESSION_MASTER_KEY = 'session_master'
_EVAL_SESSION_MASTER_KEY = 'eval_session_master'
_MODEL_DIR_KEY = 'model_dir'
_LOCAL_MASTER = ''
_GRPC_SCHEME = 'grpc://'
def _get_session_master(cluster_spec, task_type, task_id, tf_config):
"""Returns the appropriate address for TensorFlow master.
  The order of precedence to determine the TF session master is as follows:
  1. If `session_master` is set in the TF_CONFIG environment variable, takes it.
2. If the cluster has only one node, returns empty string ''.
3. Returns the grpc address according to the task type and id in the cluster.
This is between-graph replication.
Note: task_type and task_id must be validated. Typically, validated using
`_validate_task_type_and_task_id`.
Args:
cluster_spec: A `ClusterSpec` instance.
task_type: String. Task type for current node.
task_id: Int. Task id for current node.
tf_config: Dict. Python dict for the TF_CONFIG environment variable.
Raises:
RuntimeError: If `cluster_spec` is not set.
"""
if _SESSION_MASTER_KEY in tf_config:
return tf_config[_SESSION_MASTER_KEY]
if not cluster_spec:
raise RuntimeError('Internal error: `_get_session_master` '
'does not expect empty cluster_spec.')
jobs = cluster_spec.jobs
# If there is only one node in the cluster, do things locally by setting
# master to ''. If a service or user sets TF_CONFIG with a single node, it's
# more performant to use a direct master rather than an RPC service.
if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:
return _LOCAL_MASTER
# Lookup the master in cluster_spec using task_type and task_id,
# if possible.
addresses = cluster_spec.job_tasks(task_type)
return _GRPC_SCHEME + addresses[task_id]
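# Illustrative sketch (not part of the original module): master resolution for
# a hypothetical two-worker cluster, with no 'session_master' key in TF_CONFIG.
#
#   cluster = {'chief': ['h0:2222'], 'worker': ['h1:2222', 'h2:2222']}
#   task_type='worker', task_id=1  -> 'grpc://h2:2222'
#
# A cluster with a single job containing a single task would instead resolve
# to the local master '' (empty string).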
def _get_eval_session_master(task_type, tf_config):
"""Returns the appropriate address for TensorFlow evaluation master."""
if task_type == TaskType.EVALUATOR:
return tf_config.get(_EVAL_SESSION_MASTER_KEY, _LOCAL_MASTER)
if _EVAL_SESSION_MASTER_KEY in tf_config:
raise ValueError('Key ({}) should not be set for task type other than {}. '
'Task type: {}'.format(_EVAL_SESSION_MASTER_KEY,
TaskType.EVALUATOR, task_type))
return _LOCAL_MASTER
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
if not cluster_spec:
raise RuntimeError(
'Internal error: `_count_ps` does not expect empty cluster_spec.')
return len(cluster_spec.as_dict().get(TaskType.PS, []))
def _count_worker(cluster_spec, chief_task_type):
"""Counts the number of workers (including chief) in cluster_spec."""
if not cluster_spec:
raise RuntimeError(
'Internal error: `_count_worker` does not expect empty cluster_spec.')
return (len(cluster_spec.as_dict().get(TaskType.WORKER, [])) +
len(cluster_spec.as_dict().get(chief_task_type, [])))
def _validate_service(service):
"""Validates the service key."""
if service is not None and not isinstance(service, dict):
raise TypeError(
'If "service" is set in TF_CONFIG, it must be a dict. Given %s' %
type(service))
return service
def _validate_task_type_and_task_id(cluster_spec, task_env, chief_task_type):
"""Validates the task type and index in `task_env` according to cluster."""
if chief_task_type not in cluster_spec.jobs:
raise ValueError(
'If "cluster" is set in TF_CONFIG, it must have one "%s" node.' %
chief_task_type)
if len(cluster_spec.job_tasks(chief_task_type)) > 1:
raise ValueError(
'The "cluster" in TF_CONFIG must have only one "%s" node.' %
chief_task_type)
task_type = task_env.get(_TASK_TYPE_KEY, None)
task_id = task_env.get(_TASK_ID_KEY, None)
if not task_type:
raise ValueError(
'If "cluster" is set in TF_CONFIG, task type must be set.')
if task_id is None:
raise ValueError(
'If "cluster" is set in TF_CONFIG, task index must be set.')
task_id = int(task_id)
# Check the task id bounds. Upper bound is not necessary as
# - for evaluator, there is no upper bound.
# - for non-evaluator, task id is upper bounded by the number of jobs in
# cluster spec, which will be checked later (when retrieving the `master`)
if task_id < 0:
raise ValueError('Task index must be non-negative number.')
# Evaluator is not part of the training cluster.
if task_type == TaskType.EVALUATOR:
return task_type, task_id
if task_type not in cluster_spec.jobs:
raise ValueError(
'%s is not a valid task_type in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_type, cluster_spec))
addresses = cluster_spec.job_tasks(task_type)
if not 0 <= task_id < len(addresses):
raise ValueError(
'%d is not a valid task_id for task_type %s in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_id, task_type, cluster_spec))
return task_type, task_id
def _get_global_id_in_cluster(
cluster_spec, task_type, task_id, chief_task_type):
"""Returns the global id in cluster."""
  # Note: These are implementation details which the user should not rely on.
  # The first id is 0, which is always assigned to the `chief` node. All other
  # nodes, except `ps`, are ordered alphabetically by task type and then by
  # task id (ascending). `ps` nodes are ordered last.
# Sort task names in cluster
task_type_ordered_list = [chief_task_type]
task_type_ordered_list.extend([
t for t in sorted(cluster_spec.jobs)
if t != chief_task_type and t != TaskType.PS
])
if TaskType.PS in cluster_spec.jobs:
task_type_ordered_list.append(TaskType.PS)
next_global_id = 0
for t in task_type_ordered_list:
if t == task_type:
return next_global_id + task_id
next_global_id += len(cluster_spec.job_tasks(t))
# This should never happen.
raise RuntimeError('Internal Error: `task_type` ({}) is not in '
'cluster_spec ({}).'.format(task_type, cluster_spec))
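# Illustrative sketch (not part of the original module): the ordering described
# above, for a hypothetical cluster with one chief, two workers and two ps
# nodes (chief_task_type='chief'):
#
#   chief:0 -> 0, worker:0 -> 1, worker:1 -> 2, ps:0 -> 3, ps:1 -> 4
#
# e.g. _get_global_id_in_cluster(spec, 'worker', 1, 'chief') == 2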
def _validate_save_ckpt_with_replaced_keys(new_copy, replaced_keys):
"""Validates the save ckpt properties."""
# Ensure one (and only one) of save_steps and save_secs is not None.
# Also, if user sets one save ckpt property, say steps, the other one (secs)
# should be set as None to improve usability.
save_steps = new_copy.save_checkpoints_steps
save_secs = new_copy.save_checkpoints_secs
if ('save_checkpoints_steps' in replaced_keys and
'save_checkpoints_secs' in replaced_keys):
# If user sets both properties explicitly, we need to error out if both
# are set or neither of them are set.
if save_steps is not None and save_secs is not None:
raise ValueError(_SAVE_CKPT_ERR)
elif 'save_checkpoints_steps' in replaced_keys and save_steps is not None:
new_copy._save_checkpoints_secs = None # pylint: disable=protected-access
elif 'save_checkpoints_secs' in replaced_keys and save_secs is not None:
new_copy._save_checkpoints_steps = None # pylint: disable=protected-access
def _validate_properties(run_config):
"""Validates the properties."""
def _validate(property_name, cond, message):
property_value = getattr(run_config, property_name)
if property_value is not None and not cond(property_value):
raise ValueError(message)
_validate('model_dir', lambda dir: dir,
message='model_dir should be non-empty')
_validate('save_summary_steps', lambda steps: steps >= 0,
message='save_summary_steps should be >= 0')
_validate('save_checkpoints_steps', lambda steps: steps >= 0,
message='save_checkpoints_steps should be >= 0')
_validate('save_checkpoints_secs', lambda secs: secs >= 0,
message='save_checkpoints_secs should be >= 0')
_validate('session_config',
lambda sc: isinstance(sc, config_pb2.ConfigProto),
message='session_config must be instance of ConfigProto')
_validate('keep_checkpoint_max', lambda keep_max: keep_max >= 0,
message='keep_checkpoint_max should be >= 0')
_validate('keep_checkpoint_every_n_hours', lambda keep_hours: keep_hours > 0,
message='keep_checkpoint_every_n_hours should be > 0')
_validate('log_step_count_steps', lambda num_steps: num_steps > 0,
message='log_step_count_steps should be > 0')
_validate('tf_random_seed', lambda seed: isinstance(seed, six.integer_types),
message='tf_random_seed must be integer.')
_validate('device_fn', lambda device_fn: six.callable(device_fn) and
set(function_utils.fn_args(device_fn)) == _VALID_DEVICE_FN_ARGS,
message='device_fn must be callable with exactly'
' one argument "op".')
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
CHIEF = 'chief'
EVALUATOR = 'evaluator'
@estimator_export('estimator.RunConfig')
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
def __init__(self,
model_dir=None,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_steps=_USE_DEFAULT,
save_checkpoints_secs=_USE_DEFAULT,
session_config=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
train_distribute=None,
device_fn=None):
"""Constructs a RunConfig.
All distributed training related properties `cluster_spec`, `is_chief`,
`master` , `num_worker_replicas`, `num_ps_replicas`, `task_id`, and
`task_type` are set based on the `TF_CONFIG` environment variable, if the
pertinent information is present. The `TF_CONFIG` environment variable is a
JSON object with attributes: `cluster` and `task`.
`cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from
`server_lib.py`, mapping task types (usually one of the `TaskType` enums) to
a list of task addresses.
`task` has two attributes: `type` and `index`, where `type` can be any of
the task types in `cluster`. When `TF_CONFIG` contains said information,
the following properties are set on this class:
* `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}. If
present, must have one and only one node in the `chief` attribute of
`cluster_spec`.
* `task_type` is set to `TF_CONFIG['task']['type']`. Must be set if
`cluster_spec` is present; must be `worker` (the default value) if
`cluster_spec` is not set.
* `task_id` is set to `TF_CONFIG['task']['index']`. Must be set if
`cluster_spec` is present; must be 0 (the default value) if
`cluster_spec` is not set.
* `master` is determined by looking up `task_type` and `task_id` in the
`cluster_spec`. Defaults to ''.
* `num_ps_replicas` is set by counting the number of nodes listed
in the `ps` attribute of `cluster_spec`. Defaults to 0.
* `num_worker_replicas` is set by counting the number of nodes listed
in the `worker` and `chief` attributes of `cluster_spec`. Defaults to 1.
* `is_chief` is determined based on `task_type` and `cluster`.
There is a special node with `task_type` as `evaluator`, which is not part
of the (training) `cluster_spec`. It handles the distributed evaluation job.
Example of non-chief node:
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'worker', 'index': 1}})
config = RunConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 4
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'worker'
assert not config.is_chief
```
Example of chief node:
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'chief', 'index': 0}})
config = RunConfig()
assert config.master == 'host0:2222'
assert config.task_id == 0
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 4
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'chief'
assert config.is_chief
```
Example of evaluator node (evaluator is not part of training cluster):
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'evaluator', 'index': 0}})
config = RunConfig()
assert config.master == ''
assert config.evaluator_master == ''
assert config.task_id == 0
assert config.num_ps_replicas == 0
assert config.num_worker_replicas == 0
assert config.cluster_spec == {}
assert config.task_type == 'evaluator'
assert not config.is_chief
```
N.B.: If `save_checkpoints_steps` or `save_checkpoints_secs` is set,
`keep_checkpoint_max` might need to be adjusted accordingly, especially in
distributed training. For example, setting `save_checkpoints_secs` to 60
without adjusting `keep_checkpoint_max` (defaults to 5) leads to a situation
where checkpoints are garbage collected after 5 minutes. In distributed
training, the evaluation job starts asynchronously and might then fail to
load or find the checkpoint due to a race condition.
Args:
model_dir: directory where model parameters, graph, etc. are saved. If a
`PathLike` object is given, the path will be resolved. If `None`, a
default value set by the Estimator will be used.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
save_checkpoints_steps: Save checkpoints every this many steps. Cannot be
specified together with `save_checkpoints_secs`.
save_checkpoints_secs: Save checkpoints every this many seconds. Cannot
be specified together with `save_checkpoints_steps`. Defaults to 600
seconds if neither `save_checkpoints_steps` nor `save_checkpoints_secs`
is set in the constructor. If both `save_checkpoints_steps` and
`save_checkpoints_secs` are None, then checkpoints are disabled.
session_config: a ConfigProto used to set session parameters, or None.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept).
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec and the loss will be logged during training.
train_distribute: an optional instance of
`tf.contrib.distribute.DistributionStrategy`. If specified,
then Estimator will distribute the user's model during training,
according to the policy specified by that strategy.
device_fn: A callable invoked for every `Operation` that takes the
`Operation` and returns the device string. If `None`, defaults to
the device function returned by `tf.train.replica_device_setter`
with round-robin strategy.
Raises:
ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
are set.
"""
if (save_checkpoints_steps == _USE_DEFAULT and
save_checkpoints_secs == _USE_DEFAULT):
save_checkpoints_steps = None
save_checkpoints_secs = 600
elif save_checkpoints_secs == _USE_DEFAULT:
save_checkpoints_secs = None
elif save_checkpoints_steps == _USE_DEFAULT:
save_checkpoints_steps = None
elif (save_checkpoints_steps is not None and
save_checkpoints_secs is not None):
raise ValueError(_SAVE_CKPT_ERR)
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV, '{}'))
if tf_config:
logging.info('TF_CONFIG environment variable: %s', tf_config)
model_dir = _get_model_dir(tf_config,
compat_internal.path_to_str(model_dir))
RunConfig._replace(
self,
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
model_dir=model_dir,
tf_random_seed=tf_random_seed,
save_summary_steps=save_summary_steps,
save_checkpoints_steps=save_checkpoints_steps,
save_checkpoints_secs=save_checkpoints_secs,
session_config=session_config,
keep_checkpoint_max=keep_checkpoint_max,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
log_step_count_steps=log_step_count_steps,
train_distribute=train_distribute,
device_fn=device_fn)
self._init_distributed_setting_from_environment_var(tf_config)
# Get session_config only for distributed mode (cluster_spec is present).
if not self._session_config and self._cluster_spec:
RunConfig._replace(
self,
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
session_config=self._get_default_session_config())
def _get_default_session_config(self):
"""Returns None or tf.ConfigProto instance with default device_filters set.
Device filters are set such that chief/master and worker communicates with
only ps. session_config=None for evaluators or any other TaskType.
"""
rewrite_opts = rewriter_config_pb2.RewriterConfig(
meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE)
graph_opts = config_pb2.GraphOptions(rewrite_options=rewrite_opts)
device_filters = None
if self._task_type == TaskType.MASTER:
device_filters = ['/job:ps', '/job:master']
elif self._task_type == TaskType.CHIEF:
device_filters = ['/job:ps', '/job:chief']
elif self._task_type == TaskType.WORKER:
device_filters = ['/job:ps', '/job:worker/task:%d' % self._task_id]
elif self._task_type == TaskType.PS:
device_filters = ['/job:ps', '/job:worker', '/job:master']
else:
# If the task_type is `EVALUATOR` or something other than the ones in
# TaskType then don't set any device filters.
return None
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=graph_opts,
device_filters=device_filters)
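# For example, a worker with task_id 2 would get
# device_filters=['/job:ps', '/job:worker/task:2'], so its session only sees
# devices on the ps jobs and on its own worker task instead of waiting on
# every other worker in the cluster.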
def _init_distributed_setting_from_environment_var(self, tf_config):
"""Initialize distributed properties based on `tf_config`."""
self._service = _validate_service(tf_config.get(_SERVICE_KEY))
self._cluster_spec = server_lib.ClusterSpec(tf_config.get(_CLUSTER_KEY, {}))
task_env = tf_config.get(_TASK_ENV_KEY, {})
if self._cluster_spec and TaskType.MASTER in self._cluster_spec.jobs:
return self._init_distributed_setting_from_environment_var_with_master(
tf_config)
if self._cluster_spec:
# Distributed mode.
self._task_type, self._task_id = _validate_task_type_and_task_id(
self._cluster_spec, task_env, TaskType.CHIEF)
self._evaluation_master = _get_eval_session_master(
self._task_type, tf_config)
if self._task_type != TaskType.EVALUATOR:
self._master = _get_session_master(self._cluster_spec, self._task_type,
self._task_id, tf_config)
self._num_ps_replicas = _count_ps(self._cluster_spec)
self._num_worker_replicas = _count_worker(
self._cluster_spec, chief_task_type=TaskType.CHIEF)
self._global_id_in_cluster = _get_global_id_in_cluster(
self._cluster_spec,
self._task_type,
self._task_id,
chief_task_type=TaskType.CHIEF)
else:
# Evaluator is not part of the training cluster.
self._cluster_spec = server_lib.ClusterSpec({})
self._master = _LOCAL_MASTER
self._num_ps_replicas = 0
self._num_worker_replicas = 0
self._global_id_in_cluster = None # undefined
self._is_chief = self._task_type == TaskType.CHIEF
else:
# Local mode.
self._task_type = task_env.get(_TASK_TYPE_KEY, TaskType.WORKER)
self._task_id = int(task_env.get(_TASK_ID_KEY, 0))
self._global_id_in_cluster = 0
if self._task_type != TaskType.WORKER:
raise ValueError(
'If "cluster" is not set in TF_CONFIG, task type must be WORKER.')
if self._task_id != 0:
raise ValueError(
'If "cluster" is not set in TF_CONFIG, task index must be 0.')
self._master = tf_config.get(_SESSION_MASTER_KEY, _LOCAL_MASTER)
self._evaluation_master = tf_config.get(_EVAL_SESSION_MASTER_KEY,
_LOCAL_MASTER)
self._is_chief = True
self._num_ps_replicas = 0
self._num_worker_replicas = 1
def _init_distributed_setting_from_environment_var_with_master(self,
tf_config):
"""Initialize distributed properties for legacy cluster with `master`."""
# There is no technical reason why a user cannot have both chief and master
# in the same cluster, but it is very confusing (which one is really the
# chief?). So, block this case.
if TaskType.CHIEF in self._cluster_spec.jobs:
raise ValueError('If `master` node exists in `cluster`, job '
'`chief` is not supported.')
task_env = tf_config.get(_TASK_ENV_KEY, {})
self._task_type, self._task_id = _validate_task_type_and_task_id(
self._cluster_spec, task_env, TaskType.MASTER)
if self._task_type == TaskType.EVALUATOR:
raise ValueError('If `master` node exists in `cluster`, task_type '
'`evaluator` is not supported.')
self._global_id_in_cluster = _get_global_id_in_cluster(
self._cluster_spec,
self._task_type,
self._task_id,
chief_task_type=TaskType.MASTER)
self._master = _get_session_master(self._cluster_spec, self._task_type,
self._task_id, tf_config)
self._evaluation_master = _get_eval_session_master(self._task_type,
tf_config)
self._num_ps_replicas = _count_ps(self._cluster_spec)
self._num_worker_replicas = _count_worker(
self._cluster_spec, chief_task_type=TaskType.MASTER)
self._is_chief = self._task_type == TaskType.MASTER
@property
def cluster_spec(self):
return self._cluster_spec
@property
def device_fn(self):
"""Returns the device_fn.
If device_fn is not `None`, it overrides the default
device function used in `Estimator`.
Otherwise the default one is used.
"""
return self._device_fn
@property
def evaluation_master(self):
return self._evaluation_master
@property
def is_chief(self):
return self._is_chief
@property
def master(self):
return self._master
@property
def num_ps_replicas(self):
return self._num_ps_replicas
@property
def num_worker_replicas(self):
return self._num_worker_replicas
@property
def task_id(self):
return self._task_id
@property
def global_id_in_cluster(self):
"""The global id in the training cluster.
All global ids in the training cluster are assigned from an increasing
sequence of consecutive integers. The first id is 0.
Note: Task id (the property field `task_id`) tracks the index of the
node among all nodes with the SAME task type. For example, given the cluster
definition as follows:
definition as follows:
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
```
Nodes with task type `worker` can have id 0, 1, or 2. Nodes with task type
`ps` can have id 0 or 1. So, `task_id` is not unique, but the pair
(`task_type`, `task_id`) uniquely determines a node in the cluster.
The global id, i.e., this field, tracks the index of the node among ALL
nodes in the cluster and is uniquely assigned. For example, for the cluster
spec given above, the global ids are assigned as:
```
task_type | task_id | global_id
--------------------------------
chief | 0 | 0
worker | 0 | 1
worker | 1 | 2
worker | 2 | 3
ps | 0 | 4
ps | 1 | 5
```
Returns:
An integer id.
"""
return self._global_id_in_cluster
@property
def task_type(self):
return self._task_type
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def session_config(self):
return self._session_config
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
@property
def log_step_count_steps(self):
return self._log_step_count_steps
@property
def model_dir(self):
return self._model_dir
@property
def service(self):
"""Returns the platform defined (in TF_CONFIG) service dict."""
return self._service
@property
def train_distribute(self):
"""Returns the optional `tf.contrib.distribute.DistributionStrategy` object.
"""
return self._train_distribute
def replace(self, **kwargs):
"""Returns a new instance of `RunConfig` replacing specified properties.
Only the properties in the following list are allowed to be replaced:
- `model_dir`,
- `tf_random_seed`,
- `save_summary_steps`,
- `save_checkpoints_steps`,
- `save_checkpoints_secs`,
- `session_config`,
- `keep_checkpoint_max`,
- `keep_checkpoint_every_n_hours`,
- `log_step_count_steps`,
- `train_distribute`,
- `device_fn`.
In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
can be set (but not both).
Args:
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
return RunConfig._replace(
copy.deepcopy(self),
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
**kwargs)
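# For example (illustrative only, assuming an existing `config` instance):
#   config = config.replace(save_checkpoints_steps=1000)
# replaces the checkpoint cadence and, through
# _validate_save_ckpt_with_replaced_keys, clears save_checkpoints_secs so the
# two cadence settings never conflict.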
@staticmethod
def _replace(config, allowed_properties_list=None, **kwargs):
"""See `replace`.
N.B.: This implementation assumes that for a key named "foo", the underlying
property the RunConfig holds is "_foo" (with one leading underscore).
Args:
config: The RunConfig to replace the values of.
allowed_properties_list: The property name list allowed to be replaced.
**kwargs: keyword named properties with new values.
Raises:
ValueError: If any property name in `kwargs` does not exist or is not
allowed to be replaced, or both `save_checkpoints_steps` and
`save_checkpoints_secs` are set.
Returns:
a new instance of `RunConfig`.
"""
allowed_properties_list = allowed_properties_list or []
for key, new_value in six.iteritems(kwargs):
if key in allowed_properties_list:
setattr(config, '_' + key, new_value)
continue
raise ValueError(
'Replacing {} is not supported. Allowed properties are {}.'.format(
key, allowed_properties_list))
_validate_save_ckpt_with_replaced_keys(config, kwargs.keys())
_validate_properties(config)
return config
def _get_model_dir(tf_config, model_dir):
"""Returns `model_dir` based user provided `tf_config` or `model_dir`."""
# pylint: disable=g-explicit-bool-comparison
# Empty string is treated as False in Python condition checks, which can
# trigger confusing error messages. For example, 'a or b' returns None if a is
# '' and b is None. `None` is allowed for model_dir but '' is not. Here,
# explicitly check for the empty string to provide a clear error message.
if model_dir == '':
raise ValueError('model_dir should be non-empty.')
model_dir_in_tf_config = tf_config.get('model_dir')
if model_dir_in_tf_config == '':
raise ValueError('model_dir in TF_CONFIG should be non-empty.')
if model_dir_in_tf_config:
if model_dir and model_dir_in_tf_config != model_dir:
raise ValueError(
'`model_dir` provided in the RunConfig constructor, if set, '
'must have the same value as the model_dir in TF_CONFIG. '
'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format(
model_dir, model_dir_in_tf_config))
logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)
return model_dir or model_dir_in_tf_config
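# For example (paths are illustrative only): with TF_CONFIG containing
# {"model_dir": "gs://bucket/dir"} and no model_dir passed to the RunConfig
# constructor, "gs://bucket/dir" is used. Passing the same value is accepted,
# while passing a different, non-matching model_dir raises the ValueError
# above.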
|
|
# Copyright 2013 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_versionedobjects import base as ovo_base
import nova.conf
from nova.objects import base as objects_base
from nova import rpc
CONF = nova.conf.CONF
rpcapi_cap_opt = cfg.StrOpt('conductor',
help='Set a version cap for messages sent to conductor services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConductorAPI(object):
"""Client side of the conductor RPC API
API version history:
* 1.0 - Initial version.
* 1.1 - Added migration_update
* 1.2 - Added instance_get_by_uuid and instance_get_all_by_host
* 1.3 - Added aggregate_host_add and aggregate_host_delete
* 1.4 - Added migration_get
* 1.5 - Added bw_usage_update
* 1.6 - Added get_backdoor_port()
* 1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
* 1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
* 1.9 - Added provider_fw_rule_get_all
* 1.10 - Added agent_build_get_by_triple
* 1.11 - Added aggregate_get
* 1.12 - Added block_device_mapping_update_or_create
* 1.13 - Added block_device_mapping_get_all_by_instance
* 1.14 - Added block_device_mapping_destroy
* 1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
* 1.16 - Added instance_destroy
* 1.17 - Added instance_info_cache_delete
* 1.18 - Added instance_type_get
* 1.19 - Added vol_get_usage_by_time and vol_usage_update
* 1.20 - Added migration_get_unconfirmed_by_dest_compute
* 1.21 - Added service_get_all_by
* 1.22 - Added ping
* 1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
* 1.24 - Added instance_get
* 1.25 - Added action_event_start and action_event_finish
* 1.26 - Added instance_info_cache_update
* 1.27 - Added service_create
* 1.28 - Added binary arg to service_get_all_by
* 1.29 - Added service_destroy
* 1.30 - Added migration_create
* 1.31 - Added migration_get_in_progress_by_host_and_node
* 1.32 - Added optional node to instance_get_all_by_host
* 1.33 - Added compute_node_create and compute_node_update
* 1.34 - Added service_update
* 1.35 - Added instance_get_active_by_window_joined
* 1.36 - Added instance_fault_create
* 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
* 1.38 - Added service name to instance_update
* 1.39 - Added notify_usage_exists
* 1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
* 1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
* 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
* 1.43 - Added compute_stop
* 1.44 - Added compute_node_delete
* 1.45 - Added project_id to quota_commit and quota_rollback
* 1.46 - Added compute_confirm_resize
* 1.47 - Added columns_to_join to instance_get_all_by_host and
instance_get_all_by_filters
* 1.48 - Added compute_unrescue
... Grizzly supports message version 1.48. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.48.
* 1.49 - Added columns_to_join to instance_get_by_uuid
* 1.50 - Added object_action() and object_class_action()
* 1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
* 1.52 - Pass instance objects for compute_confirm_resize
* 1.53 - Added compute_reboot
* 1.54 - Added 'update_cells' argument to bw_usage_update
* 1.55 - Pass instance objects for compute_stop
* 1.56 - Remove compute_confirm_resize and
migration_get_unconfirmed_by_dest_compute
* 1.57 - Remove migration_create()
* 1.58 - Remove migration_get()
... Havana supports message version 1.58. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.58.
* 1.59 - Remove instance_info_cache_update()
* 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
* ... - Remove security_group_get_by_instance() and
security_group_rule_get_by_security_group()
* 1.61 - Return deleted instance from instance_destroy()
* 1.62 - Added object_backport()
* 1.63 - Changed the format of values['stats'] from a dict to a JSON string
in compute_node_update()
* 1.64 - Added use_slave to instance_get_all_filters()
- Remove instance_type_get()
- Remove aggregate_get()
- Remove aggregate_get_by_host()
- Remove instance_get()
- Remove migration_update()
- Remove block_device_mapping_destroy()
* 2.0 - Drop backwards compatibility
- Remove quota_rollback() and quota_commit()
- Remove aggregate_host_add() and aggregate_host_delete()
- Remove network_migrate_instance_start() and
network_migrate_instance_finish()
- Remove vol_get_usage_by_time
... Icehouse supports message version 2.0. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.0.
* Remove instance_destroy()
* Remove compute_unrescue()
* Remove instance_get_all_by_filters()
* Remove instance_get_active_by_window_joined()
* Remove instance_fault_create()
* Remove action_event_start() and action_event_finish()
* Remove instance_get_by_uuid()
* Remove agent_build_get_by_triple()
... Juno supports message version 2.0. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.0.
* 2.1 - Make notify_usage_exists() take an instance object
* Remove bw_usage_update()
* Remove notify_usage_exists()
... Kilo supports message version 2.1. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.1.
* Remove get_ec2_ids()
* Remove service_get_all_by()
* Remove service_create()
* Remove service_destroy()
* Remove service_update()
* Remove migration_get_in_progress_by_host_and_node()
* Remove aggregate_metadata_get_by_host()
* Remove block_device_mapping_update_or_create()
* Remove block_device_mapping_get_all_by_instance()
* Remove instance_get_all_by_host()
* Remove compute_node_update()
* Remove compute_node_delete()
* Remove security_groups_trigger_handler()
* Remove task_log_get()
* Remove task_log_begin_task()
* Remove task_log_end_task()
* Remove security_groups_trigger_members_refresh()
* Remove vol_usage_update()
* Remove instance_update()
* 2.2 - Add object_backport_versions()
* 2.3 - Add object_class_action_versions()
* Remove compute_node_create()
* Remove object_backport()
* 3.0 - Drop backwards compatibility
... Liberty supports message version 3.0. So, any changes to
existing methods in 3.x after that point should be done such
that they can handle the version_cap being set to 3.0.
* Remove provider_fw_rule_get_all()
"""
VERSION_ALIASES = {
'grizzly': '1.48',
'havana': '1.58',
'icehouse': '2.0',
'juno': '2.0',
'kilo': '2.1',
'liberty': '3.0',
}
def __init__(self):
super(ConductorAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic, version='3.0')
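# Resolve release-name aliases, e.g. upgrade_levels.conductor='kilo' maps to
# '2.1'; an explicit version string (or None) is passed through unchanged.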
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
CONF.upgrade_levels.conductor)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
# TODO(hanlind): This method can be removed once oslo.versionedobjects
# has been converted to use version_manifests in remotable_classmethod
# operations, which will use the new class action handler.
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
versions = ovo_base.obj_tree_get_versions(objname)
return self.object_class_action_versions(context,
objname,
objmethod,
versions,
args, kwargs)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_class_action_versions',
objname=objname, objmethod=objmethod,
object_versions=object_versions,
args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport_versions(self, context, objinst, object_versions):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_backport_versions', objinst=objinst,
object_versions=object_versions)
class ComputeTaskAPI(object):
"""Client side of the conductor 'compute' namespaced RPC API
API version history:
1.0 - Initial version (empty).
1.1 - Added unified migrate_server call.
1.2 - Added build_instances
1.3 - Added unshelve_instance
1.4 - Added reservations to migrate_server.
1.5 - Added the legacy_bdm parameter to build_instances
1.6 - Made migrate_server use instance objects
1.7 - Do not send block_device_mapping and legacy_bdm to build_instances
1.8 - Add rebuild_instance
1.9 - Converted requested_networks to NetworkRequestList object
1.10 - Made migrate_server() and build_instances() send flavor objects
1.11 - Added clean_shutdown to migrate_server()
1.12 - Added request_spec to rebuild_instance()
1.13 - Added request_spec to migrate_server()
1.14 - Added request_spec to unshelve_instance()
"""
def __init__(self):
super(ComputeTaskAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic,
namespace='compute_task',
version='1.0')
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, serializer=serializer)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
reservations=None, clean_shutdown=True, request_spec=None):
kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
'live': live, 'rebuild': rebuild, 'flavor': flavor,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit,
'reservations': reservations,
'clean_shutdown': clean_shutdown,
'request_spec': request_spec,
}
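# Negotiate downward if the conductor cannot accept the latest version:
# 1.13 added request_spec, 1.11 added clean_shutdown, 1.10 started sending
# flavor objects and 1.6 started sending instance objects, so each argument
# is dropped or converted to a primitive as the version cap steps down.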
version = '1.13'
if not self.client.can_send_version(version):
del kw['request_spec']
version = '1.11'
if not self.client.can_send_version(version):
del kw['clean_shutdown']
version = '1.10'
if not self.client.can_send_version(version):
kw['flavor'] = objects_base.obj_to_primitive(flavor)
version = '1.6'
if not self.client.can_send_version(version):
kw['instance'] = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.4'
cctxt = self.client.prepare(version=version)
return cctxt.call(context, 'migrate_server', **kw)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
image_p = jsonutils.to_primitive(image)
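# Version negotiation mirrors migrate_server: 1.10 sends flavor objects,
# 1.9 sends requested_networks as a NetworkRequestList object, and anything
# capped below 1.7 still expects block_device_mapping and legacy_bdm.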
version = '1.10'
if not self.client.can_send_version(version):
version = '1.9'
if 'instance_type' in filter_properties:
flavor = filter_properties['instance_type']
flavor_p = objects_base.obj_to_primitive(flavor)
filter_properties = dict(filter_properties,
instance_type=flavor_p)
kw = {'instances': instances, 'image': image_p,
'filter_properties': filter_properties,
'admin_password': admin_password,
'injected_files': injected_files,
'requested_networks': requested_networks,
'security_groups': security_groups}
if not self.client.can_send_version(version):
version = '1.8'
kw['requested_networks'] = kw['requested_networks'].as_tuples()
if not self.client.can_send_version('1.7'):
version = '1.5'
bdm_p = objects_base.obj_to_primitive(block_device_mapping)
kw.update({'block_device_mapping': bdm_p,
'legacy_bdm': legacy_bdm})
cctxt = self.client.prepare(version=version)
cctxt.cast(context, 'build_instances', **kw)
def unshelve_instance(self, context, instance, request_spec=None):
version = '1.14'
kw = {'instance': instance,
'request_spec': request_spec
}
if not self.client.can_send_version(version):
version = '1.3'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
cctxt.cast(context, 'unshelve_instance', **kw)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, request_spec=None, kwargs=None):
version = '1.12'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
'image_ref': image_ref,
'orig_image_ref': orig_image_ref,
'orig_sys_metadata': orig_sys_metadata,
'bdms': bdms,
'recreate': recreate,
'on_shared_storage': on_shared_storage,
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
}
if not self.client.can_send_version(version):
version = '1.8'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'rebuild_instance', **kw)
|
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
import yaml
import collections
import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel')
default_unsecure_fixture_options = FixtureOptions(
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'],
True, False, [], [], True, False, True, False, True, False, True, True)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(
fullstack=False, dns_resolver=False, client_channel=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(
secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'])
local_fixture_options = default_secure_fixture_options._replace(
dns_resolver=False, platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'])
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'], client_channel=False)
inproc_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, fullstack=False, name_resolution=False,
supports_compression=False, is_inproc=True, is_http2=False,
supports_write_buffering=False, client_channel=False)
# maps fixture names to their FixtureOptions
END2END_FIXTURES = {
'h2_compress': default_unsecure_fixture_options._replace(enables_compression=True),
'h2_census': default_unsecure_fixture_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'h2_load_reporting': default_unsecure_fixture_options,
'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
'h2_fd': fd_unsecure_fixture_options,
'h2_full': default_unsecure_fixture_options,
'h2_full+pipe': default_unsecure_fixture_options._replace(
platforms=['linux'], exclude_iomgrs=['uv']),
'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds': default_unsecure_fixture_options,
'h2_http_proxy': default_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True),
'h2_oauth2': default_secure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_proxy': default_unsecure_fixture_options._replace(
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
ci_mac=False, exclude_configs=['msan'], large_writes=False,
exclude_iomgrs=['uv']),
'h2_sockpair': socketpair_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
ci_mac=False, tracing=True, large_writes=False, exclude_iomgrs=['uv']),
'h2_ssl': default_secure_fixture_options,
'h2_local': local_fixture_options,
'h2_ssl_proxy': default_secure_fixture_options._replace(
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
'h2_uds': uds_fixture_options,
'inproc': inproc_fixture_options
}
TestOptions = collections.namedtuple(
'TestOptions',
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel')
default_test_options = TestOptions(
False, False, False, True, False, True, 1.0, [], False, False, True,
False, False, False, False, False, False)
connectivity_test_options = default_test_options._replace(
needs_fullstack=True)
LOWCPU = 0.1
# maps test names to options
END2END_TESTS = {
'authority_not_supported': default_test_options,
'bad_hostname': default_test_options._replace(needs_names=True),
'bad_ping': connectivity_test_options._replace(proxyable=False),
'binary_metadata': default_test_options._replace(cpu_cost=LOWCPU),
'resource_quota_server': default_test_options._replace(
large_writes=True, proxyable=False, allows_compression=False),
'call_creds': default_test_options._replace(secure=True),
'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_round_trip': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU),
'compressed_payload': default_test_options._replace(proxyable=False,
needs_compression=True),
'connectivity': connectivity_test_options._replace(needs_names=True,
proxyable=False, cpu_cost=LOWCPU, exclude_iomgrs=['uv']),
'channelz': default_test_options,
'default_host': default_test_options._replace(
needs_fullstack=True, needs_dns=True, needs_names=True),
'call_host_override': default_test_options._replace(
needs_fullstack=True, needs_dns=True, needs_names=True),
'disappearing_server': connectivity_test_options._replace(flaky=True,
needs_names=True),
'empty_batch': default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(
cpu_cost=LOWCPU, exclude_inproc=True),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno': default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request': default_test_options,
'invoke_large_request': default_test_options,
'keepalive_timeout': default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
needs_http2=True),
'large_metadata': default_test_options,
'max_concurrent_streams': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU, exclude_inproc=True),
'max_connection_age': default_test_options._replace(cpu_cost=LOWCPU,
exclude_inproc=True),
'max_connection_idle': connectivity_test_options._replace(
proxyable=False, exclude_iomgrs=['uv'], cpu_cost=LOWCPU),
'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline': default_test_options,
'network_status_change': default_test_options._replace(cpu_cost=LOWCPU),
'no_error_on_hotpath': default_test_options._replace(proxyable=False),
'no_logging': default_test_options._replace(traceable=False),
'no_op': default_test_options,
'payload': default_test_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'load_reporting_hook': default_test_options,
'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
'ping': connectivity_test_options._replace(proxyable=False,
cpu_cost=LOWCPU),
'proxy_auth': default_test_options._replace(needs_proxy_auth=True),
'registered_call': default_test_options,
'request_with_flags': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU),
'request_with_payload': default_test_options._replace(cpu_cost=LOWCPU),
# TODO(roth): Remove proxyable=False for all retry tests once we
# have a way for the proxy to propagate the fact that trailing
# metadata is available when initial metadata is returned.
# See https://github.com/grpc/grpc/issues/14467 for context.
'retry': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_cancellation': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_disabled': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_initial_batch': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_exceeds_buffer_size_in_subsequent_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_non_retriable_status_before_recv_trailing_metadata_started':
default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_recv_initial_metadata': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_recv_message': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_server_pushback_delay': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_server_pushback_disabled': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_streaming': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_after_commit': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_streaming_succeeds_before_replay_finished':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_throttled': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_too_many_attempts': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'server_finishes_request': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_calls': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_tags': default_test_options._replace(cpu_cost=LOWCPU),
'simple_cacheable_request': default_test_options._replace(cpu_cost=LOWCPU),
'stream_compression_compressed_payload': default_test_options._replace(
proxyable=False, exclude_inproc=True),
'stream_compression_payload': default_test_options._replace(
exclude_inproc=True),
'stream_compression_ping_pong_streaming': default_test_options._replace(
exclude_inproc=True),
'simple_delayed_request': connectivity_test_options,
'simple_metadata': default_test_options,
'simple_request': default_test_options,
'streaming_error_response': default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata': default_test_options,
'workaround_cronet_compression': default_test_options,
'write_buffering': default_test_options._replace(
cpu_cost=LOWCPU, needs_write_buffering=True),
'write_buffering_at_end': default_test_options._replace(
cpu_cost=LOWCPU, needs_write_buffering=True),
}
def compatible(f, t):
if END2END_TESTS[t].needs_fullstack:
if not END2END_FIXTURES[f].fullstack:
return False
if END2END_TESTS[t].needs_dns:
if not END2END_FIXTURES[f].dns_resolver:
return False
if END2END_TESTS[t].needs_names:
if not END2END_FIXTURES[f].name_resolution:
return False
if not END2END_TESTS[t].proxyable:
if END2END_FIXTURES[f].includes_proxy:
return False
if not END2END_TESTS[t].traceable:
if END2END_FIXTURES[f].tracing:
return False
if END2END_TESTS[t].large_writes:
if not END2END_FIXTURES[f].large_writes:
return False
if not END2END_TESTS[t].allows_compression:
if END2END_FIXTURES[f].enables_compression:
return False
if END2END_TESTS[t].needs_compression:
if not END2END_FIXTURES[f].supports_compression:
return False
if END2END_TESTS[t].exclude_inproc:
if END2END_FIXTURES[f].is_inproc:
return False
if END2END_TESTS[t].needs_http2:
if not END2END_FIXTURES[f].is_http2:
return False
if END2END_TESTS[t].needs_proxy_auth:
if not END2END_FIXTURES[f].supports_proxy_auth:
return False
if END2END_TESTS[t].needs_write_buffering:
if not END2END_FIXTURES[f].supports_write_buffering:
return False
if END2END_TESTS[t].needs_client_channel:
if not END2END_FIXTURES[f].client_channel:
return False
return True
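# For example, 'compressed_payload' sets needs_compression=True, so it is only
# generated for fixtures whose supports_compression flag is set and is skipped
# for the 'inproc' fixture.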
def without(l, e):
l = l[:]
l.remove(e)
return l
def main():
sec_deps = [
'grpc_test_util',
'grpc',
'gpr_test_util',
'gpr'
]
unsec_deps = [
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr_test_util',
'gpr'
]
json = {
'#': 'generated with test/end2end/gen_build_json.py',
'libs': [
{
'name': 'end2end_tests',
'build': 'private',
'language': 'c',
'secure': True,
'src': ['test/core/end2end/end2end_tests.cc',
'test/core/end2end/end2end_test_utils.cc'] + [
'test/core/end2end/tests/%s.cc' % t
for t in sorted(END2END_TESTS.keys())],
'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'],
'deps': sec_deps,
'vs_proj_dir': 'test/end2end/tests',
}
] + [
{
'name': 'end2end_nosec_tests',
'build': 'private',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/end2end_nosec_tests.cc',
'test/core/end2end/end2end_test_utils.cc'] + [
'test/core/end2end/tests/%s.cc' % t
for t in sorted(END2END_TESTS.keys())
if not END2END_TESTS[t].secure],
'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'],
'deps': unsec_deps,
'vs_proj_dir': 'test/end2end/tests',
}
],
'targets': [
{
'name': '%s_test' % f,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_tests'
] + sec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
] + [
{
'name': '%s_nosec_test' % f,
'build': 'test',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'run': False,
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_nosec_tests'
] + unsec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
],
'tests': [
{
'name': '%s_test' % f,
'args': [t],
'exclude_configs': END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': END2END_TESTS[t].flaky,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
] + [
{
'name': '%s_nosec_test' % f,
'args': [t],
'exclude_configs': END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': END2END_TESTS[t].flaky,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
for t in sorted(END2END_TESTS.keys())
if compatible(f, t) and not END2END_TESTS[t].secure
],
'core_end2end_tests': dict(
(t, END2END_TESTS[t].secure)
for t in END2END_TESTS.keys()
)
}
print yaml.dump(json)
if __name__ == '__main__':
main()
|
|
import theano.tensor as T
from .. import init
from .. import nonlinearities
from ..utils import as_tuple
from ..theano_extensions import conv
from .base import Layer
__all__ = [
"Conv1DLayer",
"Conv2DLayer",
]
def conv_output_length(input_length, filter_size,
stride, border_mode, pad=0):
"""Helper function to compute the output size of a convolution operation
This function computes the length along a single axis, which corresponds
to a 1D convolution. It can also be used for convolutions with higher
dimensionalities by using it individually for each axis.
Parameters
----------
input_length : int
The size of the input.
filter_size : int
The size of the filter.
stride : int
The stride of the convolution operation.
border_mode : str, 'valid', 'full', 'same' or 'pad'
A string indicating the convolution border mode.
If 'valid', it is assumed that the convolution is only computed where
the input and the filter fully overlap.
If 'full', it is assumed that the convolution is computed wherever the
input and the filter overlap by at least one position.
If 'same', it is assumed that the convolution is computed wherever the
input and the filter overlap by at least half the filter size, when the
filter size is odd. In practice, the input is zero-padded with half the
filter size at the beginning and half at the end (or one less than half
in the case of an even filter size). This results in an output length
that is the same as the input length (for both odd and even filter
sizes).
If 'pad', zero padding of `pad` positions is assumed to be applied to
the input, and then a valid convolution is applied.
pad : int, optional (default 0)
If `border_mode` is set to 'pad', this is the size of the padding that
is applied on both sides of the input. Otherwise, this is ignored.
Returns
-------
int
The output size corresponding to the given convolution parameters.
Raises
------
RuntimeError
When an invalid border_mode string is specified, a `RuntimeError` is
raised.
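Examples
--------
A few illustrative values (these follow directly from the formulas above):
>>> conv_output_length(10, 3, 1, 'valid')
8
>>> conv_output_length(10, 3, 1, 'full')
12
>>> conv_output_length(10, 3, 1, 'same')
10
>>> conv_output_length(10, 3, 2, 'pad', pad=1)
5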
"""
if input_length is None:
return None
if border_mode == 'valid':
output_length = input_length - filter_size + 1
elif border_mode == 'full':
output_length = input_length + filter_size - 1
elif border_mode == 'same':
output_length = input_length
elif border_mode == 'pad':
output_length = input_length + 2 * pad - filter_size + 1
else:
raise RuntimeError('Invalid border mode: {0}'.format(border_mode))
# This is the integer arithmetic equivalent to
# np.ceil(output_length / stride)
output_length = (output_length + stride - 1) // stride
return output_length
class Conv1DLayer(Layer):
"""
1D convolutional layer
Performs a 1D convolution on its input and optionally adds a bias and
applies an elementwise nonlinearity.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape. The
output of this layer should be a 3D tensor, with shape
``(batch_size, num_input_channels, input_length)``.
num_filters : int
The number of learnable convolutional filters this layer has.
filter_size : int or tuple of int
An integer or a 1-element tuple specifying the size of the filters.
stride : int or tuple of int
An integer or a 1-element tuple specifying the stride of the
convolution operation.
border_mode : str, one of 'valid', 'full', 'same'
A string indicating the convolution border mode.
If 'valid', the convolution is only computed where the input and the
filter fully overlap.
If 'full', the convolution is computed wherever the input and the
filter overlap by at least one position.
If 'same', the convolution is computed wherever the input and the
filter overlap by at least half the filter size, when the filter size
is odd. In practice, the input is zero-padded with half the filter size
at the beginning and half at the end (or one less than half in the case
of an even filter size). This results in an output length that is the
same as the input length (for both odd and even filter sizes).
untie_biases : bool, default False
If ``False``, the layer will have a bias parameter for each channel, which
is shared across all positions in this channel. As a result, the `b`
attribute will be a vector (1D).
If ``True``, the layer will have separate bias parameters for each
position in each channel. As a result, the `b` attribute will be a
matrix (2D).
W : Theano shared variable, numpy array or callable
An initializer for the weights of the layer. This should initialize the
layer weights to a 3D array with shape
``(num_filters, num_input_channels, filter_length)``.
See :meth:`Layer.create_param` for more information.
b : Theano shared variable, numpy array, callable or None
An initializer for the biases of the layer. If None is provided, the
layer will have no biases. This should initialize the layer biases to
a 1D array with shape ``(num_filters,)`` if `untie_biases` is set to
``False``. If it is set to ``True``, its shape should be
``(num_filters, input_length)`` instead.
See :meth:`Layer.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
convolution : callable
The convolution implementation to use. The
`lasagne.theano_extensions.conv` module provides some alternative
implementations for 1D convolutions, because the Theano API only
features a 2D convolution implementation. Usually it should be fine
to leave this at the default value.
**kwargs
Any additional keyword arguments are passed to the `Layer` superclass.
Attributes
----------
W : Theano shared variable
Variable representing the filter weights.
b : Theano shared variable
Variable representing the biases.
Notes
-----
Theano's default convolution function (`theano.tensor.nnet.conv.conv2d`)
does not support the 'same' border mode by default. This layer emulates
it by performing a 'full' convolution and then cropping the result, which
may negatively affect performance.
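Examples
--------
A minimal usage sketch; the import path and shapes below are illustrative
only:
>>> from lasagne.layers import InputLayer, Conv1DLayer
>>> l_in = InputLayer((None, 3, 100))
>>> l_conv = Conv1DLayer(l_in, num_filters=16, filter_size=5)
>>> l_conv.get_output_shape()
(None, 16, 96)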
"""
def __init__(self, incoming, num_filters, filter_size, stride=1,
border_mode="valid", untie_biases=False,
W=init.GlorotUniform(), b=init.Constant(0.),
nonlinearity=nonlinearities.rectify,
convolution=conv.conv1d_mc0, **kwargs):
super(Conv1DLayer, self).__init__(incoming, **kwargs)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = as_tuple(filter_size, 1)
self.stride = as_tuple(stride, 1)
self.border_mode = border_mode
self.untie_biases = untie_biases
self.convolution = convolution
self.W = self.create_param(W, self.get_W_shape(), name="W")
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
self.b = self.create_param(b, (num_filters, output_shape[2]),
name="b")
else:
self.b = self.create_param(b, (num_filters,), name="b")
def get_W_shape(self):
"""Get the shape of the weight matrix `W`.
Returns
-------
tuple of int
The shape of the weight matrix.
"""
num_input_channels = self.input_shape[1]
return (self.num_filters, num_input_channels, self.filter_size[0])
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
output_length = conv_output_length(input_shape[2],
self.filter_size[0],
self.stride[0],
self.border_mode)
return (input_shape[0], self.num_filters, output_length)
def get_output_for(self, input, input_shape=None, **kwargs):
# the optional input_shape argument is for when get_output_for is
# called directly with a different shape than self.input_shape.
if input_shape is None:
input_shape = self.input_shape
filter_shape = self.get_W_shape()
if self.border_mode in ['valid', 'full']:
conved = self.convolution(input, self.W, subsample=self.stride,
image_shape=input_shape,
filter_shape=filter_shape,
border_mode=self.border_mode)
elif self.border_mode == 'same':
if self.stride[0] != 1:
raise NotImplementedError("Strided convolution with "
"border_mode 'same' is not "
"supported by this layer yet.")
conved = self.convolution(input, self.W, subsample=self.stride,
image_shape=input_shape,
filter_shape=filter_shape,
border_mode='full')
shift = (self.filter_size[0] - 1) // 2
conved = conved[:, :, shift:input.shape[2] + shift]
else:
raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)
if self.b is None:
activation = conved
elif self.untie_biases:
activation = conved + self.b.dimshuffle('x', 0, 1)
else:
activation = conved + self.b.dimshuffle('x', 0, 'x')
return self.nonlinearity(activation)
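# Hedged sketch (not part of the original module): a numpy-only illustration
# of the 'same' border-mode emulation used above -- run a 'full' convolution,
# then crop (filter_size - 1) // 2 positions from the start and keep
# input_length samples, which yields an output of the same length as the
# input. The function name and values are illustrative only.
def _same_border_mode_crop_sketch():
    import numpy as np
    x = np.arange(10.0)                    # input signal of length 10
    w = np.ones(5)                         # filter of length 5
    full = np.convolve(x, w, mode='full')  # length 10 + 5 - 1 = 14
    shift = (len(w) - 1) // 2              # same shift as in get_output_for
    same = full[shift:len(x) + shift]      # cropped back to length 10
    assert same.shape == x.shape
    assert np.allclose(same, np.convolve(x, w, mode='same'))
    return same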
class Conv2DLayer(Layer):
"""
2D convolutional layer
Performs a 2D convolution on its input and optionally adds a bias and
applies an elementwise nonlinearity.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape. The
output of this layer should be a 4D tensor, with shape
``(batch_size, num_input_channels, input_height, input_width)``.
num_filters : int
The number of learnable convolutional filters this layer has.
filter_size : int or tuple of int
An integer or a 2-element tuple specifying the size of the filters.
stride : int or tuple of int
An integer or a 2-element tuple specifying the stride of the
convolution operation.
border_mode : str, one of 'valid', 'full', 'same'
A string indicating the convolution border mode.
If 'valid', the convolution is only computed where the input and the
filter fully overlap.
If 'full', the convolution is computed wherever the input and the
filter overlap by at least one position.
If 'same', the convolution is computed wherever the input and the
filter overlap by at least half the filter size (rounded up). In
practice, the input is zero-padded with half the filter size at the
beginning and half at the end (or one less than half for even filter
sizes). This results in an output size that is the same as the input
size, for both odd and even filter sizes.
untie_biases : bool, default False
If ``False``, the layer will have one bias parameter per channel, which
is shared across all positions in that channel. As a result, the `b`
attribute will be a vector (1D).
If ``True``, the layer will have separate bias parameters for each
position in each channel. As a result, the `b` attribute will be a
3D tensor.
W : Theano shared variable, numpy array or callable
An initializer for the weights of the layer. This should initialize the
layer weights to a 4D array with shape
``(num_filters, num_input_channels, filter_height, filter_width)``.
See :meth:`Layer.create_param` for more information.
b : Theano shared variable, numpy array, callable or None
An initializer for the biases of the layer. If None is provided, the
layer will have no biases. This should initialize the layer biases to
a 1D array with shape ``(num_filters,)`` if `untie_biases` is set to
``False``. If it is set to ``True``, its shape should be
``(num_filters, output_height, output_width)`` instead.
See :meth:`Layer.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
convolution : callable
The convolution implementation to use. Usually it should be fine to
leave this at the default value.
**kwargs
Any additional keyword arguments are passed to the `Layer` superclass.
Attributes
----------
W : Theano shared variable
Variable representing the filter weights.
b : Theano shared variable
Variable representing the biases.
Notes
-----
Theano's default convolution function (`theano.tensor.nnet.conv.conv2d`)
does not support the 'same' border mode by default. This layer emulates
it by performing a 'full' convolution and then cropping the result, which
may negatively affect performance.
"""
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
border_mode="valid", untie_biases=False,
W=init.GlorotUniform(), b=init.Constant(0.),
nonlinearity=nonlinearities.rectify,
convolution=T.nnet.conv2d, **kwargs):
super(Conv2DLayer, self).__init__(incoming, **kwargs)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = as_tuple(filter_size, 2)
self.stride = as_tuple(stride, 2)
self.border_mode = border_mode
self.untie_biases = untie_biases
self.convolution = convolution
self.W = self.create_param(W, self.get_W_shape(), name="W")
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
self.b = self.create_param(b, (num_filters, output_shape[2],
output_shape[3]), name="b")
else:
self.b = self.create_param(b, (num_filters,), name="b")
def get_W_shape(self):
"""Get the shape of the weight matrix `W`.
Returns
-------
tuple of int
The shape of the weight tensor.
"""
num_input_channels = self.input_shape[1]
return (self.num_filters, num_input_channels, self.filter_size[0],
self.filter_size[1])
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
output_rows = conv_output_length(input_shape[2],
self.filter_size[0],
self.stride[0],
self.border_mode)
output_columns = conv_output_length(input_shape[3],
self.filter_size[1],
self.stride[1],
self.border_mode)
return (input_shape[0], self.num_filters, output_rows, output_columns)
def get_output_for(self, input, input_shape=None, **kwargs):
# the optional input_shape argument is for when get_output_for is
# called directly with a different shape than self.input_shape.
if input_shape is None:
input_shape = self.input_shape
filter_shape = self.get_W_shape()
if self.border_mode in ['valid', 'full']:
conved = self.convolution(input, self.W, subsample=self.stride,
image_shape=input_shape,
filter_shape=filter_shape,
border_mode=self.border_mode)
elif self.border_mode == 'same':
if self.stride != (1, 1):
raise NotImplementedError("Strided convolution with "
"border_mode 'same' is not "
"supported by this layer yet.")
conved = self.convolution(input, self.W, subsample=self.stride,
image_shape=input_shape,
filter_shape=filter_shape,
border_mode='full')
shift_x = (self.filter_size[0] - 1) // 2
shift_y = (self.filter_size[1] - 1) // 2
conved = conved[:, :, shift_x:input.shape[2] + shift_x,
shift_y:input.shape[3] + shift_y]
else:
raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)
if self.b is None:
activation = conved
elif self.untie_biases:
activation = conved + self.b.dimshuffle('x', 0, 1, 2)
else:
activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
# TODO: add Conv3DLayer
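# Hedged sketch (not part of the original module): shows how the border mode
# affects the shape reported by get_output_shape(), using the tuple form of
# `incoming` documented above. For a 32x32 input, a 5x5 filter and stride 1,
# the expected spatial sizes are: 'valid' -> 28x28, 'full' -> 36x36,
# 'same' -> 32x32. The function name is illustrative only.
def _conv2d_border_mode_shapes_sketch():
    shapes = {}
    for mode in ('valid', 'full', 'same'):
        layer = Conv2DLayer((None, 3, 32, 32), num_filters=8,
                            filter_size=(5, 5), border_mode=mode)
        shapes[mode] = layer.get_output_shape()
    return shapes  # e.g. {'valid': (None, 8, 28, 28), ...}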
# -*- coding: utf-8 -*-
"""Persistence layer for the google drive addon.
"""
import os
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth import Auth
from framework.exceptions import HTTPError
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from addons.base import exceptions
from addons.googledrive import settings as drive_settings
from addons.googledrive.client import (GoogleAuthClient,
GoogleDriveClient)
from addons.googledrive.serializer import GoogleDriveSerializer
from addons.googledrive.utils import to_hgrid
from website.util import api_v2_url
# from website.files.models.ext import PathFollowingFileNode
# TODO make googledrive "pathfollowing"
# A migration will need to be run that concats
# folder_path and filenode.path
# class GoogleDriveFileNode(PathFollowingFileNode):
class GoogleDriveFileNode(BaseFileNode):
_provider = 'googledrive'
FOLDER_ATTR_NAME = 'folder_path'
class GoogleDriveFolder(GoogleDriveFileNode, Folder):
pass
class GoogleDriveFile(GoogleDriveFileNode, File):
@property
def _hashes(self):
try:
return {'md5': self._history[-1]['extra']['hashes']['md5']}
except (IndexError, KeyError):
return None
class GoogleDriveProvider(ExternalProvider):
name = 'Google Drive'
short_name = 'googledrive'
client_id = drive_settings.CLIENT_ID
client_secret = drive_settings.CLIENT_SECRET
auth_url_base = '{}{}'.format(drive_settings.OAUTH_BASE_URL, 'auth?access_type=offline&approval_prompt=force')
callback_url = '{}{}'.format(drive_settings.API_BASE_URL, 'oauth2/v3/token')
auto_refresh_url = callback_url
refresh_time = drive_settings.REFRESH_TIME
expiry_time = drive_settings.EXPIRY_TIME
default_scopes = drive_settings.OAUTH_SCOPE
_auth_client = GoogleAuthClient()
_drive_client = GoogleDriveClient()
def handle_callback(self, response):
client = self._auth_client
info = client.userinfo(response['access_token'])
return {
'provider_id': info['sub'],
'display_name': info['name'],
'profile_url': info.get('profile', None)
}
def fetch_access_token(self, force_refresh=False):
self.refresh_oauth_key(force=force_refresh)
return self.account.oauth_key
class UserSettings(BaseOAuthUserSettings):
oauth_provider = GoogleDriveProvider
serializer = GoogleDriveSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = GoogleDriveProvider
provider_name = 'googledrive'
folder_id = models.TextField(null=True, blank=True)
folder_path = models.TextField(null=True, blank=True)
serializer = GoogleDriveSerializer
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
_api = None
@property
def api(self):
"""Authenticated ExternalProvider instance"""
if self._api is None:
self._api = GoogleDriveProvider(self.external_account)
return self._api
@property
def complete(self):
return bool(self.has_auth and self.user_settings.verify_oauth_access(
node=self.owner,
external_account=self.external_account,
metadata={'folder': self.folder_id}
))
@property
def folder_name(self):
if not self.folder_id:
return None
if self.folder_path != '/':
return os.path.split(self.folder_path)[1]
else:
return '/ (Full Google Drive)'
def clear_settings(self):
self.folder_id = None
self.folder_path = None
def get_folders(self, **kwargs):
node = self.owner
# Defaults exist when called by the API, but are `None`
path = kwargs.get('path') or ''
folder_id = kwargs.get('folder_id') or 'root'
try:
access_token = self.fetch_access_token()
except exceptions.InvalidAuthError:
raise HTTPError(403)
client = GoogleDriveClient(access_token)
if folder_id == 'root':
about = client.about()
return [{
'addon': self.config.short_name,
'path': '/',
'kind': 'folder',
'id': about['rootFolderId'],
'name': '/ (Full Google Drive)',
'urls': {
'folders': api_v2_url('nodes/{}/addons/googledrive/folders/'.format(self.owner._id),
params={
'path': '/',
'id': about['rootFolderId']
})
}
}]
contents = [
to_hgrid(item, node, path=path)
for item in client.folders(folder_id)
]
return contents
def set_folder(self, folder, auth):
"""Configure this addon to point to a Google Drive folder
:param dict folder: Folder metadata, including ``id`` and ``path``.
:param Auth auth: Authentication object for the acting user.
"""
self.folder_id = folder['id']
self.folder_path = folder['path']
# Tell the user's addon settings that this node is connecting
self.user_settings.grant_oauth_access(
node=self.owner,
external_account=self.external_account,
metadata={'folder': self.folder_id}
) # Performs a save on self.user_settings
self.save()
self.nodelogger.log('folder_selected', save=True)
@property
def selected_folder_name(self):
if self.folder_id is None:
return ''
elif self.folder_id == 'root':
return 'Full Google Drive'
else:
return self.folder_name
def deauthorize(self, auth=None, add_log=True, save=False):
"""Remove user authorization from this node and log the event."""
if add_log:
extra = {'folder_id': self.folder_id}
self.nodelogger.log(action='node_deauthorized', extra=extra, save=True)
self.clear_settings()
self.clear_auth()
if save:
self.save()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.fetch_access_token()}
def serialize_waterbutler_settings(self):
if not self.folder_id:
raise exceptions.AddonError('Folder is not configured')
return {
'folder': {
'id': self.folder_id,
'name': self.folder_name,
'path': self.folder_path
}
}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider='googledrive')
self.owner.add_log(
'googledrive_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['path'],
'folder': self.folder_path,
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def fetch_access_token(self):
return self.api.fetch_access_token()
def after_delete(self, user):
self.deauthorize(Auth(user=user), add_log=True, save=True)
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
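# Hedged sketch (not part of the addon): illustrates the dict shapes produced
# by NodeSettings.serialize_waterbutler_credentials() and
# serialize_waterbutler_settings() for a configured node. All values below are
# illustrative placeholders, not real identifiers.
def _waterbutler_payload_sketch():
    credentials = {'token': '<oauth-access-token>'}
    settings = {
        'folder': {
            'id': '<drive-folder-id>',
            'name': 'My Folder',
            'path': '/My Folder',
        },
    }
    return credentials, settings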
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from nose.tools import assert_equal, assert_true
from matplotlib.testing.decorators import image_comparison, cleanup
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
import numpy as np
@cleanup
def test_figure_label():
# pyplot figure creation, selection and closing with figure label and
# number
plt.close('all')
plt.figure('today')
plt.figure(3)
plt.figure('tomorrow')
plt.figure()
plt.figure(0)
plt.figure(1)
plt.figure(3)
assert_equal(plt.get_fignums(), [0, 1, 3, 4, 5])
assert_equal(plt.get_figlabels(), ['', 'today', '', 'tomorrow', ''])
plt.close(10)
plt.close()
plt.close(5)
plt.close('tomorrow')
assert_equal(plt.get_fignums(), [0, 1])
assert_equal(plt.get_figlabels(), ['', 'today'])
@cleanup
def test_fignum_exists():
# pyplot figure creation, selection and closing with fignum_exists
plt.figure('one')
plt.figure(2)
plt.figure('three')
plt.figure()
assert_equal(plt.fignum_exists('one'), True)
assert_equal(plt.fignum_exists(2), True)
assert_equal(plt.fignum_exists('three'), True)
assert_equal(plt.fignum_exists(4), True)
plt.close('one')
plt.close(4)
assert_equal(plt.fignum_exists('one'), False)
assert_equal(plt.fignum_exists(4), False)
@image_comparison(baseline_images=['figure_today'])
def test_figure():
# named figure support
fig = plt.figure('today')
ax = fig.add_subplot(111)
ax.set_title(fig.get_label())
ax.plot(list(xrange(5)))
# plot red line in a different figure.
plt.figure('tomorrow')
plt.plot([0, 1], [1, 0], 'r')
# Return to the original; make sure the red line is not there.
plt.figure('today')
plt.close('tomorrow')
@cleanup
def test_gca():
fig = plt.figure()
ax1 = fig.add_axes([0, 0, 1, 1])
assert_true(fig.gca(projection='rectilinear') is ax1)
assert_true(fig.gca() is ax1)
ax2 = fig.add_subplot(121, projection='polar')
assert_true(fig.gca() is ax2)
assert_true(fig.gca(polar=True) is ax2)
ax3 = fig.add_subplot(122)
assert_true(fig.gca() is ax3)
# the final request for a polar axes will end up creating one
# with a spec of 111.
assert_true(fig.gca(polar=True) is not ax3)
assert_true(fig.gca(polar=True) is not ax2)
assert_equal(fig.gca().get_geometry(), (1, 1, 1))
fig.sca(ax1)
assert_true(fig.gca(projection='rectilinear') is ax1)
assert_true(fig.gca() is ax1)
@image_comparison(baseline_images=['figure_suptitle'])
def test_suptitle():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('hello', color='r')
fig.suptitle('title', color='g', rotation='30')
@cleanup
def test_suptitle_fontproperties():
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fps = FontProperties(size='large', weight='bold')
txt = fig.suptitle('fontprops title', fontproperties=fps)
assert_equal(txt.get_fontsize(), fps.get_size_in_points())
assert_equal(txt.get_weight(), fps.get_weight())
@image_comparison(baseline_images=['alpha_background'],
# only test png and svg. The PDF output appears correct,
# but Ghostscript does not preserve the background color.
extensions=['png', 'svg'],
savefig_kwarg={'facecolor': (0, 1, 0.4),
'edgecolor': 'none'})
def test_alpha():
# We want an image which has a background color and an
# alpha of 0.4.
fig = plt.figure(figsize=[2, 1])
fig.set_facecolor((0, 1, 0.4))
fig.patch.set_alpha(0.4)
import matplotlib.patches as mpatches
fig.patches.append(mpatches.CirclePolygon([20, 20],
radius=15,
alpha=0.6,
facecolor='red'))
@cleanup
def test_too_many_figures():
import warnings
with warnings.catch_warnings(record=True) as w:
for i in range(22):
fig = plt.figure()
assert len(w) == 1
def test_iterability_axes_argument():
# This is a regression test for matplotlib/matplotlib#3196. If one of the
# arguments returned by _as_mpl_axes defines __getitem__ but is not
# iterable, this would raise an exception. This is because we check
# whether the arguments are iterable, and if so we try to convert them
# to a tuple. However, the ``iterable`` function returns True if
# __getitem__ is present, but some classes can define __getitem__ without
# being iterable. The tuple conversion is now done in a try...except in
# case it fails.
class MyAxes(Axes):
def __init__(self, *args, **kwargs):
kwargs.pop('myclass', None)
return Axes.__init__(self, *args, **kwargs)
class MyClass(object):
def __getitem__(self, item):
if item != 'a':
raise ValueError("item should be a")
def _as_mpl_axes(self):
return MyAxes, {'myclass': self}
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=MyClass())
plt.close(fig)
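# Hedged companion check (not in the original test module): demonstrates that
# an object defining __getitem__ but raising for integer indices is not
# actually iterable, which is exactly the situation the try...except described
# above guards against.
def test_getitem_without_iteration():
    class NotIterable(object):
        def __getitem__(self, item):
            if item != 'a':
                raise ValueError("item should be a")

    try:
        tuple(NotIterable())
        converted = True
    except ValueError:
        converted = False
    assert_true(not converted)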
@cleanup
def test_set_fig_size():
fig = plt.figure()
# check figwidth
fig.set_figwidth(5)
assert_equal(fig.get_figwidth(), 5)
# check figheight
fig.set_figheight(1)
assert_equal(fig.get_figheight(), 1)
# check using set_size_inches
fig.set_size_inches(2, 4)
assert_equal(fig.get_figwidth(), 2)
assert_equal(fig.get_figheight(), 4)
# check using tuple to first argument
fig.set_size_inches((1, 3))
assert_equal(fig.get_figwidth(), 1)
assert_equal(fig.get_figheight(), 3)
@cleanup
def test_axes_remove():
fig, axes = plt.subplots(2, 2)
axes[-1, -1].remove()
for ax in axes.ravel()[:-1]:
assert ax in fig.axes
assert axes[-1, -1] not in fig.axes
assert_equal(len(fig.axes), 3)
def test_figaspect():
w, h = plt.figaspect(np.float64(2) / np.float64(1))
assert h / w == 2
w, h = plt.figaspect(2)
assert h / w == 2
w, h = plt.figaspect(np.zeros((1, 2)))
assert h / w == 0.5
w, h = plt.figaspect(np.zeros((2, 2)))
assert h / w == 1
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)